repo_name (stringlengths 6 to 61) | path (stringlengths 4 to 230) | copies (stringlengths 1 to 3) | size (stringlengths 4 to 6) | text (stringlengths 1.01k to 850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6 to 96.6) | line_max (int64, 32 to 939) | alpha_frac (float64, 0.26 to 0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62 to 6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
paalge/scikit-image | skimage/draw/tests/test_draw.py | 1 | 30229 | from numpy.testing import assert_array_equal, assert_equal, assert_raises, \
assert_almost_equal
import numpy as np
from skimage._shared.testing import test_parallel
from skimage.draw import (set_color, line, line_aa, polygon, polygon_perimeter,
circle, circle_perimeter, circle_perimeter_aa,
ellipse, ellipse_perimeter,
_bezier_segment, bezier_curve)
from skimage.measure import regionprops
def test_set_color():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 0, 30)
set_color(img, (rr, cc), 1)
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_set_color_with_alpha():
img = np.zeros((10, 10))
rr, cc, alpha = line_aa(0, 0, 0, 30)
set_color(img, (rr, cc), 1, alpha=alpha)
# Wrong dimensionality color
assert_raises(ValueError, set_color, img, (rr, cc), (255, 0, 0), alpha=alpha)
img = np.zeros((10, 10, 3))
rr, cc, alpha = line_aa(0, 0, 0, 30)
set_color(img, (rr, cc), (1, 0, 0), alpha=alpha)
@test_parallel()
def test_line_horizontal():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 0, 9)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_vertical():
img = np.zeros((10, 10))
rr, cc = line(0, 0, 9, 0)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[:, 0] = 1
assert_array_equal(img, img_)
def test_line_reverse():
img = np.zeros((10, 10))
rr, cc = line(0, 9, 0, 0)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_diag():
img = np.zeros((5, 5))
rr, cc = line(0, 0, 4, 4)
img[rr, cc] = 1
img_ = np.eye(5)
assert_array_equal(img, img_)
def test_line_aa_horizontal():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 0, 9)
set_color(img, (rr, cc), 1, alpha=val)
img_ = np.zeros((10, 10))
img_[0, :] = 1
assert_array_equal(img, img_)
def test_line_aa_vertical():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 9, 0)
img[rr, cc] = val
img_ = np.zeros((10, 10))
img_[:, 0] = 1
assert_array_equal(img, img_)
def test_line_aa_diagonal():
img = np.zeros((10, 10))
rr, cc, val = line_aa(0, 0, 9, 6)
img[rr, cc] = 1
# Check that each pixel belonging to line,
# also belongs to line_aa
r, c = line(0, 0, 9, 6)
for r_i, c_i in zip(r, c):
assert_equal(img[r_i, c_i], 1)
def test_line_equal_aliasing_horizontally_vertically():
img0 = np.zeros((25, 25))
img1 = np.zeros((25, 25))
# Near-horizontal line
rr, cc, val = line_aa(10, 2, 12, 20)
img0[rr, cc] = val
# Near-vertical (transpose of prior)
rr, cc, val = line_aa(2, 10, 20, 12)
img1[rr, cc] = val
# Difference - should be zero
assert_array_equal(img0, img1.T)
def test_polygon_rectangle():
img = np.zeros((10, 10), 'uint8')
rr, cc = polygon((1, 4, 4, 1, 1), (1, 1, 4, 4, 1))
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[1:4, 1:4] = 1
assert_array_equal(img, img_)
def test_polygon_rectangle_angular():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((0, 3), (4, 7), (7, 4), (3, 0), (0, 3)))
rr, cc = polygon(poly[:, 0], poly[:, 1])
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_polygon_parallelogram():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((1, 1), (5, 1), (7, 6), (3, 6), (1, 1)))
rr, cc = polygon(poly[:, 0], poly[:, 1])
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_polygon_exceed():
img = np.zeros((10, 10), 'uint8')
poly = np.array(((1, -1), (100, -1), (100, 100), (1, 100), (1, 1)))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc] = 1
img_ = np.zeros((10, 10))
img_[1:, :] = 1
assert_array_equal(img, img_)
def test_circle():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle(7, 7, 6)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_bresenham():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 0, method='bresenham')
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 7, method='bresenham')
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_bresenham_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc = circle_perimeter(7, 10, 9, method='bresenham', shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc = circle_perimeter(7 + shift, 10, 9, method='bresenham', shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_circle_perimeter_andres():
img = np.zeros((15, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 0, method='andres')
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 15), 'uint8')
rr, cc = circle_perimeter(7, 7, 7, method='andres')
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_aa():
img = np.zeros((15, 15), 'uint8')
rr, cc, val = circle_perimeter_aa(7, 7, 0)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[7][7] == 1)
img = np.zeros((17, 17), 'uint8')
rr, cc, val = circle_perimeter_aa(8, 8, 7)
img[rr, cc] = val * 255
img_ = np.array(
[[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 189, 172, 74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0],
[ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0],
[ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0],
[ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0],
[ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0],
[ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0],
[ 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0],
[ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0],
[ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0],
[ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0],
[ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0],
[ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0],
[ 0, 0, 0, 0, 189, 172, 74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_circle_perimeter_aa_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc, val = circle_perimeter_aa(7, 10, 9, shape=(15, 20))
img[rr, cc] = val * 255
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc, val = circle_perimeter_aa(7 + shift, 10, 9, shape=None)
img_[rr, cc] = val * 255
assert_array_equal(img, img_[shift:-shift, :])
def test_ellipse_trivial():
img = np.zeros((2, 2), 'uint8')
rr, cc = ellipse(0.5, 0.5, 0.5, 0.5)
img[rr, cc] = 1
img_correct = np.array([
[0, 0],
[0, 0]
])
assert_array_equal(img, img_correct)
img = np.zeros((2, 2), 'uint8')
rr, cc = ellipse(0.5, 0.5, 1.1, 1.1)
img[rr, cc] = 1
img_correct = np.array([
[1, 1],
[1, 1],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 0.9, 0.9)
img[rr, cc] = 1
img_correct = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 1.1, 1.1)
img[rr, cc] = 1
img_correct = np.array([
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
])
assert_array_equal(img, img_correct)
img = np.zeros((3, 3), 'uint8')
rr, cc = ellipse(1, 1, 1.5, 1.5)
img[rr, cc] = 1
img_correct = np.array([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
])
assert_array_equal(img, img_correct)
def test_ellipse_generic():
img = np.zeros((4, 4), 'uint8')
rr, cc = ellipse(1.5, 1.5, 1.1, 1.7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((5, 5), 'uint8')
rr, cc = ellipse(2, 2, 1.7, 1.7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((10, 10), 'uint8')
rr, cc = ellipse(5, 5, 3, 4)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((10, 10), 'uint8')
rr, cc = ellipse(4.5, 5, 3.5, 4)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
img = np.zeros((15, 15), 'uint8')
rr, cc = ellipse(7, 7, 3, 7)
img[rr, cc] = 1
img_ = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
])
assert_array_equal(img, img_)
def test_ellipse_with_shape():
img = np.zeros((15, 15), 'uint8')
rr, cc = ellipse(7, 7, 3, 10, shape=img.shape)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_negative():
rr, cc = ellipse(-3, -3, 1.7, 1.7)
rr_, cc_ = np.nonzero(np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
]))
assert_array_equal(rr, rr_ - 5)
assert_array_equal(cc, cc_ - 5)
def test_ellipse_rotation_symmetry():
img1 = np.zeros((150, 150), dtype=np.uint8)
img2 = np.zeros((150, 150), dtype=np.uint8)
for angle in range(0, 180, 15):
img1.fill(0)
rr, cc = ellipse(80, 70, 60, 40, rotation=np.deg2rad(angle))
img1[rr, cc] = 1
img2.fill(0)
rr, cc = ellipse(80, 70, 60, 40, rotation=np.deg2rad(angle + 180))
img2[rr, cc] = 1
assert_array_equal(img1, img2)
def test_ellipse_rotated():
img = np.zeros((1000, 1200), dtype=np.uint8)
for rot in range(0, 180, 10):
img.fill(0)
angle = np.deg2rad(rot)
rr, cc = ellipse(500, 600, 200, 400, rotation=angle)
img[rr, cc] = 1
# estimate orientation of ellipse
angle_estim = np.round(regionprops(img)[0].orientation, 3) % (np.pi / 2)
assert_almost_equal(angle_estim, angle % (np.pi / 2), 2)
def test_ellipse_perimeter_dot_zeroangle():
# dot, angle == 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 0, 0, 0)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[15][7] == 1)
def test_ellipse_perimeter_dot_nzeroangle():
# dot, angle != 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 0, 0, 1)
img[rr, cc] = 1
assert(np.sum(img) == 1)
assert(img[15][7] == 1)
def test_ellipse_perimeter_flat_zeroangle():
# flat ellipse
img = np.zeros((20, 18), 'uint8')
img_ = np.zeros((20, 18), 'uint8')
rr, cc = ellipse_perimeter(6, 7, 0, 5, 0)
img[rr, cc] = 1
rr, cc = line(6, 2, 6, 12)
img_[rr, cc] = 1
assert_array_equal(img, img_)
def test_ellipse_perimeter_zeroangle():
# angle == 0
img = np.zeros((30, 15), 'uint8')
rr, cc = ellipse_perimeter(15, 7, 14, 6, 0)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_perimeter_nzeroangle():
# angle != 0
img = np.zeros((30, 25), 'uint8')
rr, cc = ellipse_perimeter(15, 11, 12, 6, 1.1)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_array_equal(img, img_)
def test_ellipse_perimeter_shape():
img = np.zeros((15, 20), 'uint8')
rr, cc = ellipse_perimeter(7, 10, 9, 9, 0, shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
rr, cc = ellipse_perimeter(7 + shift, 10, 9, 9, 0, shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_bezier_segment_straight():
image = np.zeros((200, 200), dtype=int)
r0, r1, r2 = 50, 150, 150
c0, c1, c2 = 50, 50, 150
rr, cc = _bezier_segment(r0, c0, r1, c1, r2, c2, 0)
image[rr, cc] = 1
image2 = np.zeros((200, 200), dtype=int)
rr, cc = line(r0, c0, r2, c2)
image2[rr, cc] = 1
assert_array_equal(image, image2)
def test_bezier_segment_curved():
img = np.zeros((25, 25), 'uint8')
r0, c0 = 20, 20
r1, c1 = 20, 2
r2, c2 = 2, 2
rr, cc = _bezier_segment(r0, c0, r1, c1, r2, c2, 1)
img[rr, cc] = 1
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img[r0, c0], 1)
assert_equal(img[r2, c2], 1)
assert_array_equal(img, img_)
def test_bezier_curve_straight():
image = np.zeros((200, 200), dtype=int)
r0, c0 = 50, 50
r1, c1 = 150, 50
r2, c2 = 150, 150
rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, 0)
image[rr, cc] = 1
image2 = np.zeros((200, 200), dtype=int)
rr, cc = line(r0, c0, r2, c2)
image2[rr, cc] = 1
assert_array_equal(image, image2)
def test_bezier_curved_weight_eq_1():
img = np.zeros((23, 8), 'uint8')
r0, c0 = 1, 1
r1, c1 = 11, 11
r2, c2 = 21, 1
rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, 1)
img[rr, cc] = 1
assert_equal(img[r0, c0], 1)
assert_equal(img[r2, c2], 1)
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img, img_)
def test_bezier_curved_weight_neq_1():
img = np.zeros((23, 10), 'uint8')
r0, c0 = 1, 1
r1, c1 = 11, 11
r2, c2 = 21, 1
rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, 2)
img[rr, cc] = 1
assert_equal(img[r0, c0], 1)
assert_equal(img[r2, c2], 1)
img_ = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
)
assert_equal(img, img_)
def test_bezier_curve_shape():
img = np.zeros((15, 20), 'uint8')
r0, c0 = 1, 5
r1, c1 = 6, 11
r2, c2 = 1, 14
rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, 2, shape=(15, 20))
img[rr, cc] = 1
shift = 5
img_ = np.zeros((15 + 2 * shift, 20), 'uint8')
r0, c0 = 1 + shift, 5
r1, c1 = 6 + shift, 11
r2, c2 = 1 + shift, 14
rr, cc = bezier_curve(r0, c0, r1, c1, r2, c2, 2, shape=None)
img_[rr, cc] = 1
assert_array_equal(img, img_[shift:-shift, :])
def test_polygon_perimeter():
expected = np.array(
[[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
)
out = np.zeros_like(expected)
rr, cc = polygon_perimeter([0, 2, 2, 0],
[0, 0, 3, 3])
out[rr, cc] = 1
assert_array_equal(out, expected)
out = np.zeros_like(expected)
rr, cc = polygon_perimeter([-1, -1, 3, 3],
[-1, 4, 4, -1],
shape=out.shape, clip=True)
out[rr, cc] = 1
assert_array_equal(out, expected)
assert_raises(ValueError, polygon_perimeter, [0], [1], clip=True)
def test_polygon_perimeter_outside_image():
rr, cc = polygon_perimeter([-1, -1, 3, 3],
[-1, 4, 4, -1], shape=(3, 4))
assert_equal(len(rr), 0)
assert_equal(len(cc), 0)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause | -6,287,617,031,671,260,000 | 33.906467 | 95 | 0.38645 | false | 2.064399 | true | false | false |
rezoo/chainer | examples/static_graph_optimizations/cifar/train_cifar_custom_loop.py | 1 | 5414 | """CIFAR example with static subgraph optimizations.
This is a version of the Chainer CIFAR example that has been modified
to support the static subgraph optimizations feature. Note that
the code is mostly unchanged except for the addition of the
`@static_graph` decorator to the model chain's `__call__()` method.
This code is a custom loop version of train_cifar.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
import argparse
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', '-d', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=0,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
if args.test:
train = train[:200]
test = test[:200]
train_count = len(train)
test_count = len(test)
model = L.Classifier(models.VGG.VGG(class_labels))
if args.gpu >= 0:
# Make a specified GPU current
chainer.backends.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
sum_accuracy = 0
sum_loss = 0
while train_iter.epoch < args.epoch:
batch = train_iter.next()
# Reduce learning rate by 0.5 every 25 epochs.
if train_iter.epoch % 25 == 0 and train_iter.is_new_epoch:
optimizer.lr *= 0.5
print('Reducing learning rate to: {}'.format(optimizer.lr))
x_array, t_array = convert.concat_examples(batch, args.gpu)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array)
optimizer.update(model, x, t)
sum_loss += float(model.loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
if train_iter.is_new_epoch:
print('epoch: {}'.format(train_iter.epoch))
print('train mean loss: {}, accuracy: {}'.format(
sum_loss / train_count, sum_accuracy / train_count))
# evaluation
sum_accuracy = 0
sum_loss = 0
model.predictor.train = False
# It is good practice to turn off train mode during evaluation.
with configuration.using_config('train', False):
for batch in test_iter:
x_array, t_array = convert.concat_examples(batch, args.gpu)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array)
loss = model(x, t)
sum_loss += float(loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
test_iter.reset()
model.predictor.train = True
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_accuracy / test_count))
sum_accuracy = 0
sum_loss = 0
# Save the model and the optimizer
print('save the model')
serializers.save_npz('mlp.model', model)
print('save the optimizer')
serializers.save_npz('mlp.state', optimizer)
if __name__ == '__main__':
main()
| mit | -5,925,498,630,314,898,000 | 38.518248 | 79 | 0.612117 | false | 4.01037 | true | false | false |
mattjhayes/nmeta | nmeta/baseclass.py | 1 | 3293 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The baseclass module is part of the nmeta suite and
provides an inheritable class methods for logging
"""
#*** logging imports:
import logging
import logging.handlers
import coloredlogs
class BaseClass(object):
"""
This class provides the common methods for inheritance by
other classes
"""
def __init__(self):
"""
Initialise the BaseClass class
"""
pass
def configure_logging(self, name, s_name, c_name):
"""
Configure logging for the class that has inherited
this method
"""
#*** Set up Logging:
self.logger = logging.getLogger(name)
#*** Get logging config values from config class:
_logging_level_s = self.config.get_value(s_name)
_logging_level_c = self.config.get_value(c_name)
_syslog_enabled = self.config.get_value('syslog_enabled')
_loghost = self.config.get_value('loghost')
_logport = self.config.get_value('logport')
_logfacility = self.config.get_value('logfacility')
_syslog_format = self.config.get_value('syslog_format')
_console_log_enabled = self.config.get_value('console_log_enabled')
_coloredlogs_enabled = self.config.get_value('coloredlogs_enabled')
_console_format = self.config.get_value('console_format')
self.logger.propagate = False
#*** Syslog:
if _syslog_enabled:
#*** Log to syslog on host specified in config.yaml:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
#*** Add syslog log handler to logger:
self.logger.addHandler(self.syslog_handler)
#*** Console logging:
if _console_log_enabled:
#*** Log to the console:
if _coloredlogs_enabled:
#*** Colourise the logs to make them easier to understand:
coloredlogs.install(level=_logging_level_c,
logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')
else:
#*** Add console log handler to logger:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
self.logger.addHandler(self.console_handler)
| apache-2.0 | -772,745,715,622,030,500 | 39.158537 | 76 | 0.624051 | false | 4.315858 | true | false | false |
jakobj/nest-simulator | pynest/examples/twoneurons.py | 6 | 2672 | # -*- coding: utf-8 -*-
#
# twoneurons.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Two neuron example
----------------------------
This script simulates two connected pre- and postsynaptic neurons.
The presynaptic neuron receives a constant external current,
and the membrane potentials of both neurons are recorded.
See Also
~~~~~~~~
:doc:`one_neuron`
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and plotting.
# Additionally, we set the verbosity to suppress info messages and reset
# the kernel.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, we create the two neurons and the recording device.
neuron_1 = nest.Create("iaf_psc_alpha")
neuron_2 = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, we set the external current of neuron 1.
neuron_1.I_e = 376.0
###############################################################################
# Fourth, we connect neuron 1 to neuron 2.
# Then, we connect a voltmeter to the two neurons.
# To learn more about the previous steps, please check out the
# :doc:`one neuron example <one_neuron>`.
weight = 20.0
delay = 1.0
nest.Connect(neuron_1, neuron_2, syn_spec={"weight": weight, "delay": delay})
nest.Connect(voltmeter, neuron_1)
nest.Connect(voltmeter, neuron_2)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neurons' membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 | 8,742,777,730,280,329,000 | 30.809524 | 79 | 0.602545 | false | 4.175 | false | false | false |
xurble/FeedThing | feedthing/settings_server_example.py | 1 | 1398 |
import os
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
ALLOWED_HOSTS = ['localhost'] # change this to your server's domain
FEEDS_SERVER = 'https://example.com/' # change this to where you are running - it's in the user agent string used when polling sites
FEEDS_CLOUDFLARE_WORKER = None # You will need a cloudflare account with the django-feed-reader cloudflare worker installed to use this setting
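# Example (illustrative only): if the worker is deployed, this would be set to
# its URL, e.g. FEEDS_CLOUDFLARE_WORKER = "https://feed-fetch.example.workers.dev"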
# this is where collectstatic will gather its files
STATIC_ROOT = os.path.join(SITE_ROOT, "..", "static")
DEBUG = False # or true if you are running locally
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'feedthing', # Or path to database file if using sqlite3.
'USER': 'auser', # Not used with sqlite3.
'PASSWORD': 'apassword', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'OPTIONS': {
'init_command': 'SET default_storage_engine=INNODB',
}
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'BigLongStringOfCharactersHere'
| mit | 8,047,012,714,879,576,000 | 40.117647 | 145 | 0.610157 | false | 3.926966 | false | false | false |
joyxu/kernelci-backend | app/models/tests/test_defconfig_model.py | 1 | 5980 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import models.base as modb
import models.defconfig as moddf
class TestDefconfModel(unittest.TestCase):
def test_defconfig_document_valid_instance(self):
defconf_doc = moddf.DefconfigDocument('job', 'kernel', 'defconfig')
self.assertIsInstance(defconf_doc, modb.BaseDocument)
self.assertIsInstance(defconf_doc, moddf.DefconfigDocument)
def test_defconfig_document_collection(self):
defconfig_doc = moddf.DefconfigDocument('job', 'kernel', 'defconfig')
self.assertEqual(defconfig_doc.collection, 'defconfig')
def test_defconfig_document_to_dict(self):
defconf_doc = moddf.DefconfigDocument(
'job', 'kernel', 'defconfig', 'defconfig_full')
defconf_doc.id = "defconfig_id"
defconf_doc.job_id = "job_id"
defconf_doc.created_on = "now"
defconf_doc.metadata = {}
defconf_doc.status = "FAIL"
defconf_doc.dirname = "defconfig"
defconf_doc.boot_resul_description = []
defconf_doc.errors = 1
defconf_doc.warnings = 1
defconf_doc.build_time = 1
defconf_doc.arch = "foo"
defconf_doc.git_url = "git_url"
defconf_doc.git_commit = "git_commit"
defconf_doc.git_branch = "git_branch"
defconf_doc.git_describe = "git_describe"
defconf_doc.version = "1.0"
defconf_doc.modules = "modules-file"
defconf_doc.dtb_dir = "dtb-dir"
defconf_doc.kernel_config = "kernel-config"
defconf_doc.system_map = "system-map"
defconf_doc.text_offset = "offset"
defconf_doc.kernel_image = "kernel-image"
defconf_doc.modules_dir = "modules-dir"
defconf_doc.build_log = "build.log"
defconf_doc.kconfig_fragments = "config-frag"
defconf_doc.file_server_resource = "file-resource"
defconf_doc.file_server_url = "server-url"
expected = {
"name": "job-kernel-defconfig_full",
"_id": "defconfig_id",
"job": "job",
"kernel": "kernel",
"defconfig": "defconfig",
"job_id": "job_id",
"created_on": "now",
"metadata": {},
"status": "FAIL",
"defconfig": "defconfig",
"errors": 1,
"warnings": 1,
"build_time": 1,
"arch": "foo",
"dirname": "defconfig",
"git_url": "git_url",
"git_describe": "git_describe",
"git_branch": "git_branch",
"git_commit": "git_commit",
"build_platform": [],
"version": "1.0",
"dtb_dir": "dtb-dir",
"kernel_config": "kernel-config",
"kernel_image": "kernel-image",
"system_map": "system-map",
"text_offset": "offset",
"modules": "modules-file",
"modules_dir": "modules-dir",
"build_log": "build.log",
"kconfig_fragments": "config-frag",
"defconfig_full": "defconfig_full",
"file_server_resource": "file-resource",
"file_server_url": "server-url",
}
self.assertDictEqual(expected, defconf_doc.to_dict())
def test_deconfig_set_status_wrong_and_right(self):
defconf_doc = moddf.DefconfigDocument("job", "kernel", "defconfig")
self.assertRaises(ValueError, setattr, defconf_doc, "status", "foo")
self.assertRaises(ValueError, setattr, defconf_doc, "status", [])
self.assertRaises(ValueError, setattr, defconf_doc, "status", {})
self.assertRaises(ValueError, setattr, defconf_doc, "status", ())
defconf_doc.status = "FAIL"
self.assertEqual(defconf_doc.status, "FAIL")
defconf_doc.status = "PASS"
self.assertEqual(defconf_doc.status, "PASS")
defconf_doc.status = "UNKNOWN"
self.assertEqual(defconf_doc.status, "UNKNOWN")
defconf_doc.status = "BUILD"
self.assertEqual(defconf_doc.status, "BUILD")
def test_defconfig_set_build_platform_wrong(self):
defconf_doc = moddf.DefconfigDocument("job", "kernel", "defconfig")
self.assertRaises(
TypeError, setattr, defconf_doc, "build_platform", ())
self.assertRaises(
TypeError, setattr, defconf_doc, "build_platform", {})
self.assertRaises(
TypeError, setattr, defconf_doc, "build_platform", "")
def test_defconfig_set_build_platform(self):
defconf_doc = moddf.DefconfigDocument("job", "kernel", "defconfig")
defconf_doc.build_platform = ["a", "b"]
self.assertListEqual(defconf_doc.build_platform, ["a", "b"])
def test_defconfig_set_metadata_wrong(self):
defconf_doc = moddf.DefconfigDocument("job", "kernel", "defconfig")
self.assertRaises(TypeError, setattr, defconf_doc, "metadata", ())
self.assertRaises(TypeError, setattr, defconf_doc, "metadata", [])
self.assertRaises(TypeError, setattr, defconf_doc, "metadata", "")
def test_defconfig_from_json_is_none(self):
self.assertIsNone(moddf.DefconfigDocument.from_json({}))
self.assertIsNone(moddf.DefconfigDocument.from_json(""))
self.assertIsNone(moddf.DefconfigDocument.from_json([]))
self.assertIsNone(moddf.DefconfigDocument.from_json(()))
| agpl-3.0 | 4,970,841,871,047,545,000 | 40.527778 | 77 | 0.615385 | false | 3.7375 | true | false | false |
javidgon/dookio | server/src/server.py | 1 | 4802 | import os
import redis
import requests
import json
from werkzeug.wrappers import Request, Response
from src.utils import (fetch_apps,
contact_nodes,
was_applied,
exist_application,
add_app_to_webserver_routing,
remove_app_from_webserver_routing,
add_container_to_webserver_routing,
pick_up_node)
@Request.application
def application(request):
"""
Set up the infrastructure required for Heroku-ish app deployment.
This setup uses Hipache/Redis as the webserver and load balancer, and Docker
as the app container. Please be aware that the Docker configuration is done
on the individual nodes (node.py).
The standard configuration picks one node from the list defined above and
provides it with the information required for a successful deployment
(user & repo params).
"""
DOMAIN = os.environ.get('DOOKIO_DOMAIN', 'localhost')
redis_cli = redis.StrictRedis(host='localhost', port=6379, db=0)
# Dookio-cli: apps command
if request.path == '/apps':
apps = fetch_apps(redis_cli)
return Response(
[('--> {} (replicated in {} containers)\n'.format(
app[app.find(":") + 1:], len(apps[app]))) for app in apps])
# Pick up the proper params
conf = {
'action': request.args.get('action'),
'multiplicator': int(request.args.get('multiplicator', 1)),
'user': request.args.get('user'),
'repo': request.args.get('repo'),
'application_address': '{}.{}.{}'.format(
request.args.get('repo'), request.args.get('user'), DOMAIN)
}
if not all([conf.get('user'), conf.get('repo')]):
return Response(
'There was a problem. Please be sure you are '
'providing both "user", "repo"\n')
# Dookio-cli: containers command
action = conf.get('action')
if request.path == '/containers':
response_nodes = contact_nodes(conf)
if action == 'stop':
if was_applied(response_nodes):
remove_app_from_webserver_routing(redis_cli, conf)
elif action == 'start':
if (was_applied(response_nodes) and
not exist_application(redis_cli, conf)):
add_app_to_webserver_routing(redis_cli, conf)
for node_ip, response in response_nodes.iteritems():
# We only want to iterate over the valid responses.
status_code = response[1]
body = response[0][0]
if status_code == 200:
port = body.get('Ports')[0].get('PublicPort')
add_container_to_webserver_routing(redis_cli,
node_ip,
port,
conf)
resp = [{
'node': node_ip,
'containers': content[0]
} for node_ip, content in response_nodes.iteritems()]
return Response(json.dumps(resp))
# Dookio-cli: scale command
if request.path == '/scale':
if not exist_application(redis_cli, conf):
return Response(
'The app can not scale unless is running!\n')
if request.path == '/scale' or request.path == '/':
user = conf.get('user')
repo = conf.get('repo')
# Stop all existing containers
conf['action'] = 'stop'
contact_nodes(conf)
conf['action'] = None
remove_app_from_webserver_routing(redis_cli, conf)
for i in range(conf.get('multiplicator')):
node = pick_up_node()
response = requests.get('{}:5000'.format(node),
params={'user': user, 'repo': repo})
if response.status_code == 200:
# Set up hipache webserver for the specified branch
container = json.loads(response.content)
if not exist_application(redis_cli, conf):
add_app_to_webserver_routing(redis_cli, conf)
add_container_to_webserver_routing(redis_cli,
node,
container.get('port'),
conf)
else:
return Response(response.content, status=response.status_code)
return Response(
'App successfully deployed! Go to http://{}\n'.format(
conf.get('application_address')))
else:
return Response(
'Something went wrong! Are you using the proper parameters?. \n')
| mit | -7,947,853,317,819,878,000 | 39.694915 | 78 | 0.532486 | false | 4.568982 | false | false | false |
DanePubliczneGovPl/ckanext-danepubliczne | ckanext/danepubliczne/schema/article.py | 1 | 4861 | import re
import ckan.plugins as p
import ckan.plugins.toolkit as tk
import ckan.logic.auth as auth
from ckan.common import _
class Article(p.SingletonPlugin, tk.DefaultDatasetForm):
'''
Dataset type handling articles
'''
p.implements(p.ITemplateHelpers) # Helpers for templates
_PACKAGE_TYPE = 'article'
def get_helpers(self):
return {
'dp_recent_articles': self.h_recent_articles,
'dp_shorten_article': self.h_shorten_article
}
def h_recent_articles(self, count=4):
search = tk.get_action('package_search')(data_dict={
'rows': count,
'sort': 'metadata_created desc',
'fq': '+type:' + Article._PACKAGE_TYPE,
'facet': 'false'
})
if search['count'] == 0:
return []
return search['results']
def h_shorten_article(self, markdown, length=140, trail='...'):
# Try to return first paragraph (two consecutive \n disregarding white characters)
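# Example (illustrative): for markdown "First para.\n\nSecond para." the regex
# below matches the blank line, so only "First para." is kept; if that still
# exceeds `length`, it is cut to (length - len(trail)) characters plus `trail`.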
paragraph = markdown
m = re.search('([ \t\r\f\v]*\n){2}', markdown)
if m:
paragraph = paragraph[0:m.regs[0][0]]
if len(paragraph) > length:
paragraph = paragraph[0:(length - len(trail))] + trail
return paragraph
p.implements(p.IDatasetForm)
def package_types(self):
return [Article._PACKAGE_TYPE]
def is_fallback(self):
return False
def _modify_package_schema(self, schema):
to_extras = tk.get_converter('convert_to_extras')
to_tags = tk.get_converter('convert_to_tags')
optional = tk.get_validator('ignore_missing')
boolean_validator = tk.get_validator('boolean_validator')
not_empty = tk.get_validator('not_empty')
checkboxes = [optional, tk.get_validator('boolean_validator'), to_extras]
def fixed_type(value, context):
return Article._PACKAGE_TYPE
schema = {
'id': schema['id'],
'name': schema['name'],
'title': [not_empty, unicode],
'author': schema['author'],
'notes': [not_empty, unicode], # notes [content] is obligatory
'type': [fixed_type],
'private': [optional, boolean_validator],
'license_id': [not_empty, unicode],
'tag_string': schema['tag_string'],
'resources': schema['resources']
}
return schema
def show_package_schema(self):
not_empty = tk.get_validator('not_empty')
schema = super(Article, self).show_package_schema()
schema.update({
'notes': [not_empty, unicode], # notes [content] is obligatory
})
return schema
def create_package_schema(self):
schema = super(Article, self).create_package_schema()
schema = self._modify_package_schema(schema)
return schema
def update_package_schema(self):
schema = super(Article, self).update_package_schema()
schema = self._modify_package_schema(schema)
return schema
def new_template(self):
return 'article/new.html'
def read_template(self):
return 'article/read.html'
def edit_template(self):
return 'article/edit.html'
def search_template(self):
return 'article/search.html'
#
# def history_template(self):
# return 'article/history.html'
#
def package_form(self):
return 'article/new_package_form.html'
p.implements(p.IAuthFunctions)
def get_auth_functions(self):
return {
'package_create': _package_create, # new = context.get('package') == None
'package_delete': _package_delete, # data_dict['id]
'package_update': _package_update, # context['package'].type
}
def _package_create(context, data_dict=None):
user = context['user']
package = context.get('package') # None for new
if package and package['type'] == 'article':
return {'success': False, 'msg': _('User %s not authorized to create articles') % user}
return auth.create.package_create(context, data_dict)
def _package_delete(context, data_dict=None):
user = context['user']
package = auth.get_package_object(context, data_dict)
if package and package.type == 'article':
return {'success': False, 'msg': _('User %s not authorized to delete articles') % user}
return auth.delete.package_delete(context, data_dict)
def _package_update(context, data_dict=None):
user = context['user']
package = auth.get_package_object(context, data_dict)
if package and (package.type == 'article' or package.type == 'application'):
return {'success': False, 'msg': _('User %s not authorized to update articles') % user}
return auth.update.package_update(context, data_dict) | agpl-3.0 | -2,679,612,956,739,913,700 | 29.968153 | 95 | 0.603168 | false | 3.891914 | false | false | false |
anchore/anchore-engine | tests/unit/anchore_engine/services/policy_engine/engine/test_vulnerability_matches.py | 1 | 16014 | import pytest
import copy
import datetime
from anchore_engine.db.entities.policy_engine import (
FixedArtifact,
Vulnerability,
VulnerableArtifact,
ImagePackageVulnerability,
ImagePackage,
Image,
DistroTuple,
DistroNamespace,
DistroMapping,
)
from anchore_engine.subsys import logger
logger.enable_test_logging(level="DEBUG")
@pytest.fixture
def empty_vulnerability():
v = Vulnerability()
v.id = "CVE-1"
v.namespace_name = "rhel:8"
v.description = "test vulnerability"
v.metadata_json = {}
v.created_at = datetime.datetime.utcnow()
v.updated_at = datetime.datetime.utcnow()
v.fixed_in = []
v.vulnerable_in = []
v.severity = "high"
v.link = "somelink"
return v
@pytest.fixture
def empty_semver_vulnerability():
v = Vulnerability()
v.id = "CVE-2000"
v.namespace_name = "github:npm"
v.description = "test vulnerability for semver handling"
v.metadata_json = {}
v.created_at = datetime.datetime.utcnow()
v.updated_at = datetime.datetime.utcnow()
v.fixed_in = []
v.vulnerable_in = []
v.severity = "high"
v.link = "somelink"
return v
@pytest.fixture
def vulnerability_with_fix(empty_vulnerability):
fixed_vuln = copy.deepcopy(empty_vulnerability)
f = FixedArtifact()
f.vulnerability_id = fixed_vuln.id
f.name = "pkg1"
f.namespace_name = fixed_vuln.namespace_name
f.version = "0:1.1.el8"
f.version_format = "RPM"
f.parent = fixed_vuln
f.include_later_versions = True
f.epochless_version = f.version
f.fix_metadata = {}
f.created_at = datetime.datetime.now()
f.updated_at = datetime.datetime.now()
f.fix_observed_at = f.updated_at
fixed_vuln.fixed_in = [f]
return fixed_vuln
@pytest.fixture
def vulnerability_with_nofix(empty_vulnerability):
fixed_vuln = copy.deepcopy(empty_vulnerability)
f = FixedArtifact()
f.vulnerability_id = fixed_vuln.id
f.name = "pkg1"
f.namespace_name = fixed_vuln.namespace_name
f.version = "None"
f.version_format = "RPM"
f.parent = fixed_vuln
f.include_later_versions = True
f.epochless_version = f.version
f.fix_metadata = {}
f.created_at = datetime.datetime.now()
f.updated_at = datetime.datetime.now()
f.fix_observed_at = f.updated_at
fixed_vuln.fixed_in = [f]
return fixed_vuln
@pytest.fixture
def vulnerability_with_multifix(empty_semver_vulnerability):
fixed_vuln = copy.deepcopy(empty_semver_vulnerability)
f = FixedArtifact()
f.vulnerability_id = fixed_vuln.id
f.name = "semverpkg1"
f.namespace_name = fixed_vuln.namespace_name
f.version = ">= 1.1.0 < 1.1.2"
f.version_format = "semver"
f.parent = fixed_vuln
f.include_later_versions = False
f.epochless_version = f.version
f.fix_metadata = {"first_patched_version": "1.1.2"}
f.created_at = datetime.datetime.now()
f.updated_at = datetime.datetime.now()
f.fix_observed_at = f.updated_at
fixed_vuln.fixed_in = [f]
f = FixedArtifact()
f.vulnerability_id = fixed_vuln.id
f.name = "semverpkg1"
f.namespace_name = fixed_vuln.namespace_name
f.version = ">= 2.2.0 < 2.2.2"
f.version_format = "semver"
f.parent = fixed_vuln
f.include_later_versions = False
f.epochless_version = f.version
f.fix_metadata = {"first_patched_version": "2.2.2"}
f.created_at = datetime.datetime.now()
f.updated_at = datetime.datetime.now()
f.fix_observed_at = f.updated_at
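# Note (assumption): unlike the first artifact, this second FixedArtifact is
# never appended to fixed_vuln.fixed_in explicitly; it is presumed to be
# attached via the SQLAlchemy relationship backref when f.parent is set above.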
return fixed_vuln
@pytest.fixture
def vulnerability_with_vulnartifact(empty_vulnerability):
vuln_art = copy.deepcopy(empty_vulnerability)
v = VulnerableArtifact(
vulnerability_id=vuln_art.id,
namespace_name=vuln_art.namespace_name,
name="pkg1",
version="1.0.el8",
parent=vuln_art,
)
v.epochless_version = "0:" + v.version
v.version_format = "rpm"
v.include_previous_versions = False
vuln_art.vulnerable_in = [v]
v = VulnerableArtifact(
vulnerability_id=vuln_art.id,
namespace_name=vuln_art.namespace_name,
name="pkg1",
version="0.9.el8",
parent=vuln_art,
)
v.epochless_version = "0:" + v.version
v.version_format = "rpm"
v.include_previous_versions = False
vuln_art.vulnerable_in.append(v)
return vuln_art
@pytest.fixture
def vulnerability_with_both(vulnerability_with_fix, vulnerability_with_vulnartifact):
vulnerability_with_fix.fixed_in[0].include_later_versions = False
vulnerability_with_fix.vulnerable_in = vulnerability_with_vulnartifact.vulnerable_in
return vulnerability_with_fix
@pytest.fixture
def nvd_vulnerability():
"""
Returns a vulnerability similar to an NVD record but with an added fixed record, similar to how GitHub advisories have both vuln range and fix version
:return:
"""
v = Vulnerability()
v.id = "CVE-2"
v.created_at = v.updated_at = datetime.datetime.utcnow()
v.severity = "high"
v.namespace_name = "nvdv2:cves"
@pytest.fixture
def vulnerable_semver_pkg1():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "semverpkg1"
pkg.normalized_src_pkg = "semverpkg1"
pkg.version = "1.1.0"
pkg.fullversion = "1.1.0"
pkg.release = None
pkg.pkg_type = "npm"
pkg.distro_name = "npm"
pkg.distro_version = "N/A"
pkg.like_distro = "npm"
pkg.arch = "amd64"
pkg.pkg_path = "/app/myapp/package.json"
return pkg
@pytest.fixture
def vulnerable_semver_pkg2():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "semverpkg1"
pkg.normalized_src_pkg = "semverpkg1"
pkg.version = "2.2.0"
pkg.fullversion = "2.2.0"
pkg.release = None
pkg.pkg_type = "npm"
pkg.distro_name = "npm"
pkg.distro_version = "N/A"
pkg.like_distro = "npm"
pkg.arch = "amd64"
pkg.pkg_path = "/app/myapp2/package.json"
return pkg
@pytest.fixture
def vulnerable_pkg1():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "pkg1"
pkg.normalized_src_pkg = "pkg1"
pkg.version = "0:1.0.el8"
pkg.fullversion = "0:1.0.el8"
pkg.release = None
pkg.pkg_type = "RPM"
pkg.distro_name = "rhel"
pkg.distro_version = "8"
pkg.like_distro = "RHEL"
pkg.arch = "amd64"
pkg.pkg_path = "rpmdb"
return pkg
@pytest.fixture
def nonvulnerable_pkg1():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "pkg1"
pkg.normalized_src_pkg = "pkg1"
pkg.version = "1.1.el8"
pkg.fullversion = "0:1.1.el8"
pkg.release = None
pkg.pkg_type = "RPM"
pkg.distro_name = "centos"
pkg.distro_version = "8"
pkg.like_distro = "RHEL"
return pkg
@pytest.fixture
def python_pkg1_100():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "pythonpkg1"
pkg.normalized_src_pkg = "pythonpkg1"
pkg.version = "1.0.0"
pkg.fullversion = "1.0.0"
pkg.release = None
pkg.pkg_type = "python"
pkg.distro_name = "centos"
pkg.distro_version = "8"
pkg.like_distro = "RHEL"
return pkg
@pytest.fixture
def python_pkg1_101():
pkg = ImagePackage()
pkg.image_id = "image1"
pkg.image_user_id = "admin"
pkg.name = "pythonpkg1"
pkg.normalized_src_pkg = "pythonpkg1"
pkg.version = "1.0.1"
pkg.fullversion = "1.0.1"
pkg.release = None
pkg.pkg_type = "python"
pkg.distro_name = "centos"
pkg.distro_version = "8"
pkg.like_distro = "RHEL"
return pkg
def mock_distros_for(distro, version, like_distro=""):
"""
Mock implementation that doesn't use db
:param cls:
:param distro:
:param version:
:param like_distro:
:return:
"""
logger.info("Calling mocked distro_for %s %s %s", distro, version, like_distro)
return [DistroTuple(distro=distro, version=version, flavor=like_distro)]
@pytest.fixture
def monkeypatch_distros(monkeysession):
"""
Creates a monkey patch for the distro lookup to avoid DB operations
:return:
"""
monkeysession.setattr(DistroMapping, "distros_for", mock_distros_for)
def test_fixed_match(
vulnerability_with_fix, vulnerable_pkg1, nonvulnerable_pkg1, monkeypatch_distros
):
"""
Test matches against fixed artifacts
:return:
"""
f = vulnerability_with_fix.fixed_in[0]
logger.info("Testing package %s", vulnerable_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(f, FixedArtifact)
assert f.match_but_not_fixed(vulnerable_pkg1)
assert not f.match_but_not_fixed(nonvulnerable_pkg1)
pkg_vuln = ImagePackageVulnerability()
pkg_vuln.package = vulnerable_pkg1
pkg_vuln.vulnerability = vulnerability_with_fix
pkg_vuln.pkg_type = vulnerable_pkg1.name
pkg_vuln.pkg_version = vulnerable_pkg1.version
pkg_vuln.pkg_image_id = vulnerable_pkg1.image_id
pkg_vuln.pkg_user_id = vulnerable_pkg1.image_user_id
pkg_vuln.pkg_name = vulnerable_pkg1.name
pkg_vuln.pkg_arch = vulnerable_pkg1.arch
pkg_vuln.vulnerability_id = vulnerability_with_fix.id
pkg_vuln.vulnerability_namespace_name = vulnerability_with_fix.namespace_name
assert pkg_vuln.fixed_in() == f.version
def test_notfixed_match(vulnerability_with_nofix, vulnerable_pkg1, monkeypatch_distros):
"""
    Test matches against fixed artifacts when the vulnerability has no fix version available
:return:
"""
f = vulnerability_with_nofix.fixed_in[0]
logger.info("Testing package %s", vulnerable_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(f, FixedArtifact)
assert f.match_but_not_fixed(vulnerable_pkg1)
pkg_vuln = ImagePackageVulnerability()
pkg_vuln.package = vulnerable_pkg1
pkg_vuln.vulnerability = vulnerability_with_nofix
pkg_vuln.pkg_type = vulnerable_pkg1.name
pkg_vuln.pkg_version = vulnerable_pkg1.version
pkg_vuln.pkg_image_id = vulnerable_pkg1.image_id
pkg_vuln.pkg_user_id = vulnerable_pkg1.image_user_id
pkg_vuln.pkg_name = vulnerable_pkg1.name
pkg_vuln.pkg_arch = vulnerable_pkg1.arch
pkg_vuln.vulnerability_id = vulnerability_with_nofix.id
pkg_vuln.vulnerability_namespace_name = vulnerability_with_nofix.namespace_name
assert pkg_vuln.fixed_in() is None
def test_vulnerable_in(
vulnerability_with_vulnartifact,
vulnerable_pkg1,
nonvulnerable_pkg1,
monkeypatch_distros,
):
"""
Test vulnerable in matches
:return:
"""
f = vulnerability_with_vulnartifact.vulnerable_in[0]
logger.info("Testing package %s", vulnerable_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(f, VulnerableArtifact)
assert f.match_and_vulnerable(vulnerable_pkg1)
assert not f.match_and_vulnerable(nonvulnerable_pkg1)
f = vulnerability_with_vulnartifact.vulnerable_in[1]
logger.info("Testing package %s", vulnerable_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(f, VulnerableArtifact)
assert not f.match_and_vulnerable(
vulnerable_pkg1
) # Both not vuln now, this entry is for 0.9.x
assert not f.match_and_vulnerable(nonvulnerable_pkg1)
pkg_vuln = ImagePackageVulnerability()
pkg_vuln.package = vulnerable_pkg1
pkg_vuln.vulnerability = vulnerability_with_vulnartifact
pkg_vuln.pkg_type = vulnerable_pkg1.name
pkg_vuln.pkg_version = vulnerable_pkg1.version
pkg_vuln.pkg_image_id = vulnerable_pkg1.image_id
pkg_vuln.pkg_user_id = vulnerable_pkg1.image_user_id
pkg_vuln.pkg_name = vulnerable_pkg1.name
pkg_vuln.pkg_arch = vulnerable_pkg1.arch
pkg_vuln.vulnerability_id = vulnerability_with_vulnartifact.id
pkg_vuln.vulnerability_namespace_name = (
vulnerability_with_vulnartifact.namespace_name
)
    assert pkg_vuln.fixed_in() is None
def test_fixed_and_vulnerable(
vulnerability_with_both, vulnerable_pkg1, nonvulnerable_pkg1, monkeypatch_distros
):
"""
Test both fixed and vulnerable matches
:return:
"""
f = vulnerability_with_both.fixed_in[0]
v = vulnerability_with_both.vulnerable_in[0]
logger.info("Testing package %s", vulnerable_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(v, VulnerableArtifact)
assert v.match_and_vulnerable(vulnerable_pkg1)
assert not v.match_and_vulnerable(nonvulnerable_pkg1)
pkg_vuln = ImagePackageVulnerability()
pkg_vuln.package = vulnerable_pkg1
pkg_vuln.vulnerability = vulnerability_with_both
pkg_vuln.pkg_type = vulnerable_pkg1.name
pkg_vuln.pkg_version = vulnerable_pkg1.version
pkg_vuln.pkg_image_id = vulnerable_pkg1.image_id
pkg_vuln.pkg_user_id = vulnerable_pkg1.image_user_id
pkg_vuln.pkg_name = vulnerable_pkg1.name
pkg_vuln.pkg_arch = vulnerable_pkg1.arch
pkg_vuln.vulnerability_id = vulnerability_with_both.id
pkg_vuln.vulnerability_namespace_name = vulnerability_with_both.namespace_name
assert pkg_vuln.fixed_in() == "0:1.1.el8"
def test_non_comparable_versions(python_pkg1_100, python_pkg1_101, monkeypatch_distros):
"""
Tests matching where fixed and vuln records use a version format that doesn't support comparators beyond equality (e.g CPEs)
:return:
"""
assert isinstance(python_pkg1_100, ImagePackage)
assert isinstance(python_pkg1_101, ImagePackage)
v1 = Vulnerability()
v1.id = "CVE-100"
v1.namespace_name = "nvdv2:cves"
v1.severity = "high"
v1.fixed_in = []
v1.vulnerable_in = []
v1.created_at = v1.updated_at = datetime.datetime.utcnow()
vuln1 = VulnerableArtifact()
vuln1.created_at = vuln1.updated_at = v1.created_at
vuln1.namespace_name = v1.namespace_name
vuln1.name = python_pkg1_100.name
vuln1.vulnerability_id = v1.id
vuln1.parent = v1
vuln1.version = python_pkg1_100.version
vuln1.include_previous_versions = True
vuln1.epochless_version = vuln1.version
vuln1.version_format = (
"static" # Random string, but not in set of ['semver', 'rpm', 'deb', 'apk']
)
v1.vulnerable_in.append(vuln1)
assert v1.vulnerable_in[0].match_and_vulnerable(python_pkg1_100)
assert not v1.vulnerable_in[0].match_and_vulnerable(python_pkg1_101)
def test_multifix_vulnerability(
vulnerability_with_multifix,
vulnerable_semver_pkg1,
vulnerable_semver_pkg2,
monkeypatch_distros,
):
"""
Test matches against multiple semver range fixed artifacts (e.g. like a GHSA record)
:return:
"""
f = vulnerability_with_multifix.fixed_in[0]
f2 = vulnerability_with_multifix.fixed_in[1]
logger.info("Testing package %s", vulnerable_semver_pkg1)
logger.info("Testing vuln %s", f)
assert isinstance(f, FixedArtifact)
assert f.match_but_not_fixed(vulnerable_semver_pkg1)
assert not f.match_but_not_fixed(vulnerable_semver_pkg2)
t = ImagePackageVulnerability()
t.package = vulnerable_semver_pkg1
t.vulnerability = vulnerability_with_multifix
assert t.fixed_artifact() == f
assert t.fixed_in() == "1.1.2"
logger.info("Testing package %s", vulnerable_semver_pkg2)
logger.info("Testing vuln %s", f2)
assert isinstance(f2, FixedArtifact)
assert not f2.match_but_not_fixed(vulnerable_semver_pkg1)
assert f2.match_but_not_fixed(vulnerable_semver_pkg2)
t = ImagePackageVulnerability()
t.package = vulnerable_semver_pkg2
t.vulnerability = vulnerability_with_multifix
assert t.fixed_artifact() == f2
assert t.fixed_in() == "2.2.2"
# Unset the fix version
f2.fix_metadata = {}
logger.info("Testing vuln with fix removed %s", f2)
assert isinstance(f2, FixedArtifact)
assert not f2.match_but_not_fixed(vulnerable_semver_pkg1)
assert f2.match_but_not_fixed(vulnerable_semver_pkg2)
t = ImagePackageVulnerability()
t.package = vulnerable_semver_pkg2
t.vulnerability = vulnerability_with_multifix
assert t.fixed_artifact() == f2
assert t.fixed_in() is None
| apache-2.0 | -3,932,605,720,874,548,700 | 29.855491 | 154 | 0.675846 | false | 2.941047 | true | false | false |
ashwith/workshopfiles | python/code/radioactivity.py | 1 | 3022 | #
# Copyright (c) 2013 Ashwith Jerome Rego
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pylab
import random
import math
#=================================================================
# Radioactive Decay - Monte Carlo Simulation #
#=================================================================
def simRadio(nMolecules, halfLife, nTrials, tStop, tStep):
"""
Performs a Monte Carlo Simulation for radioactive decay
Keyword arguments:
nMolecules -- number of molecules in the material
halfLife -- Half life of the material in time units
nTrials -- Number of simulation trials to be performed
tStop -- Stop time for each simulation in time units
tStep -- Step time in simulation for each time unit.
Return a list of time coordinates and a list of the average
number of molecules at each time step.
"""
# Calculate the decay constants,
# the number of simulation steps
k = math.log(2)/halfLife
nSteps = int(tStop/tStep)
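    # Each molecule is treated independently: in one step of length tStep it
    # decays with probability k*tStep (a good approximation while k*tStep << 1),
    # so the trial-averaged curve should follow nMolecules * 2**(-t/halfLife).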
# initialize list to store average number
# of molecules after each simulations step
nMol = [0] * nSteps
# First loop - runs through each trial
for index_i in range(nTrials):
tmpNMolecules = nMolecules
        # Second loop - runs through each simulation step
for index_j in range(nSteps):
# Third loop - decide the fate of each molecule
            # in a simulation step. Each molecule decays
            # (is removed) with probability k*tStep per step
for index_k in range(tmpNMolecules):
if random.uniform(0,1) <= k*tStep:
tmpNMolecules -= 1
nMol[index_j] += tmpNMolecules
tAxis = []
# Calculate the average for each simulation step
# and the coordinates for the time axis
for index_i in range(nSteps):
nMol[index_i] /= float(nTrials)
tAxis += [index_i*tStep]
return [0] + tAxis, [nMolecules] + nMol
def testSim():
nMolecules = 100
nTrials = 1000
tStop = 15
tStep = 0.1
halfLife = 2
    pylab.figure()
t, y = simRadio(nMolecules, halfLife, nTrials, tStop, tStep)
pylab.plot(t, y)
pylab.title("Radioactive Decay - Monte Carlo Simulation")
pylab.xlabel("Time")
pylab.ylabel("Number of Molecules")
    pylab.text(6, 90, "Molecules left after one half-life = " + str(y[int(halfLife/tStep)]))
pylab.show()
| gpl-3.0 | 7,523,910,302,151,611,000 | 33.735632 | 87 | 0.625083 | false | 3.839898 | false | false | false |
thomasgibson/firedrake-hybridization | compressible_examples/sk_nonlinear.py | 1 | 7534 | from gusto import *
import itertools
from firedrake import (as_vector, SpatialCoordinate,
PeriodicIntervalMesh,
ExtrudedMesh, exp, sin, Function,
FunctionSpace, VectorFunctionSpace,
BrokenElement)
from firedrake.petsc import PETSc
from argparse import ArgumentParser
import numpy as np
import sys
PETSc.Log.begin()
parser = ArgumentParser(description=("""
Nonhydrostatic gravity wave test based on that of Skamarock and Klemp (1994).
"""), add_help=False)
parser.add_argument("--test",
action="store_true",
help="Enable a quick test run.")
parser.add_argument("--dt",
action="store",
default=6.0,
type=float,
help="Time step size (s)")
parser.add_argument("--res",
default=1,
type=int,
action="store",
help="Resolution scaling parameter")
parser.add_argument("--debug",
action="store_true",
help="Turn on KSP monitors")
parser.add_argument("--help",
action="store_true",
help="Show help.")
args, _ = parser.parse_known_args()
if args.help:
help = parser.format_help()
PETSc.Sys.Print("%s\n" % help)
sys.exit(1)
res = args.res
nlayers = res*10 # vertical layers
columns = res*300 # number of columns
dt = args.dt # Time steps (s)
if args.test:
tmax = dt
else:
tmax = 3600.
H = 1.0e4 # Height position of the model top
L = 3.0e5
PETSc.Sys.Print("""
Number of vertical layers: %s,\n
Number of horizontal columns: %s.\n
""" % (nlayers, columns))
m = PeriodicIntervalMesh(columns, L)
dx = L / columns
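# Advective CFL estimate based on the 20 m/s background wind prescribed further below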
cfl = 20.0 * dt / dx
dz = H / nlayers
PETSc.Sys.Print("""
Problem parameters:\n
Test case: Skamarock and Klemp gravity wave.\n
Time-step size: %s,\n
Test run: %s,\n
Dx (m): %s,\n
Dz (m): %s,\n
CFL: %s\n
""" % (dt,
bool(args.test),
dx,
dz,
cfl))
PETSc.Sys.Print("Initializing problem with dt: %s and tmax: %s.\n" % (dt,
tmax))
# build volume mesh
mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)
fieldlist = ['u', 'rho', 'theta']
timestepping = TimesteppingParameters(dt=dt)
dirname = 'sk_nonlinear_dx%s_dz%s_dt%s' % (dx, dz, dt)
points_x = np.linspace(0., L, 100)
points_z = [H/2.]
points = np.array([p for p in itertools.product(points_x, points_z)])
dumptime = 100 # print every 100s
dumpfreq = int(dumptime / dt)
PETSc.Sys.Print("Output frequency: %s\n" % dumpfreq)
output = OutputParameters(dirname=dirname,
dumpfreq=dumpfreq,
dumplist=['u'],
perturbation_fields=['theta', 'rho'],
point_data=[('theta_perturbation', points)],
log_level='INFO')
parameters = CompressibleParameters()
diagnostics = Diagnostics(*fieldlist)
diagnostic_fields = [CourantNumber()]
state = State(mesh,
vertical_degree=1,
horizontal_degree=1,
family="CG",
timestepping=timestepping,
output=output,
parameters=parameters,
diagnostics=diagnostics,
fieldlist=fieldlist,
diagnostic_fields=diagnostic_fields)
# Initial conditions
u0 = state.fields("u")
rho0 = state.fields("rho")
theta0 = state.fields("theta")
# spaces
Vu = u0.function_space()
Vt = theta0.function_space()
Vr = rho0.function_space()
# Thermodynamic constants required for setting initial conditions
# and reference profiles
g = parameters.g
N = parameters.N
p_0 = parameters.p_0
c_p = parameters.cp
R_d = parameters.R_d
kappa = parameters.kappa
x, z = SpatialCoordinate(mesh)
# N^2 = (g/theta)*dtheta/dz => dtheta/dz = theta*N^2/g => theta = theta_0*exp(N^2*z/g)
Tsurf = 300.
thetab = Tsurf*exp(N**2*z/g)
theta_b = Function(Vt).interpolate(thetab)
rho_b = Function(Vr)
# Calculate hydrostatic Pi
PETSc.Sys.Print("Computing hydrostatic varaibles...\n")
# Use vertical hybridization preconditioner for the balance initialization
piparams = {'ksp_type': 'preonly',
'pc_type': 'python',
'mat_type': 'matfree',
'pc_python_type': 'gusto.VerticalHybridizationPC',
# Vertical trace system is only coupled vertically in columns
# block ILU is a direct solver!
'vert_hybridization': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
compressible_hydrostatic_balance(state,
theta_b,
rho_b,
params=piparams)
PETSc.Sys.Print("Finished computing hydrostatic varaibles...\n")
a = 5.0e3
deltaTheta = 1.0e-2
theta_pert = deltaTheta*sin(np.pi*z/H)/(1 + (x - L/2)**2/a**2)
theta0.interpolate(theta_b + theta_pert)
rho0.assign(rho_b)
u0.project(as_vector([20.0, 0.0]))
state.initialise([('u', u0),
('rho', rho0),
('theta', theta0)])
state.set_reference_profiles([('rho', rho_b),
('theta', theta_b)])
# Set up advection schemes
ueqn = EulerPoincare(state, Vu)
rhoeqn = AdvectionEquation(state, Vr, equation_form="continuity")
supg = True
if supg:
thetaeqn = SUPGAdvection(state, Vt, equation_form="advective")
else:
thetaeqn = EmbeddedDGAdvection(state, Vt, equation_form="advective",
options=EmbeddedDGOptions())
advected_fields = []
advected_fields.append(("u", ThetaMethod(state, u0, ueqn)))
advected_fields.append(("rho", SSPRK3(state, rho0, rhoeqn)))
advected_fields.append(("theta", SSPRK3(state, theta0, thetaeqn)))
# Set up linear solver
solver_parameters = {'mat_type': 'matfree',
'ksp_type': 'preonly',
'pc_type': 'python',
'pc_python_type': 'firedrake.SCPC',
'pc_sc_eliminate_fields': '0, 1',
# The reduced operator is not symmetric
'condensed_field': {'ksp_type': 'fgmres',
'ksp_rtol': 1.0e-8,
'ksp_atol': 1.0e-8,
'ksp_max_it': 100,
'pc_type': 'gamg',
'pc_gamg_sym_graph': None,
'mg_levels': {'ksp_type': 'gmres',
'ksp_max_it': 5,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}}
if args.debug:
solver_parameters['condensed_field']['ksp_monitor_true_residual'] = None
linear_solver = CompressibleSolver(state,
solver_parameters=solver_parameters,
overwrite_solver_parameters=True)
# Set up forcing
compressible_forcing = CompressibleForcing(state)
# Build time stepper
stepper = CrankNicolson(state,
advected_fields,
linear_solver,
compressible_forcing)
PETSc.Sys.Print("Starting simulation...\n")
stepper.run(t=0, tmax=tmax)
| mit | 3,148,641,974,101,727,000 | 30.132231 | 79 | 0.543005 | false | 3.528806 | true | false | false |
c3nav/c3nav | src/c3nav/mapdata/render/engines/openscad.py | 1 | 17896 | import math
from abc import ABC, abstractmethod
from collections import UserList
from operator import attrgetter
from shapely import prepared
from shapely.geometry import JOIN_STYLE, MultiPolygon
from shapely.ops import unary_union
from c3nav.mapdata.render.engines import register_engine
from c3nav.mapdata.render.engines.base3d import Base3DEngine
from c3nav.mapdata.render.utils import get_full_levels, get_main_levels
from c3nav.mapdata.utils.geometry import assert_multipolygon
class AbstractOpenScadElem(ABC):
@abstractmethod
def render(self) -> str:
        raise NotImplementedError
class AbstractOpenScadBlock(AbstractOpenScadElem, UserList):
def render_children(self):
return '\n'.join(child.render() for child in self.data if child is not None)
class OpenScadRoot(AbstractOpenScadBlock):
def render(self):
return self.render_children()
class OpenScadBlock(AbstractOpenScadBlock):
def __init__(self, command, comment=None, children=None):
super().__init__(children if children else [])
self.command = command
self.comment = comment
def render(self):
if self.comment or len(self.data) != 1:
return '%s {%s\n %s\n}' % (
self.command,
'' if self.comment is None else (' // '+self.comment),
self.render_children().replace('\n', '\n ')
)
return '%s %s' % (self.command, self.render_children())
class OpenScadCommand(AbstractOpenScadElem):
def __init__(self, command):
super().__init__()
self.command = command
def render(self):
return self.command
@register_engine
class OpenSCADEngine(Base3DEngine):
filetype = 'scad'
def __init__(self, *args, center=True, **kwargs):
super().__init__(*args, center=center, **kwargs)
if center:
self.root = OpenScadBlock('scale([%(scale)f, %(scale)f, %(scale)f]) translate([%(x)f, %(y)f, 0])' % {
'scale': self.scale,
'x': -(self.minx + self.maxx) / 2,
'y': -(self.miny + self.maxy) / 2,
})
else:
self.root = OpenScadBlock('scale([%(scale)f, %(scale)f, %(scale)f])' % {
'scale': self.scale,
'x': -(self.minx + self.maxx) / 2,
'y': -(self.miny + self.maxy) / 2,
})
def custom_render(self, level_render_data, access_permissions, full_levels):
if full_levels:
levels = get_full_levels(level_render_data)
else:
levels = get_main_levels(level_render_data)
buildings = None
areas = None
main_building_block = None
main_building_block_diff = None
current_upper_bound = None
for geoms in levels:
# hide indoor and outdoor rooms if their access restriction was not unlocked
restricted_spaces_indoors = unary_union(
tuple(area.geom for access_restriction, area in geoms.restricted_spaces_indoors.items()
if access_restriction not in access_permissions)
)
restricted_spaces_outdoors = unary_union(
tuple(area.geom for access_restriction, area in geoms.restricted_spaces_outdoors.items()
if access_restriction not in access_permissions)
)
restricted_spaces = unary_union((restricted_spaces_indoors, restricted_spaces_outdoors)) # noqa
# crop altitudeareas
for altitudearea in geoms.altitudeareas:
altitudearea.geometry = altitudearea.geometry.geom.difference(restricted_spaces)
altitudearea.geometry_prep = prepared.prep(altitudearea.geometry)
# crop heightareas
new_heightareas = []
for geometry, height in geoms.heightareas:
geometry = geometry.geom.difference(restricted_spaces)
geometry_prep = prepared.prep(geometry)
new_heightareas.append((geometry, geometry_prep, height))
geoms.heightareas = new_heightareas
if geoms.on_top_of_id is None:
buildings = geoms.buildings
areas = MultiPolygon()
current_upper_bound = geoms.upper_bound
holes = geoms.holes.difference(restricted_spaces)
buildings = buildings.difference(holes)
areas = areas.union(holes.buffer(0).buffer(0.01, join_style=JOIN_STYLE.mitre))
main_building_block = OpenScadBlock('union()', comment='Level %s' % geoms.short_label)
self.root.append(main_building_block)
main_building_block_diff = OpenScadBlock('difference()')
main_building_block.append(main_building_block_diff)
main_building_block_inner = OpenScadBlock('union()')
main_building_block_diff.append(main_building_block_inner)
main_building_block_inner.append(
self._add_polygon(None, buildings.intersection(self.bbox), geoms.lower_bound, geoms.upper_bound)
)
for altitudearea in sorted(geoms.altitudeareas, key=attrgetter('altitude')):
if not altitudearea.geometry.intersects(self.bbox):
continue
if altitudearea.altitude2 is not None:
name = 'Altitudearea %s-%s' % (altitudearea.altitude/1000, altitudearea.altitude2/1000)
else:
name = 'Altitudearea %s' % (altitudearea.altitude / 1000)
# why all this buffering?
# buffer(0) ensures a valid geometry, this is sadly needed sometimes
# the rest of the buffering is meant to make polygons overlap a little so no glitches appear
                # the intersections below will ensure that they only overlap with each other and don't eat walls
geometry = altitudearea.geometry.buffer(0)
inside_geometry = geometry.intersection(buildings).buffer(0).buffer(0.01, join_style=JOIN_STYLE.mitre)
outside_geometry = geometry.difference(buildings).buffer(0).buffer(0.01, join_style=JOIN_STYLE.mitre)
geometry_buffered = geometry.buffer(0.01, join_style=JOIN_STYLE.mitre)
if geoms.on_top_of_id is None:
areas = areas.union(geometry)
buildings = buildings.difference(geometry).buffer(0)
inside_geometry = inside_geometry.intersection(areas).buffer(0)
outside_geometry = outside_geometry.intersection(areas).buffer(0)
geometry_buffered = geometry_buffered.intersection(areas).buffer(0)
outside_geometry = outside_geometry.intersection(self.bbox)
if not inside_geometry.is_empty:
if altitudearea.altitude2 is not None:
min_slope_altitude = min(altitudearea.altitude, altitudearea.altitude2)
max_slope_altitude = max(altitudearea.altitude, altitudearea.altitude2)
bounds = inside_geometry.bounds
# cut in
polygon = self._add_polygon(None, inside_geometry,
min_slope_altitude-10, current_upper_bound+1000)
slope = self._add_slope(bounds, altitudearea.altitude, altitudearea.altitude2,
altitudearea.point1, altitudearea.point2, bottom=True)
main_building_block_diff.append(
OpenScadBlock('difference()', children=[polygon, slope], comment=name+' inside cut')
)
# actual thingy
if max_slope_altitude > current_upper_bound and inside_geometry.intersects(self.bbox):
polygon = self._add_polygon(None, inside_geometry.intersection(self.bbox),
current_upper_bound-10, max_slope_altitude+10)
slope = self._add_slope(bounds, altitudearea.altitude, altitudearea.altitude2,
altitudearea.point1, altitudearea.point2, bottom=False)
main_building_block.append(
OpenScadBlock('difference()',
children=[polygon, slope], comment=name + 'outside')
)
else:
if altitudearea.altitude < current_upper_bound:
main_building_block_diff.append(
self._add_polygon(name+' inside cut', inside_geometry,
altitudearea.altitude, current_upper_bound+1000)
)
else:
main_building_block.append(
self._add_polygon(name+' inside', inside_geometry.intersection(self.bbox),
min(altitudearea.altitude-700, current_upper_bound-10),
altitudearea.altitude)
)
if not outside_geometry.is_empty:
if altitudearea.altitude2 is not None:
min_slope_altitude = min(altitudearea.altitude, altitudearea.altitude2)
max_slope_altitude = max(altitudearea.altitude, altitudearea.altitude2)
bounds = outside_geometry.bounds
polygon = self._add_polygon(None, outside_geometry,
min_slope_altitude-710, max_slope_altitude+10)
slope1 = self._add_slope(bounds, altitudearea.altitude, altitudearea.altitude2,
altitudearea.point1, altitudearea.point2, bottom=False)
slope2 = self._add_slope(bounds, altitudearea.altitude-700, altitudearea.altitude2-700,
altitudearea.point1, altitudearea.point2, bottom=True)
union = OpenScadBlock('union()', children=[slope1, slope2], comment=name+'outside')
main_building_block.append(
OpenScadBlock('difference()',
children=[polygon, union], comment=name+'outside')
)
else:
if geoms.on_top_of_id is None:
lower = geoms.lower_bound
else:
lower = altitudearea.altitude-700
if lower == current_upper_bound:
lower -= 10
main_building_block.append(
self._add_polygon(name+' outside', outside_geometry, lower, altitudearea.altitude)
)
# obstacles
if altitudearea.altitude2 is not None:
obstacles_diff_block = OpenScadBlock('difference()', comment=name + ' obstacles')
had_obstacles = False
obstacles_block = OpenScadBlock('union()')
obstacles_diff_block.append(obstacles_block)
min_slope_altitude = min(altitudearea.altitude, altitudearea.altitude2)
max_slope_altitude = max(altitudearea.altitude, altitudearea.altitude2)
bounds = geometry.bounds
for height, obstacles in altitudearea.obstacles.items():
height_diff = OpenScadBlock('difference()')
had_height_obstacles = None
height_union = OpenScadBlock('union()')
height_diff.append(height_union)
for obstacle in obstacles:
if not obstacle.geom.intersects(self.bbox):
continue
obstacle = obstacle.geom.buffer(0).buffer(0.01, join_style=JOIN_STYLE.mitre)
if self.min_width:
obstacle = obstacle.union(self._satisfy_min_width(obstacle)).buffer(0)
obstacle = obstacle.intersection(geometry_buffered)
if not obstacle.is_empty:
had_height_obstacles = True
had_obstacles = True
height_union.append(
self._add_polygon(None, obstacle.intersection(self.bbox),
min_slope_altitude-20, max_slope_altitude+height+10)
)
if had_height_obstacles:
obstacles_block.append(height_diff)
height_diff.append(
self._add_slope(bounds, altitudearea.altitude+height, altitudearea.altitude2+height,
altitudearea.point1, altitudearea.point2, bottom=False)
)
if had_obstacles:
main_building_block.append(obstacles_diff_block)
obstacles_diff_block.append(
self._add_slope(bounds, altitudearea.altitude-10, altitudearea.altitude2-10,
altitudearea.point1, altitudearea.point2, bottom=True)
)
else:
obstacles_block = OpenScadBlock('union()', comment=name + ' obstacles')
had_obstacles = False
for height, obstacles in altitudearea.obstacles.items():
for obstacle in obstacles:
if not obstacle.geom.intersects(self.bbox):
continue
obstacle = obstacle.geom.buffer(0).buffer(0.01, join_style=JOIN_STYLE.mitre)
if self.min_width:
obstacle = obstacle.union(self._satisfy_min_width(obstacle)).buffer(0)
obstacle = obstacle.intersection(geometry_buffered).intersection(self.bbox)
if not obstacle.is_empty:
had_obstacles = True
obstacles_block.append(
self._add_polygon(None, obstacle,
altitudearea.altitude-10, altitudearea.altitude+height)
)
if had_obstacles:
main_building_block.append(obstacles_block)
if self.min_width and geoms.on_top_of_id is None:
main_building_block_inner.append(
self._add_polygon('min width',
self._satisfy_min_width(buildings).intersection(self.bbox).buffer(0),
geoms.lower_bound, geoms.upper_bound)
)
def _add_polygon(self, name, geometry, minz, maxz):
geometry = geometry.buffer(0)
polygons = []
for polygon in assert_multipolygon(geometry):
points = []
points_lookup = {}
output_rings = []
for ring in [polygon.exterior]+list(polygon.interiors):
output_ring = []
for coords in ring.coords:
try:
i = points_lookup[coords]
except KeyError:
points_lookup[coords] = len(points)
i = len(points)
points.append(list(coords))
output_ring.append(i)
if output_ring[0] == output_ring[-1]:
output_ring = output_ring[:-1]
output_rings.append(output_ring)
polygons.append(OpenScadCommand('polygon(%(points)r, %(rings)r, 10);' % {
'points': points,
'rings': output_rings,
}))
if not polygons:
return None
extrude_cmd = 'linear_extrude(height=%f, convexity=10)' % (abs(maxz-minz)/1000)
translate_cmd = 'translate([0, 0, %f])' % (min(maxz, minz)/1000)
return OpenScadBlock(translate_cmd, children=[OpenScadBlock(extrude_cmd, comment=name, children=polygons)])
def _add_slope(self, bounds, altitude1, altitude2, point1, point2, bottom=False):
distance = point1.distance(point2)
altitude_diff = (altitude2-altitude1)/1000
rotate_y = -math.degrees(math.atan2(altitude_diff, distance))
rotate_z = math.degrees(math.atan2(point2.y-point1.y, point2.x-point1.x))
if bottom:
rotate_y += 180
minx, miny, maxx, maxy = bounds
size = ((maxx-minx)+(maxy-miny))*2
cmd = OpenScadCommand('square([%f, %f], center=true);' % (size, size))
cmd = OpenScadBlock('linear_extrude(height=16, convexity=10)', children=[cmd])
cmd = OpenScadBlock('rotate([0, %f, %f])' % (rotate_y, rotate_z), children=[cmd])
cmd = OpenScadBlock('translate([%f, %f, %f])' % (point1.x, point1.y, altitude1/1000), children=[cmd])
return cmd
def _satisfy_min_width(self, geometry):
return geometry.buffer(self.min_width/2, join_style=JOIN_STYLE.mitre)
def render(self, filename=None):
return self.root.render().encode()
| apache-2.0 | 5,166,118,962,549,087,000 | 49.269663 | 118 | 0.531515 | false | 4.470647 | false | false | false |
auduny/home-assistant | homeassistant/components/rainmachine/sensor.py | 4 | 2585 | """This platform provides support for sensor data from RainMachine."""
import logging
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
DATA_CLIENT, DOMAIN as RAINMACHINE_DOMAIN,
OPERATION_RESTRICTIONS_UNIVERSAL, SENSOR_UPDATE_TOPIC, SENSORS,
RainMachineEntity)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up RainMachine sensors based on the old way."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up RainMachine sensors based on a config entry."""
rainmachine = hass.data[RAINMACHINE_DOMAIN][DATA_CLIENT][entry.entry_id]
sensors = []
for sensor_type in rainmachine.sensor_conditions:
name, icon, unit = SENSORS[sensor_type]
sensors.append(
RainMachineSensor(rainmachine, sensor_type, name, icon, unit))
async_add_entities(sensors, True)
class RainMachineSensor(RainMachineEntity):
"""A sensor implementation for raincloud device."""
def __init__(self, rainmachine, sensor_type, name, icon, unit):
"""Initialize."""
super().__init__(rainmachine)
self._icon = icon
self._name = name
self._sensor_type = sensor_type
self._state = None
self._unit = unit
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def state(self) -> str:
"""Return the name of the entity."""
return self._state
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_{1}'.format(
self.rainmachine.device_mac.replace(':', ''), self._sensor_type)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._dispatcher_handlers.append(async_dispatcher_connect(
self.hass, SENSOR_UPDATE_TOPIC, update))
async def async_update(self):
"""Update the sensor's state."""
self._state = self.rainmachine.data[OPERATION_RESTRICTIONS_UNIVERSAL][
'freezeProtectTemp']
| apache-2.0 | 4,025,846,147,999,986,000 | 29.05814 | 78 | 0.635977 | false | 4.077287 | false | false | false |
jardiacaj/finem_imperii | organization/views/heir.py | 1 | 1805 | from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.http import require_POST
from character.models import Character
from organization.models.capability import Capability
from organization.views.proposal import capability_success
from organization.views.decorator import capability_required_decorator
@require_POST
@capability_required_decorator
def heir_capability_view(request, capability_id):
capability = get_object_or_404(
Capability, id=capability_id, type=Capability.HEIR)
first_heir_id = request.POST.get('first_heir')
second_heir_id = request.POST.get('second_heir')
try:
first_heir = Character.objects.get(id=first_heir_id)
except Character.DoesNotExist:
first_heir = None
try:
second_heir = Character.objects.get(id=second_heir_id)
except Character.DoesNotExist:
second_heir = None
if (
first_heir not in capability.applying_to.get_heir_candidates()
or
first_heir == capability.applying_to.get_position_occupier()
):
messages.error(request, "Invalid first heir", "danger")
return redirect(capability.get_absolute_url())
if (
second_heir not in capability.applying_to.get_heir_candidates()
or
second_heir == capability.applying_to.get_position_occupier()
) and second_heir is not None:
messages.error(request, "Invalid second heir", "danger")
return redirect(capability.get_absolute_url())
proposal = {
'first_heir': first_heir.id,
'second_heir': second_heir.id if second_heir is not None else 0
}
capability.create_proposal(request.hero, proposal)
return capability_success(capability, request)
| agpl-3.0 | -6,983,427,591,600,242,000 | 33.711538 | 75 | 0.695845 | false | 3.808017 | false | false | false |
symmetricapi/django-symmetric | symmetric/functions.py | 1 | 12865 | import datetime
from django.conf import settings
from django.db import models
from django.utils import timezone
def underscore_to_camel_case(string):
words = [word.capitalize() for word in string.split('_')]
words[0] = words[0].lower()
return ''.join(words)
def camel_case_to_underscore(string):
words = []
start_index = 0
for index, c in enumerate(string):
# Ignore the first character regardless of case
if c.isupper() and index:
words.append(string[start_index:index].lower())
start_index = index
words.append(string[start_index:].lower())
return '_'.join(words)
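# Illustrative round trip: underscore_to_camel_case('pkg_path') returns 'pkgPath'
# and camel_case_to_underscore('pkgPath') returns 'pkg_path'.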
def sanitize_order_by(string):
"""Make sure the string has no double underscores, also convert from camelcase."""
if string and string.find('__') == -1 and string.find('?') == -1:
return camel_case_to_underscore(string)
return ''
def iso_8601_to_time(iso):
"""Parse an iso 8601 date into a datetime.time."""
if not iso:
return None
return datetime.datetime.strptime(iso, '%H:%M:%S').time()
def iso_8601_to_date(iso):
"""Parse an iso 8601 date into a datetime.date."""
if not iso:
return None
return datetime.datetime.strptime(iso[:10], '%Y-%m-%d').date()
def iso_8601_to_datetime(iso):
"""Parse an iso 8601 string into a timezone aware datetime, ignoring and fractional seconds."""
if not iso:
return None
dt = datetime.datetime.strptime(iso[:19], '%Y-%m-%dT%H:%M:%S')
# strptime doesn't seem to handle timezones, parse them here
if len(iso) == 19:
return timezone.make_aware(dt, timezone.get_current_timezone())
else:
# Make the datetime UTC if Z is the timezone, ignoring fractional seconds in between
if (len(iso) == 20 or iso[19] == '.') and iso[-1] == 'Z':
return timezone.make_aware(dt, timezone.UTC())
# Parse a complete timezone e.g. +00:00, checking for the correct length or ignored fractional seconds
if (len(iso) == 25 or iso[19] == '.') and iso[-6] in ('+', '-') and iso[-3] == ':':
try:
hours = int(iso[-5:-3])
minutes = int(iso[-2:])
minutes += hours * 60
if iso[-6] == '-':
minutes = -minutes
return timezone.make_aware(dt, timezone.get_fixed_timezone(minutes))
except:
# drop through and raise the exception
pass
raise ValueError('Invalid timezone %s.' % iso[19:])
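# Accepted inputs (illustrative): '2008-01-03T12:30:00' (assumed current timezone),
# '2008-01-03T12:30:00Z' (UTC) and '2008-01-03T12:30:00.123+05:30' (fixed offset);
# fractional seconds are always dropped.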
def time_to_iso_8601(t):
"""Format a datetime.time as an iso 8601 string - HH:MM:SS."""
if t:
return t.replace(microsecond=0).isoformat()
else:
return None
def date_to_iso_8601(d):
"""Format a datetime.date as an iso 8601 string - YYYY-MM-DD."""
if d:
return d.isoformat()
else:
return None
def datetime_to_iso_8601(dt):
"""Format a datetime as an iso 8601 string - YYYY-MM-DDTHH:MM:SS with optional timezone +HH:MM."""
if dt:
return dt.replace(microsecond=0).isoformat()
else:
return None
def decode_int(value):
"""Decode an int after checking to make sure it is not already a int, 0.0, or empty."""
if isinstance(value, (int, long)):
return value
elif value == 0.0:
return 0
elif not value:
return None
return int(value)
def decode_float(value):
"""Decode a float after checking to make sure it is not already a float, 0, or empty."""
if type(value) is float:
return value
elif value == 0:
return 0.0
elif not value:
return None
return float(value)
def decode_bool(value):
"""Decode a bool after checking to make sure it is not already a bool, int, or empty.
If value is 0.0, None is returned because floats shouldn't be used.
"""
t = type(value)
if t is bool:
return value
elif t is int:
return bool(value)
elif not value:
return None
elif t in (str, unicode) and value.lower() in ('false', 'f', 'off', 'no'):
return False
return True
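# Illustrative decodings: decode_bool('false') -> False, decode_bool('Yes') -> True,
# decode_bool(0) -> False and decode_bool('') -> None.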
_api_models = {}
class _ApiModel(object):
def __init__(self, model):
# Tuples of (name, encoded_name, encode, decode)
self.fields = []
self.list_fields = []
self.encoded_fields = {}
self.id_field = None
self.select_related_args = []
# data dictionary, set fields instead of creating a new dictionary for each get_data
self._data = {}
self._list_data = {}
include_fields = None
exclude_fields = None
include_related = ()
list_fields = None
update_fields = None
readonly_fields = None
camelcase = getattr(settings, 'API_CAMELCASE', True)
renameid = getattr(settings, 'API_RENAME_ID', True)
if hasattr(model, 'API'):
if hasattr(model.API, 'include_fields'):
include_fields = model.API.include_fields
if hasattr(model.API, 'exclude_fields'):
exclude_fields = model.API.exclude_fields
if hasattr(model.API, 'include_related'):
include_related = model.API.include_related
if hasattr(model.API, 'list_fields'):
list_fields = model.API.list_fields
if hasattr(model.API, 'update_fields'):
update_fields = model.API.update_fields
if hasattr(model.API, 'readonly_fields'):
readonly_fields = model.API.readonly_fields
# Calculate all of the fields and list fields
for field in model._meta.fields:
if include_fields and field.name not in include_fields:
continue
elif exclude_fields and field.name in exclude_fields:
continue
else:
name = field.name
if field.name == 'id' and renameid:
encoded_name = camel_case_to_underscore(model.__name__) + '_id'
else:
encoded_name = field.name
if isinstance(field, models.IntegerField):
encode = None
decode = decode_int
elif isinstance(field, models.FloatField):
encode = None
decode = decode_float
elif isinstance(field, models.BooleanField):
encode = None
decode = decode_bool
elif isinstance(field, models.DateTimeField):
encode = datetime_to_iso_8601
decode = iso_8601_to_datetime
elif isinstance(field, models.TimeField):
encode = time_to_iso_8601
decode = iso_8601_to_time
elif isinstance(field, models.DateField):
encode = date_to_iso_8601
decode = iso_8601_to_date
elif isinstance(field, models.ForeignKey):
if field.name in include_related:
encode = get_object_data
decode = set_object_data
# Calculate the select_related_args
related_model = _get_api_model(field.rel.to)
if related_model.select_related_args:
for arg in related_model.select_related_args:
self.select_related_args.append('%s__%s' % (field.name, arg))
else:
self.select_related_args.append(field.name)
# For include related fields, also add an encoded_field entry for the option of updating the foreign key to another entry
# Setting the name_id attribute to None has the same effect as setting name to None, it will set the foreign key to null in the db
field_coding = (name + '_id', underscore_to_camel_case(encoded_name + '_id') if camelcase else encoded_name + '_id', None, decode_int)
self.encoded_fields[field_coding[1]] = field_coding
else:
name += '_id'
encoded_name += '_id'
encode = None
decode = decode_int
# For pointers to parent models, add as readonly field without the ptr suffix
if isinstance(field, models.OneToOneField) and encoded_name.endswith('_ptr_id'):
encoded_name = encoded_name[:-7] + '_id'
field_coding = (name, underscore_to_camel_case(encoded_name) if camelcase else encoded_name, None, decode_int)
self.fields.append(field_coding)
self.list_fields.append(field_coding)
continue
elif isinstance(field, models.AutoField):
encode = None
decode = decode_int
elif isinstance(field, (models.FileField, models.ManyToManyField)):
continue
else:
encode = None
decode = None
if camelcase:
field_coding = (name, underscore_to_camel_case(encoded_name), encode, decode)
else:
field_coding = (name, encoded_name, encode, decode)
self.fields.append(field_coding)
if field.name == 'id':
self.id_field = field_coding
if not field.editable or field.primary_key:
pass
elif update_fields and field.name not in update_fields:
pass
elif readonly_fields and field.name in readonly_fields:
pass
else:
self.encoded_fields[field_coding[1]] = field_coding
if list_fields is None or field.name in list_fields:
if field_coding[2] == get_object_data:
self.list_fields.append((field_coding[0], field_coding[1], get_object_list_data, field_coding[3]))
else:
self.list_fields.append(field_coding)
def get_list_data(self, obj):
for name, encoded_name, encode, decode in self.list_fields:
if encode:
self._list_data[encoded_name] = encode(getattr(obj, name))
else:
self._list_data[encoded_name] = getattr(obj, name)
return self._list_data
def get_data(self, obj):
for name, encoded_name, encode, decode in self.fields:
if encode:
self._data[encoded_name] = encode(getattr(obj, name))
else:
self._data[encoded_name] = getattr(obj, name)
return self._data
def set_data(self, obj, data):
for key, value in data.iteritems():
if self.encoded_fields.has_key(key):
name, encoded_name, encode, decode = self.encoded_fields[key]
if decode:
if decode is set_object_data:
decode(getattr(obj, name), value)
else:
setattr(obj, name, decode(value))
else:
setattr(obj, name, value)
def _get_api_model(model):
key = model.__module__ + model.__name__
if not _api_models.has_key(key):
_api_models[key] = _ApiModel(model)
return _api_models[key]
def get_object_list_data(obj):
if obj is None:
return None
model = _get_api_model(type(obj))
return model.get_list_data(obj)
def get_object_data(obj):
if obj is None:
return None
model = _get_api_model(type(obj))
data = model.get_data(obj)
if hasattr(obj, '_exclude_data'):
# Return a copy with specific data fields removed
data = dict(data)
for excluded in obj._exclude_data:
del data[excluded]
return data
def set_object_data(obj, data):
model = _get_api_model(type(obj))
model.set_data(obj, data)
def save_object(obj):
model = type(obj)
if hasattr(model, 'API') and hasattr(model.API, 'include_related'):
for field in model.API.include_related:
# Do a quick check for readonly status - this is for a slight performance boost only
# There are other settings that can cause the sub-object to be readonly, but it's not enforced here
if hasattr(model.API, 'readonly_fields') and field in model.API.readonly_fields:
continue
subobj = getattr(obj, field, None)
if subobj:
save_object(subobj)
obj.full_clean()
obj.save()
| mit | 7,195,248,395,000,181,000 | 37.517964 | 158 | 0.550175 | false | 4.285476 | false | false | false |
reiaaoyama/exabgp | lib/exabgp/dep/cmd2.py | 7 | 63933 | """Variant on standard library's cmd with extra features.
To use, simply import cmd2.Cmd instead of cmd.Cmd; use precisely as though you
were using the standard library's cmd, while enjoying the extra features.
Searchable command history (commands: "hi", "li", "run")
Load commands from file, save to file, edit commands in file
Multi-line commands
Case-insensitive commands
Special-character shortcut commands (beyond cmd's "@" and "!")
Settable environment parameters
Optional _onchange_{paramname} called when environment parameter changes
Parsing commands with `optparse` options (flags)
Redirection to file with >, >>; input from file with <
Easy transcript-based testing of applications (see example/example.py)
Bash-style ``select`` available
Note that redirection with > and | will only work if `self.stdout.write()`
is used in place of `print`. The standard library's `cmd` module is
written to use `self.stdout.write()`.
- Catherine Devlin, Jan 03 2008 - catherinedevlin.blogspot.com
mercurial repository at http://www.assembla.com/wiki/show/python-cmd2
"""
import cmd
import re
import os
import sys
import optparse
import subprocess
import tempfile
import doctest
import unittest
import datetime
import urllib
import glob
import traceback
import platform
import copy
from code import InteractiveConsole, InteractiveInterpreter
from optparse import make_option
from exabgp.dep import pyparsing
__version__ = '0.6.8'
if sys.version_info[0] == 2:
pyparsing.ParserElement.enablePackrat()
"""
Packrat is causing Python3 errors that I don't understand.
> /usr/local/Cellar/python3/3.2/lib/python3.2/site-packages/pyparsing-1.5.6-py3.2.egg/pyparsing.py(999)scanString()
-> nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
(Pdb) n
NameError: global name 'exc' is not defined
(Pdb) parseFn
<bound method Or._parseCache of {Python style comment ^ C style comment}>
Bug report filed: https://sourceforge.net/tracker/?func=detail&atid=617311&aid=3381439&group_id=97203
"""
class OptionParser(optparse.OptionParser):
def exit(self, status=0, msg=None):
self.values._exit = True
if msg:
print (msg)
def print_help(self, *args, **kwargs):
try:
print (self._func.__doc__)
except AttributeError:
pass
optparse.OptionParser.print_help(self, *args, **kwargs)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
raise optparse.OptParseError(msg)
def remaining_args(oldArgs, newArgList):
'''
Preserves the spacing originally in the argument after
the removal of options.
>>> remaining_args('-f bar bar cow', ['bar', 'cow'])
'bar cow'
'''
    pattern = r'\s+'.join(re.escape(a) for a in newArgList) + r'\s*$'
matchObj = re.search(pattern, oldArgs)
return oldArgs[matchObj.start():]
def _attr_get_(obj, attr):
'''Returns an attribute's value, or None (no error) if undefined.
Analagous to .get() for dictionaries. Useful when checking for
value of options that may not have been defined on a given
method.'''
try:
return getattr(obj, attr)
except AttributeError:
return None
def _which(editor):
try:
return subprocess.Popen(['which', editor], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
except OSError:
return None
optparse.Values.get = _attr_get_
options_defined = [] # used to distinguish --options from SQL-style --comments
def options(option_list, arg_desc="arg"):
'''Used as a decorator and passed a list of optparse-style options,
alters a cmd2 method to populate its ``opts`` argument from its
raw text argument.
Example: transform
def do_something(self, arg):
into
@options([make_option('-q', '--quick', action="store_true",
help="Makes things fast")],
"source dest")
def do_something(self, arg, opts):
if opts.quick:
self.fast_button = True
'''
if not isinstance(option_list, list):
option_list = [option_list]
for opt in option_list:
options_defined.append(pyparsing.Literal(opt.get_opt_string()))
def option_setup(func):
optionParser = OptionParser()
for opt in option_list:
optionParser.add_option(opt)
optionParser.set_usage("%s [options] %s" % (func.__name__[3:], arg_desc))
optionParser._func = func
def new_func(instance, arg):
try:
opts, newArgList = optionParser.parse_args(arg.split())
# Must find the remaining args in the original argument list, but
# mustn't include the command itself
#if hasattr(arg, 'parsed') and newArgList[0] == arg.parsed.command:
# newArgList = newArgList[1:]
newArgs = remaining_args(arg, newArgList)
if isinstance(arg, ParsedString):
arg = arg.with_args_replaced(newArgs)
else:
arg = newArgs
except optparse.OptParseError as e:
print (e)
optionParser.print_help()
return
if hasattr(opts, '_exit'):
return None
result = func(instance, arg, opts)
return result
new_func.__doc__ = '%s\n%s' % (func.__doc__, optionParser.format_help())
return new_func
return option_setup
class PasteBufferError(EnvironmentError):
if sys.platform[:3] == 'win':
errmsg = """Redirecting to or from paste buffer requires pywin32
to be installed on operating system.
Download from http://sourceforge.net/projects/pywin32/"""
elif sys.platform[:3] == 'dar':
# Use built in pbcopy on Mac OSX
pass
else:
errmsg = """Redirecting to or from paste buffer requires xclip
to be installed on operating system.
On Debian/Ubuntu, 'sudo apt-get install xclip' will install it."""
def __init__(self):
Exception.__init__(self, self.errmsg)
pastebufferr = """Redirecting to or from paste buffer requires %s
to be installed on operating system.
%s"""
if subprocess.mswindows:
try:
import win32clipboard
def get_paste_buffer():
win32clipboard.OpenClipboard(0)
try:
result = win32clipboard.GetClipboardData()
except TypeError:
result = '' #non-text
win32clipboard.CloseClipboard()
return result
def write_to_paste_buffer(txt):
win32clipboard.OpenClipboard(0)
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(txt)
win32clipboard.CloseClipboard()
except ImportError:
def get_paste_buffer(*args):
raise OSError(pastebufferr % ('pywin32', 'Download from http://sourceforge.net/projects/pywin32/'))
write_to_paste_buffer = get_paste_buffer
elif sys.platform == 'darwin':
can_clip = False
try:
# test for pbcopy - AFAIK, should always be installed on MacOS
subprocess.check_call('pbcopy -help', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
can_clip = True
except (subprocess.CalledProcessError, OSError, IOError):
pass
if can_clip:
def get_paste_buffer():
pbcopyproc = subprocess.Popen('pbcopy -help', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
return pbcopyproc.stdout.read()
def write_to_paste_buffer(txt):
pbcopyproc = subprocess.Popen('pbcopy', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
pbcopyproc.communicate(txt.encode())
else:
def get_paste_buffer(*args):
raise OSError(pastebufferr % ('pbcopy', 'On MacOS X - error should not occur - part of the default installation'))
write_to_paste_buffer = get_paste_buffer
else:
can_clip = False
try:
subprocess.check_call('xclip -o -sel clip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
can_clip = True
except AttributeError: # check_call not defined, Python < 2.5
try:
teststring = 'Testing for presence of xclip.'
xclipproc = subprocess.Popen('xclip -sel clip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
xclipproc.stdin.write(teststring)
xclipproc.stdin.close()
xclipproc = subprocess.Popen('xclip -o -sel clip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if xclipproc.stdout.read() == teststring:
can_clip = True
except Exception: # hate a bare Exception call, but exception classes vary too much b/t stdlib versions
pass
except Exception:
pass # something went wrong with xclip and we cannot use it
if can_clip:
def get_paste_buffer():
xclipproc = subprocess.Popen('xclip -o -sel clip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
return xclipproc.stdout.read()
def write_to_paste_buffer(txt):
xclipproc = subprocess.Popen('xclip -sel clip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
xclipproc.stdin.write(txt.encode())
xclipproc.stdin.close()
# but we want it in both the "primary" and "mouse" clipboards
xclipproc = subprocess.Popen('xclip', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
xclipproc.stdin.write(txt.encode())
xclipproc.stdin.close()
else:
def get_paste_buffer(*args):
raise OSError(pastebufferr % ('xclip', 'On Debian/Ubuntu, install with "sudo apt-get install xclip"'))
write_to_paste_buffer = get_paste_buffer
pyparsing.ParserElement.setDefaultWhitespaceChars(' \t')
class ParsedString(str):
def full_parsed_statement(self):
new = ParsedString('%s %s' % (self.parsed.command, self.parsed.args))
new.parsed = self.parsed
new.parser = self.parser
return new
def with_args_replaced(self, newargs):
new = ParsedString(newargs)
new.parsed = self.parsed
new.parser = self.parser
new.parsed['args'] = newargs
new.parsed.statement['args'] = newargs
return new
class StubbornDict(dict):
'''Dictionary that tolerates many input formats.
Create it with stubbornDict(arg) factory function.
>>> d = StubbornDict(large='gross', small='klein')
>>> sorted(d.items())
[('large', 'gross'), ('small', 'klein')]
>>> d.append(['plain', ' plaid'])
>>> sorted(d.items())
[('large', 'gross'), ('plaid', ''), ('plain', ''), ('small', 'klein')]
>>> d += ' girl Frauelein, Maedchen\\n\\n shoe schuh'
>>> sorted(d.items())
[('girl', 'Frauelein, Maedchen'), ('large', 'gross'), ('plaid', ''), ('plain', ''), ('shoe', 'schuh'), ('small', 'klein')]
'''
def update(self, arg):
dict.update(self, StubbornDict.to_dict(arg))
append = update
def __iadd__(self, arg):
self.update(arg)
return self
def __add__(self, arg):
selfcopy = copy.copy(self)
selfcopy.update(stubbornDict(arg))
return selfcopy
def __radd__(self, arg):
selfcopy = copy.copy(self)
selfcopy.update(stubbornDict(arg))
return selfcopy
@classmethod
def to_dict(cls, arg):
'Generates dictionary from string or list of strings'
if hasattr(arg, 'splitlines'):
arg = arg.splitlines()
if hasattr(arg, '__reversed__'):
result = {}
for a in arg:
a = a.strip()
if a:
key_val = a.split(None, 1)
key = key_val[0]
if len(key_val) > 1:
val = key_val[1]
else:
val = ''
result[key] = val
else:
result = arg
return result
def stubbornDict(*arg, **kwarg):
'''
>>> sorted(stubbornDict('cow a bovine\\nhorse an equine').items())
[('cow', 'a bovine'), ('horse', 'an equine')]
>>> sorted(stubbornDict(['badger', 'porcupine a poky creature']).items())
[('badger', ''), ('porcupine', 'a poky creature')]
>>> sorted(stubbornDict(turtle='has shell', frog='jumpy').items())
[('frog', 'jumpy'), ('turtle', 'has shell')]
'''
result = {}
for a in arg:
result.update(StubbornDict.to_dict(a))
result.update(kwarg)
return StubbornDict(result)
def replace_with_file_contents(fname):
if fname:
try:
result = open(os.path.expanduser(fname[0])).read()
except IOError:
result = '< %s' % fname[0] # wasn't a file after all
else:
result = get_paste_buffer()
return result
class EmbeddedConsoleExit(SystemExit):
pass
class EmptyStatement(Exception):
pass
def ljust(x, width, fillchar=' '):
'analogous to str.ljust, but works for lists'
if hasattr(x, 'ljust'):
return x.ljust(width, fillchar)
else:
if len(x) < width:
x = (x + [fillchar] * width)[:width]
return x
class Cmd(cmd.Cmd):
echo = False
case_insensitive = True # Commands recognized regardless of case
continuation_prompt = '> '
timing = False # Prints elapsed time for each command
# make sure your terminators are not in legalChars!
legalChars = u'!#$%.:?@_' + pyparsing.alphanums + pyparsing.alphas8bit
shortcuts = {'?': 'help', '!': 'shell', '@': 'load', '@@': '_relative_load'}
excludeFromHistory = '''run r list l history hi ed edit li eof'''.split()
default_to_shell = False
noSpecialParse = 'set ed edit exit'.split()
defaultExtension = 'txt' # For ``save``, ``load``, etc.
default_file_name = 'command.txt' # For ``save``, ``load``, etc.
abbrev = True # Abbreviated commands recognized
current_script_dir = None
reserved_words = []
feedback_to_output = False # Do include nonessentials in >, | output
quiet = False # Do not suppress nonessential output
debug = False
locals_in_py = True
kept_state = None
redirector = '>' # for sending output to file
settable = stubbornDict('''
prompt
colors Colorized output (*nix only)
continuation_prompt On 2nd+ line of input
debug Show full error stack on error
default_file_name for ``save``, ``load``, etc.
editor Program used by ``edit``
case_insensitive upper- and lower-case both OK
feedback_to_output include nonessentials in `|`, `>` results
quiet Don't print nonessential feedback
echo Echo command issued into output
timing Report execution times
abbrev Accept abbreviated commands
''')
def poutput(self, msg):
'''Convenient shortcut for self.stdout.write(); adds newline if necessary.'''
if msg:
self.stdout.write(msg)
if msg[-1] != '\n':
self.stdout.write('\n')
def perror(self, errmsg, statement=None):
if self.debug:
traceback.print_exc()
print (str(errmsg))
def pfeedback(self, msg):
"""For printing nonessential feedback. Can be silenced with `quiet`.
Inclusion in redirected output is controlled by `feedback_to_output`."""
if not self.quiet:
if self.feedback_to_output:
self.poutput(msg)
else:
print (msg)
_STOP_AND_EXIT = True # distinguish end of script file from actual exit
_STOP_SCRIPT_NO_EXIT = -999
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform[:3] == 'win':
editor = 'notepad'
else:
for editor in ['gedit', 'kate', 'vim', 'vi', 'emacs', 'nano', 'pico']:
if _which(editor):
break
colorcodes = {'bold':{True:'\x1b[1m',False:'\x1b[22m'},
'cyan':{True:'\x1b[36m',False:'\x1b[39m'},
'blue':{True:'\x1b[34m',False:'\x1b[39m'},
'red':{True:'\x1b[31m',False:'\x1b[39m'},
'magenta':{True:'\x1b[35m',False:'\x1b[39m'},
'green':{True:'\x1b[32m',False:'\x1b[39m'},
'underline':{True:'\x1b[4m',False:'\x1b[24m'}}
colors = (platform.system() != 'Windows')
def colorize(self, val, color):
'''Given a string (``val``), returns that string wrapped in UNIX-style
special characters that turn on (and then off) text color and style.
        If the ``colors`` environment parameter is ``False``, or the application
is running on Windows, will return ``val`` unchanged.
``color`` should be one of the supported strings (or styles):
red/blue/green/cyan/magenta, bold, underline'''
if self.colors and (self.stdout == self.initial_stdout):
return self.colorcodes[color][True] + val + self.colorcodes[color][False]
return val
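    # Usage sketch (``do_greet`` is a hypothetical command, not part of cmd2):
    #
    #     class App(Cmd):
    #         def do_greet(self, arg):
    #             self.poutput(self.colorize('Hello, %s' % (arg or 'world'), 'green'))
    #
    # colorize only emits escape codes while self.colors is true and stdout has
    # not been redirected, so the same command stays safe in pipes and files.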
def do_cmdenvironment(self, args):
'''Summary report of interactive parameters.'''
self.stdout.write("""
Commands are %(casesensitive)scase-sensitive.
Commands may be terminated with: %(terminators)s
Settable parameters: %(settable)s\n""" % \
{ 'casesensitive': (self.case_insensitive and 'not ') or '',
'terminators': str(self.terminators),
'settable': ' '.join(self.settable)
})
def do_help(self, arg):
if arg:
funcname = self.func_named(arg)
if funcname:
fn = getattr(self, funcname)
try:
fn.optionParser.print_help(file=self.stdout)
except AttributeError:
cmd.Cmd.do_help(self, funcname[3:])
else:
cmd.Cmd.do_help(self, arg)
def __init__(self, *args, **kwargs):
cmd.Cmd.__init__(self, *args, **kwargs)
self.initial_stdout = sys.stdout
self.history = History()
self.pystate = {}
self.shortcuts = sorted(self.shortcuts.items(), reverse=True)
self.keywords = self.reserved_words + [fname[3:] for fname in dir(self)
if fname.startswith('do_')]
self._init_parser()
def do_shortcuts(self, args):
"""Lists single-key shortcuts available."""
result = "\n".join('%s: %s' % (sc[0], sc[1]) for sc in sorted(self.shortcuts))
self.stdout.write("Single-key shortcuts for other commands:\n%s\n" % (result))
prefixParser = pyparsing.Empty()
commentGrammars = pyparsing.Or([pyparsing.pythonStyleComment, pyparsing.cStyleComment])
commentGrammars.addParseAction(lambda x: '')
commentInProgress = pyparsing.Literal('/*') + pyparsing.SkipTo(
pyparsing.stringEnd ^ '*/')
terminators = [';']
blankLinesAllowed = False
multilineCommands = []
def _init_parser(self):
r'''
>>> c = Cmd()
>>> c.multilineCommands = ['multiline']
>>> c.case_insensitive = True
>>> c._init_parser()
>>> print (c.parser.parseString('').dump())
[]
>>> print (c.parser.parseString('').dump())
[]
>>> print (c.parser.parseString('/* empty command */').dump())
[]
>>> print (c.parser.parseString('plainword').dump())
['plainword', '']
- command: plainword
- statement: ['plainword', '']
- command: plainword
>>> print (c.parser.parseString('termbare;').dump())
['termbare', '', ';', '']
- command: termbare
- statement: ['termbare', '', ';']
- command: termbare
- terminator: ;
- terminator: ;
>>> print (c.parser.parseString('termbare; suffx').dump())
['termbare', '', ';', 'suffx']
- command: termbare
- statement: ['termbare', '', ';']
- command: termbare
- terminator: ;
- suffix: suffx
- terminator: ;
>>> print (c.parser.parseString('barecommand').dump())
['barecommand', '']
- command: barecommand
- statement: ['barecommand', '']
- command: barecommand
>>> print (c.parser.parseString('COMmand with args').dump())
['command', 'with args']
- args: with args
- command: command
- statement: ['command', 'with args']
- args: with args
- command: command
>>> print (c.parser.parseString('command with args and terminator; and suffix').dump())
['command', 'with args and terminator', ';', 'and suffix']
- args: with args and terminator
- command: command
- statement: ['command', 'with args and terminator', ';']
- args: with args and terminator
- command: command
- terminator: ;
- suffix: and suffix
- terminator: ;
>>> print (c.parser.parseString('simple | piped').dump())
['simple', '', '|', ' piped']
- command: simple
- pipeTo: piped
- statement: ['simple', '']
- command: simple
>>> print (c.parser.parseString('double-pipe || is not a pipe').dump())
['double', '-pipe || is not a pipe']
- args: -pipe || is not a pipe
- command: double
- statement: ['double', '-pipe || is not a pipe']
- args: -pipe || is not a pipe
- command: double
>>> print (c.parser.parseString('command with args, terminator;sufx | piped').dump())
['command', 'with args, terminator', ';', 'sufx', '|', ' piped']
- args: with args, terminator
- command: command
- pipeTo: piped
- statement: ['command', 'with args, terminator', ';']
- args: with args, terminator
- command: command
- terminator: ;
- suffix: sufx
- terminator: ;
>>> print (c.parser.parseString('output into > afile.txt').dump())
['output', 'into', '>', 'afile.txt']
- args: into
- command: output
- output: >
- outputTo: afile.txt
- statement: ['output', 'into']
- args: into
- command: output
>>> print (c.parser.parseString('output into;sufx | pipethrume plz > afile.txt').dump())
['output', 'into', ';', 'sufx', '|', ' pipethrume plz', '>', 'afile.txt']
- args: into
- command: output
- output: >
- outputTo: afile.txt
- pipeTo: pipethrume plz
- statement: ['output', 'into', ';']
- args: into
- command: output
- terminator: ;
- suffix: sufx
- terminator: ;
>>> print (c.parser.parseString('output to paste buffer >> ').dump())
['output', 'to paste buffer', '>>', '']
- args: to paste buffer
- command: output
- output: >>
- statement: ['output', 'to paste buffer']
- args: to paste buffer
- command: output
>>> print (c.parser.parseString('ignore the /* commented | > */ stuff;').dump())
['ignore', 'the /* commented | > */ stuff', ';', '']
- args: the /* commented | > */ stuff
- command: ignore
- statement: ['ignore', 'the /* commented | > */ stuff', ';']
- args: the /* commented | > */ stuff
- command: ignore
- terminator: ;
- terminator: ;
>>> print (c.parser.parseString('has > inside;').dump())
['has', '> inside', ';', '']
- args: > inside
- command: has
- statement: ['has', '> inside', ';']
- args: > inside
- command: has
- terminator: ;
- terminator: ;
>>> print (c.parser.parseString('multiline has > inside an unfinished command').dump())
['multiline', ' has > inside an unfinished command']
- multilineCommand: multiline
>>> print (c.parser.parseString('multiline has > inside;').dump())
['multiline', 'has > inside', ';', '']
- args: has > inside
- multilineCommand: multiline
- statement: ['multiline', 'has > inside', ';']
- args: has > inside
- multilineCommand: multiline
- terminator: ;
- terminator: ;
>>> print (c.parser.parseString('multiline command /* with comment in progress;').dump())
['multiline', ' command /* with comment in progress;']
- multilineCommand: multiline
>>> print (c.parser.parseString('multiline command /* with comment complete */ is done;').dump())
['multiline', 'command /* with comment complete */ is done', ';', '']
- args: command /* with comment complete */ is done
- multilineCommand: multiline
- statement: ['multiline', 'command /* with comment complete */ is done', ';']
- args: command /* with comment complete */ is done
- multilineCommand: multiline
- terminator: ;
- terminator: ;
>>> print (c.parser.parseString('multiline command ends\n\n').dump())
['multiline', 'command ends', '\n', '\n']
- args: command ends
- multilineCommand: multiline
- statement: ['multiline', 'command ends', '\n', '\n']
- args: command ends
- multilineCommand: multiline
- terminator: ['\n', '\n']
- terminator: ['\n', '\n']
>>> print (c.parser.parseString('multiline command "with term; ends" now\n\n').dump())
['multiline', 'command "with term; ends" now', '\n', '\n']
- args: command "with term; ends" now
- multilineCommand: multiline
- statement: ['multiline', 'command "with term; ends" now', '\n', '\n']
- args: command "with term; ends" now
- multilineCommand: multiline
- terminator: ['\n', '\n']
- terminator: ['\n', '\n']
>>> print (c.parser.parseString('what if "quoted strings /* seem to " start comments?').dump())
['what', 'if "quoted strings /* seem to " start comments?']
- args: if "quoted strings /* seem to " start comments?
- command: what
- statement: ['what', 'if "quoted strings /* seem to " start comments?']
- args: if "quoted strings /* seem to " start comments?
- command: what
'''
#outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')
outputParser = (pyparsing.Literal(self.redirector *2) | \
(pyparsing.WordStart() + self.redirector) | \
pyparsing.Regex('[^=]' + self.redirector))('output')
terminatorParser = pyparsing.Or([(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')
stringEnd = pyparsing.stringEnd ^ '\nEOF'
self.multilineCommand = pyparsing.Or([pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')
oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')
pipe = pyparsing.Keyword('|', identChars='|')
self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')
doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString
afterElements = \
pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \
pyparsing.Optional(outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('outputTo'))
if self.case_insensitive:
self.multilineCommand.setParseAction(lambda x: x[0].lower())
oneLineCommand.setParseAction(lambda x: x[0].lower())
if self.blankLinesAllowed:
            self.blankLineTerminationParser = pyparsing.NoMatch()
else:
self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')
self.blankLineTerminator.setResultsName('terminator')
self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')
self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('args') + terminatorParser)('statement') +
pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())('suffix') + afterElements)
self.multilineParser.ignore(self.commentInProgress)
self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser, ignore=doNotParse).setParseAction(lambda x:x[0].strip())('args'))('statement') +
pyparsing.Optional(terminatorParser) + afterElements)
#self.multilineParser = self.multilineParser.setResultsName('multilineParser')
#self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')
self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')
self.parser = self.prefixParser + (
stringEnd |
self.multilineParser |
self.singleLineParser |
self.blankLineTerminationParser |
self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)
)
self.parser.ignore(self.commentGrammars)
inputMark = pyparsing.Literal('<')
inputMark.setParseAction(lambda x: '')
fileName = pyparsing.Word(self.legalChars + '/\\')
inputFrom = fileName('inputFrom')
inputFrom.setParseAction(replace_with_file_contents)
# a not-entirely-satisfactory way of distinguishing < as in "import from" from <
# as in "lesser than"
self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \
pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')
self.inputParser.ignore(self.commentInProgress)
def preparse(self, raw, **kwargs):
return raw
def postparse(self, parseResult):
return parseResult
def parsed(self, raw, **kwargs):
if isinstance(raw, ParsedString):
p = raw
else:
# preparse is an overridable hook; default makes no changes
s = self.preparse(raw, **kwargs)
s = self.inputParser.transformString(s.lstrip())
s = self.commentGrammars.transformString(s)
for (shortcut, expansion) in self.shortcuts:
if s.lower().startswith(shortcut):
s = s.replace(shortcut, expansion + ' ', 1)
break
result = self.parser.parseString(s)
result['raw'] = raw
result['command'] = result.multilineCommand or result.command
result = self.postparse(result)
p = ParsedString(result.args)
p.parsed = result
p.parser = self.parsed
for (key, val) in kwargs.items():
p.parsed[key] = val
return p
def postparsing_precmd(self, statement):
stop = 0
return stop, statement
def postparsing_postcmd(self, stop):
return stop
def func_named(self, arg):
result = None
target = 'do_' + arg
if target in dir(self):
result = target
else:
if self.abbrev: # accept shortened versions of commands
funcs = [fname for fname in self.keywords if fname.startswith(arg)]
if len(funcs) == 1:
result = 'do_' + funcs[0]
return result
def onecmd_plus_hooks(self, line):
# The outermost level of try/finally nesting can be condensed once
# Python 2.4 support can be dropped.
stop = 0
try:
try:
statement = self.complete_statement(line)
(stop, statement) = self.postparsing_precmd(statement)
if stop:
return self.postparsing_postcmd(stop)
if statement.parsed.command not in self.excludeFromHistory:
self.history.append(statement.parsed.raw)
try:
self.redirect_output(statement)
timestart = datetime.datetime.now()
statement = self.precmd(statement)
stop = self.onecmd(statement)
stop = self.postcmd(stop, statement)
if self.timing:
self.pfeedback('Elapsed: %s' % str(datetime.datetime.now() - timestart))
finally:
self.restore_output(statement)
except EmptyStatement:
return 0
except Exception as e:
self.perror(str(e), statement)
finally:
return self.postparsing_postcmd(stop)
def complete_statement(self, line):
"""Keep accepting lines of input until the command is complete."""
if (not line) or (
not pyparsing.Or(self.commentGrammars).
setParseAction(lambda x: '').transformString(line)):
raise EmptyStatement()
statement = self.parsed(line)
while statement.parsed.multilineCommand and (statement.parsed.terminator == ''):
statement = '%s\n%s' % (statement.parsed.raw,
self.pseudo_raw_input(self.continuation_prompt))
statement = self.parsed(statement)
if not statement.parsed.command:
raise EmptyStatement()
return statement
def redirect_output(self, statement):
if statement.parsed.pipeTo:
self.kept_state = Statekeeper(self, ('stdout',))
self.kept_sys = Statekeeper(sys, ('stdout',))
self.redirect = subprocess.Popen(statement.parsed.pipeTo, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
sys.stdout = self.stdout = self.redirect.stdin
elif statement.parsed.output:
if (not statement.parsed.outputTo) and (not can_clip):
raise EnvironmentError('Cannot redirect to paste buffer; install ``xclip`` and re-run to enable')
self.kept_state = Statekeeper(self, ('stdout',))
self.kept_sys = Statekeeper(sys, ('stdout',))
if statement.parsed.outputTo:
mode = 'w'
if statement.parsed.output == 2 * self.redirector:
mode = 'a'
sys.stdout = self.stdout = open(os.path.expanduser(statement.parsed.outputTo), mode)
else:
sys.stdout = self.stdout = tempfile.TemporaryFile(mode="w+")
if statement.parsed.output == '>>':
self.stdout.write(get_paste_buffer())
def restore_output(self, statement):
if self.kept_state:
if statement.parsed.output:
if not statement.parsed.outputTo:
self.stdout.seek(0)
write_to_paste_buffer(self.stdout.read())
elif statement.parsed.pipeTo:
for result in self.redirect.communicate():
self.kept_state.stdout.write(result or '')
self.stdout.close()
self.kept_state.restore()
self.kept_sys.restore()
self.kept_state = None
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
        This (`cmd2`) version of `onecmd` already overrides `cmd`'s `onecmd`.
"""
statement = self.parsed(line)
self.lastcmd = statement.parsed.raw
funcname = self.func_named(statement.parsed.command)
if not funcname:
return self._default(statement)
try:
func = getattr(self, funcname)
except AttributeError:
return self._default(statement)
stop = func(statement)
return stop
def _default(self, statement):
arg = statement.full_parsed_statement()
if self.default_to_shell:
result = os.system(arg)
if not result:
return self.postparsing_postcmd(None)
return self.postparsing_postcmd(self.default(arg))
def pseudo_raw_input(self, prompt):
"""copied from cmd's cmdloop; like raw_input, but accounts for changed stdin, stdout"""
if self.use_rawinput:
try:
line = raw_input(prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
if line[-1] == '\n': # this was always true in Cmd
line = line[:-1]
return line
def _cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
# An almost perfect copy from Cmd; however, the pseudo_raw_input portion
# has been split out so that it can be called separately
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
line = self.pseudo_raw_input(self.prompt)
if (self.echo) and (isinstance(self.stdin, file)):
self.stdout.write(line + '\n')
stop = self.onecmd_plus_hooks(line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
return stop
def do_EOF(self, arg):
return self._STOP_SCRIPT_NO_EXIT # End of script; should not exit app
do_eof = do_EOF
def do_quit(self, arg):
return self._STOP_AND_EXIT
do_exit = do_quit
do_q = do_quit
def select(self, options, prompt='Your choice? '):
'''Presents a numbered menu to the user. Modelled after
the bash shell's SELECT. Returns the item chosen.
Argument ``options`` can be:
| a single string -> will be split into one-word options
| a list of strings -> will be offered as options
| a list of tuples -> interpreted as (value, text), so
that the return value can differ from
the text advertised to the user '''
if isinstance(options, basestring):
options = zip(options.split(), options.split())
fulloptions = []
for opt in options:
if isinstance(opt, basestring):
fulloptions.append((opt, opt))
else:
try:
fulloptions.append((opt[0], opt[1]))
except IndexError:
fulloptions.append((opt[0], opt[0]))
for (idx, (value, text)) in enumerate(fulloptions):
self.poutput(' %2d. %s\n' % (idx+1, text))
while True:
response = raw_input(prompt)
try:
response = int(response)
result = fulloptions[response - 1][0]
break
except ValueError:
pass # loop and ask again
return result
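    # Usage sketch (``do_pick`` is a hypothetical command, not part of cmd2):
    #
    #     def do_pick(self, arg):
    #         flavour = self.select('sweet salty sour', 'Flavour? ')
    #         self.poutput('You picked %s' % flavour)
    #
    # With (value, text) pairs the menu shows the text but returns the value:
    #
    #     code = self.select([('us', 'United States'), ('ie', 'Ireland')])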
@options([make_option('-l', '--long', action="store_true",
help="describe function of parameter")])
def do_show(self, arg, opts):
'''Shows value of a parameter.'''
param = arg.strip().lower()
result = {}
maxlen = 0
for p in self.settable:
if (not param) or p.startswith(param):
result[p] = '%s: %s' % (p, str(getattr(self, p)))
maxlen = max(maxlen, len(result[p]))
if result:
for p in sorted(result):
if opts.long:
self.poutput('%s # %s' % (result[p].ljust(maxlen), self.settable[p]))
else:
self.poutput(result[p])
else:
raise NotImplementedError("Parameter '%s' not supported (type 'show' for list of parameters)." % param)
def do_set(self, arg):
'''
Sets a cmd2 parameter. Accepts abbreviated parameter names so long
as there is no ambiguity. Call without arguments for a list of
settable parameters with their values.'''
try:
statement, paramName, val = arg.parsed.raw.split(None, 2)
val = val.strip()
paramName = paramName.strip().lower()
if paramName not in self.settable:
hits = [p for p in self.settable if p.startswith(paramName)]
if len(hits) == 1:
paramName = hits[0]
else:
return self.do_show(paramName)
currentVal = getattr(self, paramName)
if (val[0] == val[-1]) and val[0] in ("'", '"'):
val = val[1:-1]
else:
val = cast(currentVal, val)
setattr(self, paramName, val)
self.stdout.write('%s - was: %s\nnow: %s\n' % (paramName, currentVal, val))
if currentVal != val:
try:
onchange_hook = getattr(self, '_onchange_%s' % paramName)
onchange_hook(old=currentVal, new=val)
except AttributeError:
pass
except (ValueError, AttributeError, NotSettableError) as e:
self.do_show(arg)
def do_pause(self, arg):
'Displays the specified text then waits for the user to press RETURN.'
raw_input(arg + '\n')
def do_shell(self, arg):
'execute a command as if at the OS prompt.'
os.system(arg)
def do_py(self, arg):
'''
py <command>: Executes a Python command.
py: Enters interactive Python mode.
        End with ``Ctrl-D`` (Unix) / ``Ctrl-Z`` (Windows), ``quit()``, ``exit()``.
Non-python commands can be issued with ``cmd("your command")``.
Run python code from external files with ``run("filename.py")``
'''
self.pystate['self'] = self
arg = arg.parsed.raw[2:].strip()
localvars = (self.locals_in_py and self.pystate) or {}
interp = InteractiveConsole(locals=localvars)
interp.runcode('import sys, os;sys.path.insert(0, os.getcwd())')
if arg.strip():
interp.runcode(arg)
else:
def quit():
raise EmbeddedConsoleExit
def onecmd_plus_hooks(arg):
return self.onecmd_plus_hooks(arg + '\n')
def run(arg):
try:
file = open(arg)
interp.runcode(file.read())
file.close()
except IOError as e:
self.perror(e)
self.pystate['quit'] = quit
self.pystate['exit'] = quit
self.pystate['cmd'] = onecmd_plus_hooks
self.pystate['run'] = run
try:
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
keepstate = Statekeeper(sys, ('stdin','stdout'))
sys.stdout = self.stdout
sys.stdin = self.stdin
interp.interact(banner= "Python %s on %s\n%s\n(%s)\n%s" %
(sys.version, sys.platform, cprt, self.__class__.__name__, self.do_py.__doc__))
except EmbeddedConsoleExit:
pass
keepstate.restore()
@options([make_option('-s', '--script', action="store_true", help="Script format; no separation lines"),
], arg_desc = '(limit on which commands to include)')
def do_history(self, arg, opts):
"""history [arg]: lists past commands issued
| no arg: list all
| arg is integer: list one history item, by index
| arg is string: string search
| arg is /enclosed in forward-slashes/: regular expression search
"""
if arg:
history = self.history.get(arg)
else:
history = self.history
for hi in history:
if opts.script:
self.poutput(hi)
else:
self.stdout.write(hi.pr())
def last_matching(self, arg):
try:
if arg:
return self.history.get(arg)[-1]
else:
return self.history[-1]
except IndexError:
return None
def do_list(self, arg):
"""list [arg]: lists last command issued
no arg -> list most recent command
arg is integer -> list one history item, by index
a..b, a:b, a:, ..b -> list spans from a (or start) to b (or end)
arg is string -> list all commands matching string search
arg is /enclosed in forward-slashes/ -> regular expression search
"""
try:
history = self.history.span(arg or '-1')
except IndexError:
history = self.history.search(arg)
for hi in history:
self.poutput(hi.pr())
do_hi = do_history
do_l = do_list
do_li = do_list
def do_ed(self, arg):
"""ed: edit most recent command in text editor
ed [N]: edit numbered command from history
ed [filename]: edit specified file name
commands are run after editor is closed.
"set edit (program-name)" or set EDITOR environment variable
to control which editing program is used."""
if not self.editor:
raise EnvironmentError("Please use 'set editor' to specify your text editing program of choice.")
filename = self.default_file_name
if arg:
try:
buffer = self.last_matching(int(arg))
except ValueError:
filename = arg
buffer = ''
else:
buffer = self.history[-1]
if buffer:
f = open(os.path.expanduser(filename), 'w')
f.write(buffer or '')
f.close()
os.system('%s %s' % (self.editor, filename))
self.do__load(filename)
do_edit = do_ed
saveparser = (pyparsing.Optional(pyparsing.Word(pyparsing.nums)^'*')("idx") +
pyparsing.Optional(pyparsing.Word(legalChars + '/\\'))("fname") +
pyparsing.stringEnd)
def do_save(self, arg):
"""`save [N] [filename.ext]`
Saves command from history to file.
| N => Number of command (from history), or `*`;
| most recent command if omitted"""
try:
args = self.saveparser.parseString(arg)
except pyparsing.ParseException:
self.perror('Could not understand save target %s' % arg)
raise SyntaxError(self.do_save.__doc__)
fname = args.fname or self.default_file_name
if args.idx == '*':
saveme = '\n\n'.join(self.history[:])
elif args.idx:
saveme = self.history[int(args.idx)-1]
else:
saveme = self.history[-1]
try:
f = open(os.path.expanduser(fname), 'w')
f.write(saveme)
f.close()
self.pfeedback('Saved to %s' % (fname))
except Exception as e:
self.perror('Error saving %s' % (fname))
raise
def read_file_or_url(self, fname):
# TODO: not working on localhost
if isinstance(fname, file):
            result = fname
else:
match = self.urlre.match(fname)
if match:
result = urllib.urlopen(match.group(1))
else:
fname = os.path.expanduser(fname)
try:
result = open(os.path.expanduser(fname), 'r')
except IOError:
result = open('%s.%s' % (os.path.expanduser(fname),
self.defaultExtension), 'r')
return result
def do__relative_load(self, arg=None):
'''
Runs commands in script at file or URL; if this is called from within an
already-running script, the filename will be interpreted relative to the
already-running script's directory.'''
if arg:
arg = arg.split(None, 1)
targetname, args = arg[0], (arg[1:] or [''])[0]
targetname = os.path.join(self.current_script_dir or '', targetname)
self.do__load('%s %s' % (targetname, args))
urlre = re.compile('(https?://[-\\w\\./]+)')
def do_load(self, arg=None):
"""Runs script of command(s) from a file or URL."""
if arg is None:
targetname = self.default_file_name
else:
arg = arg.split(None, 1)
targetname, args = arg[0], (arg[1:] or [''])[0].strip()
try:
target = self.read_file_or_url(targetname)
except IOError as e:
self.perror('Problem accessing script from %s: \n%s' % (targetname, e))
return
keepstate = Statekeeper(self, ('stdin','use_rawinput','prompt',
'continuation_prompt','current_script_dir'))
self.stdin = target
self.use_rawinput = False
self.prompt = self.continuation_prompt = ''
self.current_script_dir = os.path.split(targetname)[0]
stop = self._cmdloop()
self.stdin.close()
keepstate.restore()
self.lastcmd = ''
return stop and (stop != self._STOP_SCRIPT_NO_EXIT)
do__load = do_load # avoid an unfortunate legacy use of do_load from sqlpython
def do_run(self, arg):
"""run [arg]: re-runs an earlier command
no arg -> run most recent command
arg is integer -> run one history item, by index
arg is string -> run most recent command by string search
arg is /enclosed in forward-slashes/ -> run most recent by regex
"""
'run [N]: runs the SQL that was run N commands ago'
runme = self.last_matching(arg)
self.pfeedback(runme)
if runme:
stop = self.onecmd_plus_hooks(runme)
do_r = do_run
def fileimport(self, statement, source):
try:
f = open(os.path.expanduser(source))
except IOError:
self.stdout.write("Couldn't read from file %s\n" % source)
return ''
data = f.read()
f.close()
return data
def runTranscriptTests(self, callargs):
class TestMyAppCase(Cmd2TestCase):
CmdApp = self.__class__
self.__class__.testfiles = callargs
sys.argv = [sys.argv[0]] # the --test argument upsets unittest.main()
testcase = TestMyAppCase()
runner = unittest.TextTestRunner()
result = runner.run(testcase)
result.printErrors()
def run_commands_at_invocation(self, callargs):
for initial_command in callargs:
if self.onecmd_plus_hooks(initial_command + '\n'):
return self._STOP_AND_EXIT
def cmdloop(self):
parser = optparse.OptionParser()
parser.add_option('-t', '--test', dest='test',
action="store_true",
help='Test against transcript(s) in FILE (wildcards OK)')
(callopts, callargs) = parser.parse_args()
if callopts.test:
self.runTranscriptTests(callargs)
else:
if not self.run_commands_at_invocation(callargs):
self._cmdloop()
class HistoryItem(str):
listformat = '-------------------------[%d]\n%s\n'
def __init__(self, instr):
str.__init__(self)
self.lowercase = self.lower()
self.idx = None
def pr(self):
return self.listformat % (self.idx, str(self))
class History(list):
'''A list of HistoryItems that knows how to respond to user requests.
>>> h = History([HistoryItem('first'), HistoryItem('second'), HistoryItem('third'), HistoryItem('fourth')])
>>> h.span('-2..')
['third', 'fourth']
>>> h.span('2..3')
['second', 'third']
>>> h.span('3')
['third']
>>> h.span(':')
['first', 'second', 'third', 'fourth']
>>> h.span('2..')
['second', 'third', 'fourth']
>>> h.span('-1')
['fourth']
>>> h.span('-2..-3')
['third', 'second']
>>> h.search('o')
['second', 'fourth']
>>> h.search('/IR/')
['first', 'third']
'''
def zero_based_index(self, onebased):
result = onebased
if result > 0:
result -= 1
return result
def to_index(self, raw):
if raw:
result = self.zero_based_index(int(raw))
else:
result = None
return result
def search(self, target):
target = target.strip()
if target[0] == target[-1] == '/' and len(target) > 1:
target = target[1:-1]
else:
target = re.escape(target)
pattern = re.compile(target, re.IGNORECASE)
return [s for s in self if pattern.search(s)]
spanpattern = re.compile(r'^\s*(?P<start>\-?\d+)?\s*(?P<separator>:|(\.{2,}))?\s*(?P<end>\-?\d+)?\s*$')
def span(self, raw):
if raw.lower() in ('*', '-', 'all'):
raw = ':'
results = self.spanpattern.search(raw)
if not results:
raise IndexError
if not results.group('separator'):
return [self[self.to_index(results.group('start'))]]
start = self.to_index(results.group('start'))
end = self.to_index(results.group('end'))
reverse = False
if end is not None:
if end < start:
(start, end) = (end, start)
reverse = True
end += 1
result = self[start:end]
if reverse:
result.reverse()
return result
rangePattern = re.compile(r'^\s*(?P<start>[\d]+)?\s*\-\s*(?P<end>[\d]+)?\s*$')
def append(self, new):
new = HistoryItem(new)
list.append(self, new)
new.idx = len(self)
def extend(self, new):
for n in new:
self.append(n)
def get(self, getme=None, fromEnd=False):
if not getme:
return self
try:
getme = int(getme)
if getme < 0:
return self[:(-1 * getme)]
else:
return [self[getme-1]]
except IndexError:
return []
except ValueError:
rangeResult = self.rangePattern.search(getme)
if rangeResult:
start = rangeResult.group('start') or None
                end = rangeResult.group('end') or None
if start:
start = int(start) - 1
if end:
end = int(end)
return self[start:end]
getme = getme.strip()
if getme.startswith(r'/') and getme.endswith(r'/'):
finder = re.compile(getme[1:-1], re.DOTALL | re.MULTILINE | re.IGNORECASE)
def isin(hi):
return finder.search(hi)
else:
def isin(hi):
return (getme.lower() in hi.lowercase)
return [itm for itm in self if isin(itm)]
class NotSettableError(Exception):
pass
def cast(current, new):
"""Tries to force a new value into the same type as the current."""
typ = type(current)
if typ == bool:
try:
return bool(int(new))
except (ValueError, TypeError):
pass
try:
new = new.lower()
except Exception:
pass
if (new=='on') or (new[0] in ('y','t')):
return True
if (new=='off') or (new[0] in ('n','f')):
return False
else:
try:
return typ(new)
except Exception:
pass
print ("Problem setting parameter (now %s) to %s; incorrect type?" % (current, new))
return current
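# Behaviour sketch for cast (results follow from the logic above; booleans
# such as ``timing`` or ``abbrev`` also accept on/off, y/n, t/f spellings):
#
#     cast(True, 'off')  ->  False
#     cast(True, 'y')    ->  True
#     cast(5, '10')      ->  10
#     cast(5, 'ten')     ->  prints a warning and returns 5 unchanged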
class Statekeeper(object):
def __init__(self, obj, attribs):
self.obj = obj
self.attribs = attribs
if self.obj:
self.save()
def save(self):
for attrib in self.attribs:
setattr(self, attrib, getattr(self.obj, attrib))
def restore(self):
if self.obj:
for attrib in self.attribs:
setattr(self.obj, attrib, getattr(self, attrib))
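# Usage sketch for Statekeeper, mirroring redirect_output/restore_output above
# (the file name is illustrative):
#
#     kept = Statekeeper(sys, ('stdout',))
#     sys.stdout = open('captured.txt', 'w')
#     ...                       # anything printed now goes to the file
#     sys.stdout.close()
#     kept.restore()            # sys.stdout is back to its saved value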
class Borg(object):
'''All instances of any Borg subclass will share state.
from Python Cookbook, 2nd Ed., recipe 6.16'''
_shared_state = {}
def __new__(cls, *a, **k):
obj = object.__new__(cls, *a, **k)
obj.__dict__ = cls._shared_state
return obj
class OutputTrap(Borg):
'''Instantiate an OutputTrap to divert/capture ALL stdout output. For use in unit testing.
Call `tearDown()` to return to normal output.'''
def __init__(self):
self.contents = ''
self.old_stdout = sys.stdout
sys.stdout = self
def write(self, txt):
self.contents += txt
def read(self):
result = self.contents
self.contents = ''
return result
def tearDown(self):
sys.stdout = self.old_stdout
self.contents = ''
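# Usage sketch for OutputTrap, as Cmd2TestCase.setUp/tearDown use it below:
#
#     trap = OutputTrap()       # stdout now accumulates in trap.contents
#     print('hello')
#     captured = trap.read()    # -> 'hello\n'; the buffer is cleared
#     trap.tearDown()           # stdout restored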
class Cmd2TestCase(unittest.TestCase):
'''Subclass this, setting CmdApp, to make a unittest.TestCase class
that will execute the commands in a transcript file and expect the results shown.
See example.py'''
CmdApp = None
def fetchTranscripts(self):
self.transcripts = {}
for fileset in self.CmdApp.testfiles:
for fname in glob.glob(fileset):
tfile = open(fname)
self.transcripts[fname] = iter(tfile.readlines())
tfile.close()
if not len(self.transcripts):
raise StandardError("No test files found - nothing to test.")
def setUp(self):
if self.CmdApp:
self.outputTrap = OutputTrap()
self.cmdapp = self.CmdApp()
self.fetchTranscripts()
def runTest(self): # was testall
if self.CmdApp:
its = sorted(self.transcripts.items())
for (fname, transcript) in its:
self._test_transcript(fname, transcript)
regexPattern = pyparsing.QuotedString(quoteChar=r'/', escChar='\\', multiline=True, unquoteResults=True)
regexPattern.ignore(pyparsing.cStyleComment)
notRegexPattern = pyparsing.Word(pyparsing.printables)
notRegexPattern.setParseAction(lambda t: re.escape(t[0]))
expectationParser = regexPattern | notRegexPattern
anyWhitespace = re.compile(r'\s', re.DOTALL | re.MULTILINE)
def _test_transcript(self, fname, transcript):
lineNum = 0
finished = False
line = transcript.next()
lineNum += 1
tests_run = 0
while not finished:
# Scroll forward to where actual commands begin
while not line.startswith(self.cmdapp.prompt):
try:
line = transcript.next()
except StopIteration:
finished = True
break
lineNum += 1
command = [line[len(self.cmdapp.prompt):]]
line = transcript.next()
# Read the entirety of a multi-line command
while line.startswith(self.cmdapp.continuation_prompt):
command.append(line[len(self.cmdapp.continuation_prompt):])
try:
line = transcript.next()
except StopIteration:
                    raise StopIteration(
                        'Transcript broke off while reading command beginning at line %d with\n%s'
                        % (lineNum, command[0]))
lineNum += 1
command = ''.join(command)
# Send the command into the application and capture the resulting output
stop = self.cmdapp.onecmd_plus_hooks(command)
#TODO: should act on ``stop``
result = self.outputTrap.read()
# Read the expected result from transcript
if line.startswith(self.cmdapp.prompt):
message = '\nFile %s, line %d\nCommand was:\n%r\nExpected: (nothing)\nGot:\n%r\n'%\
(fname, lineNum, command, result)
self.assert_(not(result.strip()), message)
continue
expected = []
while not line.startswith(self.cmdapp.prompt):
expected.append(line)
try:
line = transcript.next()
except StopIteration:
finished = True
break
lineNum += 1
expected = ''.join(expected)
# Compare actual result to expected
message = '\nFile %s, line %d\nCommand was:\n%s\nExpected:\n%s\nGot:\n%s\n'%\
(fname, lineNum, command, expected, result)
expected = self.expectationParser.transformString(expected)
# checking whitespace is a pain - let's skip it
expected = self.anyWhitespace.sub('', expected)
result = self.anyWhitespace.sub('', result)
self.assert_(re.match(expected, result, re.MULTILINE | re.DOTALL), message)
def tearDown(self):
if self.CmdApp:
self.outputTrap.tearDown()
if __name__ == '__main__':
doctest.testmod(optionflags = doctest.NORMALIZE_WHITESPACE)
'''
To make your application transcript-testable, replace
::
app = MyApp()
app.cmdloop()
with
::
app = MyApp()
cmd2.run(app)
Then run a session of your application and paste the entire screen contents
into a file, ``transcript.test``, and invoke the test like::
python myapp.py --test transcript.test
Wildcards can be used to test against multiple transcript files.
'''
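# A transcript is just a captured session: lines starting with the prompt are
# re-run and the following lines are compared with the command's output, with
# /slash-delimited/ parts treated as regular expressions.  A hypothetical
# transcript.test for an app whose prompt is '(Cmd) ' could look like:
#
#     (Cmd) say hello
#     hello
#     (Cmd) show timing
#     timing: /(True|False)/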
| bsd-3-clause | -3,257,221,394,057,746,400 | 39.033187 | 239 | 0.56262 | false | 4.104848 | true | false | false |
petrushev/gettornado | src/gettornado/http.py | 1 | 1442 | import zlib
from urllib import urlencode
from StringIO import StringIO
from gzip import GzipFile
from PyQt5.QtCore import pyqtSignal, QObject, QUrl, pyqtSlot
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
def decodeData(data, headers):
encoding = headers.get('Content-Encoding', None)
if encoding=='deflate':
f = StringIO(zlib.decompress(data))
elif encoding=='x-gzip' or encoding=='gzip':
f = GzipFile('', 'rb', 9, StringIO(data))
else:
return data
    # read the decompressed data back out of the file-like object
data = f.read()
f.close()
return data
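# Usage sketch: pair the raw reply body with its headers so gzip/deflate
# payloads come back decompressed (values here are illustrative):
#
#     body = decodeData(raw_bytes, {'Content-Encoding': 'gzip'})
#
# A missing or unrecognised Content-Encoding returns the data unchanged.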
class QRequest(QObject):
manager = QNetworkAccessManager()
finished = pyqtSignal()
def __init__(self, url, params=None, parent=None):
QObject.__init__(self, parent=parent)
self.data = ''
self.headers = {}
self.statusCode = None
self.params = params
if params is not None:
url = url + "?" + urlencode(params)
self.qUrl = QUrl(url)
self.request = QNetworkRequest(self.qUrl)
def get(self):
self.response = self.manager.get(self.request)
self.response.readyRead.connect(self._onDataReady)
self.response.finished.connect(self._onFinished)
@pyqtSlot()
def _onDataReady(self):
self.data = self.data + self.response.readAll().data()
@pyqtSlot()
def _onFinished(self):
self.finished.emit()
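# Minimal usage sketch (url and slot name are illustrative); a running Qt
# event loop (QCoreApplication/QApplication) is needed before the
# QNetworkAccessManager callbacks fire:
#
#     req = QRequest('http://example.com/feed', params={'q': 'tornado'})
#
#     def on_finished():
#         print('%d bytes received' % len(req.data))
#
#     req.finished.connect(on_finished)
#     req.get()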
| gpl-3.0 | 4,849,590,899,934,975,000 | 23.033333 | 66 | 0.637309 | false | 3.855615 | false | false | false |
openstack-dev/grenade | tools/generate-grenade-plugins-list.py | 1 | 1963 | #! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.openstack.org Gerrit API
# working directory
# * network access to https://git.openstack.org/cgit
import json
try:
# For Python 3.0 and later
from urllib.error import HTTPError
import urllib.request as urllib
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as urllib
from urllib2 import HTTPError
url = 'https://review.openstack.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_openstack_namespace(proj):
return proj.startswith('openstack/')
def has_grenade_plugin(proj):
try:
r = urllib.urlopen(
"https://git.openstack.org/cgit/%s/plain/devstack/upgrade/upgrade.sh" % proj)
return True
except HTTPError as err:
if err.code == 404:
return False
r = urllib.urlopen(url)
projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
found_plugins = filter(has_grenade_plugin, projects)
for project in found_plugins:
print(project[10:])
| apache-2.0 | -916,719,589,163,870,100 | 29.2 | 89 | 0.712175 | false | 3.731939 | false | false | false |
WillBickerstaff/sundial | lib/gui/about.py | 1 | 2387 | '''
Created on 1 Sep 2012
@author: will
'''
from Tkinter import (Label, Frame, Scrollbar, Entry, N, S, E, W, DISABLED,
VERTICAL, Button, LEFT, ACTIVE, FLAT)
import tkFont
from tkSimpleDialog import Dialog
license_text = '''
The MIT License (MIT)
=====================
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
class AboutDialog(Dialog):
def body(self, master):
self.hdgFont = tkFont.Font(family="Helvetica", size=14, weight='bold')
self.monoFont = tkFont.Font(family='Monospace', size=10)
Label(master, text="Sundial Solver", font=self.hdgFont).grid(row=0,
sticky=E + W)
Label(master, text="V 0.1.0").grid(row=1, sticky=E + W)
Label(master, text=u"\u00A9 2012 Will Bickerstaff").grid(row=2,
sticky=E + W)
Label(master, text="<[email protected]>").grid(row=3,
sticky=E + W)
lictxt = Label(master, text=license_text, anchor=W)
lictxt.grid(row=4, column=0, sticky=N + S + E + W)
def buttonbox(self):
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
box.pack()
| mit | -1,381,779,866,263,614,000 | 40.155172 | 78 | 0.644323 | false | 3.971714 | false | false | false |
willforde/python-htmlement | examples.py | 1 | 7032 | #!/usr/bin/env python
"""
For more information, see:
@see https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element
@see https://docs.python.org/3/library/xml.etree.elementtree.html#xpath-support
"""
from __future__ import print_function, unicode_literals
from htmlement import HTMLement
def example_simple():
"""
This example will parse a simple html tree and
extract the website title and all anchors
>>> example_simple()
Parsing: GitHub
GitHub => https://github.com/willforde
GitHub Project => https://github.com/willforde/python-htmlement
"""
html = """
<html>
<head>
<title>GitHub</title>
</head>
<body>
<a href="https://github.com/willforde">GitHub</a>
<a href="https://github.com/willforde/python-htmlement">GitHub Project</a>
</body>
</html>
"""
# Parse the document
parser = HTMLement()
parser.feed(html)
root = parser.close()
# Root is an xml.etree.Element and supports the ElementTree API
# (e.g. you may use its limited support for XPath expressions)
# Get title
title = root.find('head/title').text
print("Parsing: {}".format(title))
# Get all anchors
for a in root.iterfind(".//a"):
# Get href attribute
url = a.get("href")
# Get anchor name
name = a.text
print("{} => {}".format(name, url))
def example_filter():
"""
This example will parse a simple html tree and
extract all the list items within the ul menu element using a tree filter.
The tree filter will tell the parser to only parse the elements within the
requested section and to ignore all other elements.
Useful for speeding up the parsing of html pages.
>>> example_filter()
Menu Items
- Coffee
- Tea
- Milk
"""
html = """
<html>
<head>
<title>Coffee shop</title>
</head>
<body>
<ul class="menu">
<li>Coffee</li>
<li>Tea</li>
<li>Milk</li>
</ul>
<ul class="extras">
<li>Sugar</li>
<li>Cream</li>
</ul>
</body>
</html>
"""
# Parse the document
parser = HTMLement("ul", attrs={"class": "menu"})
parser.feed(html)
root = parser.close()
# Root should now be a 'ul' xml.etree.Element with all it's child elements available
# All other elements have been ignored. Way faster to parse.
# We are unable to get the title here sense all
# elements outside the filter was ignored
print("Menu Items")
# Get all listitems
for item in root.iterfind(".//li"):
# Get text from listitem
print("- {}".format(item.text))
def example_complex():
"""
This example will parse a more complex html tree of python talk's and will
extract the image, title, url and date of each talk.
A filter will be used to extract the main talks div element
>>> example_complex()
Image = /presentations/c7f1fbb5d03a409d9de8abb5238d6a68/thumb_slide_0.jpg
Url = /pycon2016/alex-martelli-exception-and-error-handling-in-python-2-and-python-3
Title = Alex Martelli - Exception and error handling in Python 2 and Python 3
Date = Jun 1, 2016
<BLANKLINE>
Image = /presentations/eef8ffe5b6784f7cb84948cf866b2608/thumb_slide_0.jpg
Url = /presentations/518cae54da12460e895163d809e25933/thumb_slide_0.jpg
Title = Jake Vanderplas - Statistics for Hackers
Date = May 29, 2016
<BLANKLINE>
Image = /presentations/8b3ee51b5fcc4a238c4cb4b7787979ac/thumb_slide_0.jpg
Url = /pycon2016/brett-slatkin-refactoring-python-why-and-how-to-restructure-your-code
Title = Brett Slatkin - Refactoring Python: Why and how to restructure your code
Date = May 29, 2016
<BLANKLINE>
"""
html = """
<html>
<head>
<title>PyCon 2016</title>
</head>
<body>
<div class="main">
<h1>Talks by PyCon 2016</h1>
<div class="talks" id="d5esfbb5d03adfdfede8a342238d6a68">
<div class="talk" data-id="c7f1fbb5d03a409d9de8abb5238d6a68">
<a href="/pycon2016/kelsey-gilmore-innis-seriously-strong-security-on-a-shoestring">
<img src="/presentations/c7f1fbb5d03a409d9de8abb5238d6a68/thumb_slide_0.jpg">
</a>
<div class="talk-listing-meta">
<h3 class="title">
<a href="/pycon2016/alex-martelli-exception-and-error-handling-in-python-2-and-python-3">
Alex Martelli - Exception and error handling in Python 2 and Python 3
</a>
</h3>
<p class="date">Jun 1, 2016</p>
</div>
</div>
<div class="talk" data-id="518cae54da12460e895163d809e25933">
<a href="/pycon2016/manuel-ebert-putting-1-million-new-words-into-the-dictionary">
<img src="/presentations/eef8ffe5b6784f7cb84948cf866b2608/thumb_slide_0.jpg">
</a>
<div class="talk-listing-meta">
<h3 class="title">
<a href="/presentations/518cae54da12460e895163d809e25933/thumb_slide_0.jpg">
Jake Vanderplas - Statistics for Hackers
</a>
</h3>
<p class="date">May 29, 2016</p>
</div>
</div>
<div class="talk" data-id="8b3ee51b5fcc4a238c4cb4b7787979ac">
<a href="/pycon2016/brett-slatkin-refactoring-python-why-and-how-to-restructure-your-code">
<img src="/presentations/8b3ee51b5fcc4a238c4cb4b7787979ac/thumb_slide_0.jpg">
</a>
<div class="talk-listing-meta">
<h3 class="title">
<a href="/pycon2016/brett-slatkin-refactoring-python-why-and-how-to-restructure-your-code">
Brett Slatkin - Refactoring Python: Why and how to restructure your code
</a>
</h3>
<p class="date">May 29, 2016</p>
</div>
</div>
</div>
</div>
</body>
</html>
"""
# Parse the document
parser = HTMLement("div", attrs={"class": "talks", "id": True})
parser.feed(html)
root = parser.close()
# Extract all div tags with class of talk
for talk in root.iterfind("./div[@class='talk']"):
# Fetch image
img = talk.find(".//img").get("src")
print("Image = {}".format(img))
# Fetch title and url
title_anchor = talk.find("./div/h3/a")
url = title_anchor.get("href")
print("Url = {}".format(url))
title = title_anchor.text
print("Title = {}".format(title))
# Fetch date
date = talk.find("./div/p").text
print("Date = {}".format(date))
print("")
if __name__ == "__main__":
example_simple()
print("")
example_filter()
print("")
example_complex()
| mit | 584,451,453,548,795,500 | 31.859813 | 109 | 0.585893 | false | 3.455528 | false | false | false |
kmpf/uap | include/subcommands/monitor_disk_io.py | 1 | 7226 | #!/usr/bin/env python
# Run this tool in two stages:
#
# 1. Collect data: ./monitor-disk-io.py [command]
# 2. Print statistics: ./monitor-disk-io.py
# 3. $$$
#
# The first command will run strace and write the output to
# _monitor_disk_io/strace-out.txt.gz. The second call (without any arguments)
# will parse that file and try to find out which file every read, write,
# and seek corresponds to.
# If you want to profile a BASH command, put it in a script and pass the
# path of that script (i. e. BASH voodoo is not allowed here but can be
# accomplished if a BASH script is used).
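# Example session (the script path is illustrative):
#
#     ./monitor-disk-io.py ./my-pipeline.sh   # stage 1: trace the command
#     ./monitor-disk-io.py                    # stage 2: parse and print stats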
import yaml
import re
import os
import subprocess
import logging
import glob
import copy
import sys
WRITE_PROC_FILES = False
logger = logging.getLogger("uap_logger")
proc_files = {}
if not os.path.exists('_monitor_disk_io'):
os.mkdir('_monitor_disk_io')
if len(sys.argv) > 1:
pigz = subprocess.Popen(
"pigz -p 2 -b 4096 -c > _monitor_disk_io/strace-out.txt.gz",
stdin=subprocess.PIPE,
shell=True)
args = ["strace", "-f", "-o", '/dev/stderr']
args.extend(sys.argv[1:])
p = subprocess.Popen(args, stderr=subprocess.PIPE)
strace_out = p.stderr
for line in strace_out:
pigz.stdin.write(line)
pigz.stdin.close()
pigz.wait()
exit(0)
pigz = subprocess.Popen("pigz -p 1 -d -c _monitor_disk_io/strace-out.txt.gz",
stdout=subprocess.PIPE, shell=True)
strace_out = pigz.stdout
if len(glob.glob('_monitor_disk_io/*.proc.txt')) > 0:
os.system("rm _monitor_disk_io/*.proc.txt")
path_for_pid_and_fd = {}
stats = {}
def handle_line(pid, line):
line = line.strip()
if WRITE_PROC_FILES:
if pid not in proc_files:
proc_files[pid] = open("_monitor_disk_io/%s.proc.txt" % pid, 'w')
proc_files[pid].write(line + "\n")
m = re.search(r'^(\w+)\((.*)\)\s+=\s+(.+)$', line)
if m:
command = str(m.group(1))
args = str(m.group(2)).strip()
retval = str(m.group(3))
if command == 'clone':
for _ in path_for_pid_and_fd[pid].keys():
if retval not in path_for_pid_and_fd:
path_for_pid_and_fd[retval] = {}
path_for_pid_and_fd[retval][_] = copy.copy(
path_for_pid_and_fd[pid][_])
if command == 'dup2':
fds = [_.strip() for _ in args.split(',')]
try:
path_for_pid_and_fd[pid][fds[1]] = copy.copy(
path_for_pid_and_fd[pid][fds[0]])
except BaseException:
path_for_pid_and_fd[pid][fds[1]] = '[unknown]'
if command == 'open':
if pid not in path_for_pid_and_fd:
path_for_pid_and_fd[pid] = {}
path_for_pid_and_fd[pid][retval] = re.search(
"^\\\"([^\\\"]+)\\\"", args).group(1)
if command == 'close':
if pid not in path_for_pid_and_fd:
path_for_pid_and_fd[pid] = {}
fd = args.strip()
if command == 'lseek':
fd = None
m = re.search(r"^(\d+),", args)
if m:
fd = m.group(1)
if fd:
path = '[unknown]'
try:
path = path_for_pid_and_fd[pid][fd]
except BaseException:
if fd == '0':
path = 'stdin'
elif fd == '1':
path = 'stdout'
elif fd == '2':
path = 'stderr'
else:
pass
if path not in stats:
stats[path] = {'read': {}, 'write': {}, 'lseek': 0}
stats[path]['lseek'] += 1
if command == 'read' or command == 'write':
fd = None
size = None
m = re.search(r"^(\d+),", args)
if m:
fd = m.group(1)
size = retval
if fd and size:
try:
sizek = int(size) / 1024
except ValueError:
return
path = '[unknown]'
try:
path = path_for_pid_and_fd[pid][fd]
except BaseException:
if fd == '0':
path = 'stdin'
elif fd == '1':
path = 'stdout'
elif fd == '2':
path = 'stderr'
else:
pass
if path not in stats:
stats[path] = {'read': {}, 'write': {}, 'lseek': 0}
if sizek not in stats[path][command]:
stats[path][command][sizek] = 0
stats[path][command][sizek] += 1
def size_to_cat(s):
if s < 32:
return (0, '< 32k ')
elif s < 128:
return (1, '32k+ ')
elif s < 1024:
return (2, '128k+ ')
elif s < 2048:
return (3, '1024k+ ')
elif s < 3072:
return (4, '2048k+ ')
elif s < 4096:
return (5, '3072k+ ')
elif s < 8192:
return (6, '4096k+ ')
else:
return (7, '8192k+')
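# Bucketing sketch: sizes are KiB per call, and the returned tuple sorts by
# the numeric key while keeping a printable label:
#
#     size_to_cat(12)    ->  (0, '< 32k ')
#     size_to_cat(700)   ->  (2, '128k+ ')
#     size_to_cat(9000)  ->  (7, '8192k+')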
line_buffer = {}
for line in strace_out:
line = line.strip()
pid = str(re.search(r'^(\d+)\s', line).group(1))
line = line[line.index(' ') + 1:]
if 'resumed>' in line:
line = re.sub(r'\<.+\>', '', line)
line_buffer[pid] += line
handle_line(pid, line_buffer[pid])
else:
if '<unfinished' in line:
line = re.sub(r'\<.+\>', '', line)
line_buffer[pid] = line
else:
line_buffer[pid] = line
handle_line(pid, line_buffer[pid])
for path in stats.keys():
cancel = False
for _ in [
'python_env',
'/proc',
'/etc',
'/usr',
'.git',
'.so',
'.py',
'.pyc',
'stdin',
'stdout',
'stderr',
'/dev']:
if _ in path:
cancel = True
continue
if cancel:
continue
printed_path = False
for mode in ['read', 'write']:
printed_mode = False
hist = {}
mod_size = {}
for _, count in stats[path][mode].items():
cat = size_to_cat(_)
if cat not in mod_size:
mod_size[cat] = 0
mod_size[cat] += count
for key in sorted(mod_size.keys(), reverse=True):
size = key[1]
# if key[0] == 0:
# continue
if not printed_path:
print('-' * len(path))
print(path)
print('-' * len(path))
printed_path = True
if not printed_mode:
print(mode.upper() + 'S:')
printed_mode = True
print('{:>8} {:>5}x'.format(str(size), str(mod_size[key])))
if stats[path]['lseek'] > 0:
if not printed_path:
print('-' * len(path))
print(path)
print('-' * len(path))
printed_path = True
print("LSEEKS: " + str(stats[path]['lseek']) + 'x')
for pid, f in proc_files.items():
f.close()
| gpl-3.0 | -6,964,457,910,135,146,000 | 29.618644 | 77 | 0.457791 | false | 3.580773 | false | false | false |
Tsumiki-Chan/Neko-Chan | functions/logger.py | 1 | 3475 | import sys
import sqlite3
import re
import os
import aiofiles
from time import localtime, strftime
import linecache
from functions import botfunc
import asyncio
import global_vars
from pprint import pprint
def printtf(message):
yield from print_to_file(message)
async def print_to_file(message):
# file = os.path.join(global_vars.cwd, global_vars.log_dir, global_vars.log_file)
# async with aiofiles.open(file, mode='a') as f:
# await f.write("{}\r\n".format(message))
print(message)
def PrintException(message=None):
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
filename = filename.replace(global_vars.cwd,"")
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
time = strftime("%H:%M:%S", localtime())
#line = '[' + str(time) + '] {} IN ({}:{} -> "{}"): {}'.format(exc_type.__name__, filename, lineno, line.strip(), exc_obj)
report = "**Bug report**:\r\n"
report += "Reported by: Exception catcher\r\n"
if message is not None:
if message.server is not None:
report += "Server: `{}` (ID: {})\r\n".format(message.server.name, message.server.id)
if message.author is not None:
report += "Author: `{}#{}` (ID: {})\r\n".format(message.author.name, message.author.discriminator, message.author.id)
report += "Time: {}\r\n".format(time)
report += "Type: `{}`\r\n".format(exc_type.__name__)
report += "Exception: `{}`\r\n".format(exc_obj)
report += "File: `{}:{}`\r\n".format(filename, lineno)
report += "Line: `{}`\r\n".format(line.strip())
if message is not None:
report += "Executed command: {}".format(message.content[:1000])
print(report)
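# Usage sketch: call PrintException inside an ``except`` block, while
# sys.exc_info() still holds the active exception; passing the discord message
# being handled lets the report name the server and author.
# ``handle_command`` is illustrative only:
#
#     try:
#         handle_command(message)
#     except Exception:
#         PrintException(message)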
def GetException():
try:
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
filename = filename.replace(global_vars.cwd,"")
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
time = strftime("%H:%M:%S", localtime())
line = '[' + str(time) + '] {} IN ({}:{} -> "{}"): {}'.format(exc_type.__name__, filename, lineno, line.strip(), exc_obj)
return line
except:
PrintException()
def debug(msg):
time = strftime("%H:%M:%S", localtime())
    yield from printtf("[" + str(time) + "] " + str(msg))
def edit_log(chat, sender, message, messageid):
try:
# =============== SQLITE WAY ===============
try:
time = strftime("%H:%M:%S", localtime())
            yield from printtf("[" + str(time) + "] [EDITED] " +
                               sender + ": " + message)
con = False
topic = skype.name(chat)
con = sqlite3.connect('files/' + topic + '.db')
cur = con.cursor()
cur.execute(
"CREATE TABLE IF NOT EXISTS messages (id INTEGER PRIMARY KEY, sender VARCHAR(100), display VARCHAR(100), message TEXT(1000), date VARCHAR(150))")
cur.execute(
"UPDATE messages SET message=message || '\n[EDITED] ' || ? WHERE id=?", (message, messageid))
con.commit()
except sqlite3.Error as e:
PrintException()
finally:
if con:
con.close()
except:
PrintException()
| gpl-3.0 | 8,536,972,281,154,609,000 | 36.771739 | 161 | 0.574964 | false | 3.51365 | false | false | false |
cmaughan/synergy | ext/toolchain/ftputil.py | 33 | 1544 | # synergy -- mouse and keyboard sharing utility
# Copyright (C) 2012 Synergy Si Ltd.
# Copyright (C) 2010 Nick Bolton
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# found in the file LICENSE that should have accompanied this file.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ftplib import FTP
class FtpUploader:
def __init__(self, host, user, password, dir):
self.host = host
self.user = user
self.password = password
self.dir = dir
def upload(self, src, dest, subDir=None):
print "Connecting to '%s'" % self.host
ftp = FTP(self.host, self.user, self.password)
self.changeDir(ftp, self.dir)
if subDir:
self.changeDir(ftp, subDir)
print "Uploading '%s' as '%s'" % (src, dest)
f = open(src, 'rb')
ftp.storbinary('STOR ' + dest, f)
f.close()
ftp.close()
print "Done"
def changeDir(self, ftp, dir):
if dir not in ftp.nlst():
print "Creating dir '%s'" % dir
try:
ftp.mkd(dir)
except:
# sometimes nlst may returns nothing, so mkd fails with 'File exists'
print "Failed to create dir '%s'" % dir
print "Changing to dir '%s'" % dir
ftp.cwd(dir)
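# Usage sketch (host, credentials and paths below are placeholders, not values
# used by the synergy build scripts):
#
#   uploader = FtpUploader('ftp.example.com', 'user', 'secret', 'releases')
#   uploader.upload('bin/synergy.dmg', 'synergy.dmg', subDir='nightly')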
| gpl-2.0 | -5,496,776,305,634,481,000 | 27.592593 | 73 | 0.689119 | false | 3.223382 | false | false | false |
davidsoncasey/quiver-server | plot_equation.py | 1 | 3888 | from __future__ import division
import re
from math import sqrt
import multiprocessing
import Queue
import sympy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
class DiffEquation(object):
'''
Class that contains equation information and, if the equation is valid,
prepares the plot.
'''
def __init__(self, equation_string):
self.equation_string = equation_string
self.equation = None
self.compute_func = None
self.figure = None
def regex_check(self):
'''A quick regular expression check to see that the input resembles an equation'''
match1 = re.match('^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$', self.equation_string)
match2 = re.match('^.*([xy]) *([xy]).*$', self.equation_string)
return match1 and not match2
def prep_equation(self):
'''
Attempt to convert the string to a SymPy function.
From there, use lambdify to generate a function that is efficient to compute
numerically.
'''
if self.regex_check():
q = multiprocessing.Queue()
def prep(conn):
try:
equation = sympy.sympify(self.equation_string)
q.put(equation)
except sympy.SympifyError:
q.put(None)
p = multiprocessing.Process(target=prep, args=(q,))
p.start()
# See if we can get the equation within 5 seconds
try:
equation = q.get(timeout=3)
except Queue.Empty:
equation = None
q.close()
# If the process is still running, kill it
if p.is_alive():
p.terminate()
p.join()
if equation:
self.equation = equation
x, y = sympy.symbols('x,y')
compute_func = sympy.utilities.lambdify((x, y), self.equation)
self.compute_func = compute_func
def make_plot(self):
'''Draw the plot on the figure attribute'''
if self.compute_func:
xvals, yvals = np.arange(-10, 11, 1), np.arange(-10, 11, 1)
X, Y = np.meshgrid(xvals, yvals)
U, V = np.meshgrid(np.zeros(len(xvals)), np.zeros(len(yvals)))
# Iterate through grid and compute function value at each point
# If value cannot be computed, default to 0
# If value can be computed, scale by sqrt of the magnitude
for i, a in enumerate(xvals):
for j, b in enumerate(yvals):
dx = 1
try:
dy = self.compute_func(a, b)
n = sqrt(dx + dy**2)
dy /= sqrt(n)
dx /= sqrt(n)
U[j][i] = dx
V[j][i] = dy
except (ValueError, ZeroDivisionError):
pass
# Plot the values
self.figure = plt.Figure()
axes = self.figure.add_subplot(1,1,1)
axes.quiver(X, Y, U, V, angles='xy', color='b', edgecolors=('k',))
axes.axhline(color='black')
axes.axvline(color='black')
latex = sympy.latex(self.equation)
axes.set_title(r'Direction field for $\frac{dy}{dx} = %s$' % latex, y=1.01)
def write_data(self, output):
        '''Write the figure to the output buffer as PNG and return the raw bytes'''
if self.figure:
canvas = FigureCanvas(self.figure)
self.figure.savefig(output, format='png', bbox_inches='tight')
output.seek(0)
return output.getvalue()
return None
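# Usage sketch (BytesIO is just one possible output target; the web handler
# that normally drives this class is not part of this module):
#
#   from io import BytesIO
#   eq = DiffEquation('x + y')
#   eq.prep_equation()
#   eq.make_plot()
#   png_bytes = eq.write_data(BytesIO())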
| mit | 3,456,048,695,903,881,000 | 35.679245 | 100 | 0.513889 | false | 4.324805 | false | false | false |
Hans-Cyton/cyton_robot | cyton_servo_controllers/src/cyton_servo_controllers/joint_controller.py | 2 | 8237 | # -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = '[email protected]'
import math
import rospy
from dynamixel_driver.dynamixel_const import *
from dynamixel_controllers.srv import SetSpeed
from dynamixel_controllers.srv import TorqueEnable
from dynamixel_controllers.srv import SetComplianceSlope
from dynamixel_controllers.srv import SetComplianceMargin
from dynamixel_controllers.srv import SetCompliancePunch
from dynamixel_controllers.srv import SetTorqueLimit
from std_msgs.msg import Float64
from dynamixel_msgs.msg import MotorStateList
from dynamixel_msgs.msg import JointState
class JointController:
def __init__(self, dxl_io, controller_namespace, port_namespace):
self.running = False
self.dxl_io = dxl_io
self.controller_namespace = controller_namespace
self.port_namespace = port_namespace
self.joint_name = rospy.get_param(self.controller_namespace + '/joint_name')
self.joint_speed = rospy.get_param(self.controller_namespace + '/joint_speed', 1.0)
self.compliance_slope = rospy.get_param(self.controller_namespace + '/joint_compliance_slope', None)
self.compliance_margin = rospy.get_param(self.controller_namespace + '/joint_compliance_margin', None)
self.compliance_punch = rospy.get_param(self.controller_namespace + '/joint_compliance_punch', None)
self.torque_limit = rospy.get_param(self.controller_namespace + '/joint_torque_limit', None)
self.__ensure_limits()
self.speed_service = rospy.Service(self.controller_namespace + '/set_speed', SetSpeed, self.process_set_speed)
self.torque_service = rospy.Service(self.controller_namespace + '/torque_enable', TorqueEnable, self.process_torque_enable)
self.compliance_slope_service = rospy.Service(self.controller_namespace + '/set_compliance_slope', SetComplianceSlope, self.process_set_compliance_slope)
        self.compliance_margin_service = rospy.Service(self.controller_namespace + '/set_compliance_margin', SetComplianceMargin, self.process_set_compliance_margin)
self.compliance_punch_service = rospy.Service(self.controller_namespace + '/set_compliance_punch', SetCompliancePunch, self.process_set_compliance_punch)
self.torque_limit_service = rospy.Service(self.controller_namespace + '/set_torque_limit', SetTorqueLimit, self.process_set_torque_limit)
def __ensure_limits(self):
if self.compliance_slope is not None:
if self.compliance_slope < DXL_MIN_COMPLIANCE_SLOPE: self.compliance_slope = DXL_MIN_COMPLIANCE_SLOPE
elif self.compliance_slope > DXL_MAX_COMPLIANCE_SLOPE: self.compliance_slope = DXL_MAX_COMPLIANCE_SLOPE
else: self.compliance_slope = int(self.compliance_slope)
if self.compliance_margin is not None:
if self.compliance_margin < DXL_MIN_COMPLIANCE_MARGIN: self.compliance_margin = DXL_MIN_COMPLIANCE_MARGIN
elif self.compliance_margin > DXL_MAX_COMPLIANCE_MARGIN: self.compliance_margin = DXL_MAX_COMPLIANCE_MARGIN
else: self.compliance_margin = int(self.compliance_margin)
if self.compliance_punch is not None:
if self.compliance_punch < DXL_MIN_PUNCH: self.compliance_punch = DXL_MIN_PUNCH
elif self.compliance_punch > DXL_MAX_PUNCH: self.compliance_punch = DXL_MAX_PUNCH
else: self.compliance_punch = int(self.compliance_punch)
if self.torque_limit is not None:
if self.torque_limit < 0: self.torque_limit = 0.0
elif self.torque_limit > 1: self.torque_limit = 1.0
def initialize(self):
raise NotImplementedError
def start(self):
self.running = True
self.joint_state_pub = rospy.Publisher(self.controller_namespace + '/state', JointState, queue_size=1)
self.command_sub = rospy.Subscriber(self.controller_namespace + '/command', Float64, self.process_command)
self.motor_states_sub = rospy.Subscriber('motor_states/%s' % self.port_namespace, MotorStateList, self.process_motor_states)
def stop(self):
self.running = False
self.joint_state_pub.unregister()
self.motor_states_sub.unregister()
self.command_sub.unregister()
self.speed_service.shutdown('normal shutdown')
self.torque_service.shutdown('normal shutdown')
self.compliance_slope_service.shutdown('normal shutdown')
def set_torque_enable(self, torque_enable):
raise NotImplementedError
def set_speed(self, speed):
raise NotImplementedError
def set_compliance_slope(self, slope):
raise NotImplementedError
def set_compliance_margin(self, margin):
raise NotImplementedError
def set_compliance_punch(self, punch):
raise NotImplementedError
def set_torque_limit(self, max_torque):
raise NotImplementedError
def process_set_speed(self, req):
self.set_speed(req.speed)
return [] # success
def process_torque_enable(self, req):
self.set_torque_enable(req.torque_enable)
return []
def process_set_compliance_slope(self, req):
self.set_compliance_slope(req.slope)
return []
def process_set_compliance_margin(self, req):
self.set_compliance_margin(req.margin)
return []
def process_set_compliance_punch(self, req):
self.set_compliance_punch(req.punch)
return []
def process_set_torque_limit(self, req):
self.set_torque_limit(req.torque_limit)
return []
def process_motor_states(self, state_list):
raise NotImplementedError
def process_command(self, msg):
raise NotImplementedError
def rad_to_raw(self, angle, initial_position_raw, flipped, encoder_ticks_per_radian):
""" angle is in radians """
#print 'flipped = %s, angle_in = %f, init_raw = %d' % (str(flipped), angle, initial_position_raw)
angle_raw = angle * encoder_ticks_per_radian
#print 'angle = %f, val = %d' % (math.degrees(angle), int(round(initial_position_raw - angle_raw if flipped else initial_position_raw + angle_raw)))
return int(round(initial_position_raw - angle_raw if flipped else initial_position_raw + angle_raw))
def raw_to_rad(self, raw, initial_position_raw, flipped, radians_per_encoder_tick):
return (initial_position_raw - raw if flipped else raw - initial_position_raw) * radians_per_encoder_tick
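# Note: rad_to_raw() and raw_to_rad() invert each other (up to the int()
# rounding in rad_to_raw) whenever encoder_ticks_per_radian equals
# 1.0 / radians_per_encoder_tick. Concrete controllers are expected to
# subclass JointController and override the NotImplementedError stubs above.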
| bsd-3-clause | -4,817,941,697,096,324,000 | 45.275281 | 166 | 0.70851 | false | 3.703687 | false | false | false |
timurbakibayev/trains | trains/urls.py | 1 | 2077 | """trains URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from tutu import views
from tutu import views_sim
from django.views.static import serve
from trains import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name="index"),
url(r'^track/new/$', views.new_track, name='new_track'),
url(r'^track/edit/(?P<track_id>[0-9]*)$', views.edit_track, name="edit_track"),
url(r'^track/delete/(?P<track_id>[0-9]*)$', views.delete_track, name="delete_track"),
url(r'^track/(?P<track_id>[0-9]*)/switch/edit/(?P<switch_id>[0-9]*)$', views.edit_switch, name="edit_switch"),
url(r'^track/(?P<track_id>[0-9]*)/switch/delete/(?P<switch_id>[0-9]*)$', views.delete_switch, name="delete_switch"),
url(r'^track/(?P<track_id>[0-9]*)$', views.show_track, name="show_track"),
url(r'^track/(?P<track_id>[0-9]*)/thumbnail$', views.thumbnail_track, name="thumbnail_track"),
url(r'^track/(?P<track_id>[0-9]*)/new_switch$', views.new_switch, name="new_switch"),
url(r'^track/(?P<track_id>[0-9]*)/simulation/$', views_sim.simulation, name="simulation"),
url(r'^reset$', views.reset, name="reset"),
url(r'^serve/(?P<path>.*)$', serve, {'document_root': settings.PICS_DIR}),
url(r'^files/(?P<file_url>.*)$', views_sim.serve_upload_files, name="file"),
url(r'^track/(?P<track_id>[0-9]*)/simulation_start/$', views_sim.simulation_start, name="simulation_start"),
]
| gpl-3.0 | 1,833,003,297,626,249,000 | 52.25641 | 120 | 0.658161 | false | 3.1 | false | false | false |
why2pac/dp-tornado | dp_tornado/helper/io/file/zip.py | 1 | 1635 | # -*- coding: utf-8 -*-
import zipfile
from dp_tornado.engine.helper import Helper as dpHelper
class ZipHelper(dpHelper):
def archive(self, destfile, srcfiles, mode='w', compression=zipfile.ZIP_STORED, allowZip64=False):
archive = zipfile.ZipFile(file=destfile, mode=mode, compression=compression, allowZip64=allowZip64)
if not isinstance(srcfiles, (list, tuple)):
srcfiles = (srcfiles, )
for srcfile in srcfiles:
arcname = None
compress_type = None
if isinstance(srcfile, (tuple, list)):
filename = srcfile[0]
arcname = srcfile[1] if len(srcfile) > 1 else arcname
compress_type = srcfile[2] if len(srcfile) > 2 else compress_type
else:
filename = srcfile
self._archive_append(archive, filename, arcname, compress_type)
archive.close()
return True
def _archive_append(self, archive, path, arcname, compress_type):
if self.helper.io.path.is_file(path):
archive.write(filename=path, arcname=arcname, compress_type=compress_type)
elif self.helper.io.path.is_dir(path):
for e in self.helper.io.path.browse(path):
self._archive_append(archive=archive, path=e, arcname=arcname, compress_type=compress_type)
def unarchive(self, srcfile, destpath, mode='r', compression=zipfile.ZIP_STORED, allowZip64=False):
with zipfile.ZipFile(file=srcfile, mode=mode, compression=compression, allowZip64=allowZip64) as archive:
archive.extractall(destpath)
return True
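# Usage sketch (paths and the `helper` handle are illustrative; in dp-tornado
# this class is normally reached through the framework helper tree, as the
# self.helper.io.path calls above suggest, rather than instantiated directly):
#
#   helper.archive('backup.zip', [('data/a.txt', 'a.txt'), 'data/logs'])
#   helper.unarchive('backup.zip', '/tmp/restore')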
| mit | 218,176,716,657,059,700 | 35.333333 | 113 | 0.640367 | false | 3.968447 | false | false | false |
inhumanitas/hghooks | build/lib/hghooks/__init__.py | 2 | 3527 | # Copyright (c) 2010 by Lorenzo Gil Sanchez <[email protected]>
#
# This file is part of hghooks.
#
# hghooks is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# hghooks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with hghooks. If not, see <http://www.gnu.org/licenses/>.
import os.path
import re
import shutil
import tempfile
version = "0.6.0"
re_options = re.IGNORECASE | re.MULTILINE | re.DOTALL
skip_pattern = re.compile('# hghooks: (.*)', re_options)
class CheckerManager(object):
def __init__(self, ui, repo, node, skip_text=None, extension='.py'):
self.ui = ui
self.repo = repo
self.node = node
self.skip_text = skip_text
self.extension = extension
self.strict_checking = self.ui.configbool('hghooks',
'strict_checking')
def skip_file(self, filename, filedata):
if not filename.endswith(self.extension):
return True
for match in skip_pattern.findall(filedata):
if self.skip_text in match:
return True
return False
def check(self, checker):
warnings = 0
total_revs = len(self.repo.changelog)
if self.strict_checking:
current_rev = self.repo[self.node].rev()
else:
# check only files from the last revision
current_rev = total_revs - 1
while current_rev < total_revs:
rev_warnings = 0
directory = tempfile.mkdtemp(suffix='-r%d' % current_rev,
prefix='hghooks')
current_rev += 1
self.ui.debug("Checking revision %d\n" % current_rev)
ctx = self.repo[current_rev - 1]
description = ctx.description()
if self.skip_text and self.skip_text in description:
continue
files_to_check = {}
existing_files = tuple(ctx)
for filename in ctx.files():
if filename not in existing_files:
continue # the file was removed in this changeset
filectx = ctx.filectx(filename)
filedata = filectx.data()
if self.skip_text and self.skip_file(filename, filedata):
continue
full_path = os.path.join(directory, filename)
files_to_check[full_path] = filedata
if files_to_check:
errors_num, log = checker(files_to_check, description)
rev_warnings += errors_num
if rev_warnings:
self.ui.warn('%s\n\n %d errors in revision %d\n' %
(log, rev_warnings, current_rev - 1))
else:
self.ui.debug('No warnings in revision %d. Good job!\n' %
(current_rev - 1))
warnings += rev_warnings
shutil.rmtree(directory)
if warnings:
return True # failure
else:
return False # success
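# A checker passed to CheckerManager.check() is any callable taking
# (files_to_check, description) and returning (number_of_errors, log_text),
# where files_to_check maps temporary file paths to file contents.
# Minimal sketch (the "TODO" rule is purely illustrative):
#
#   def todo_checker(files_to_check, description):
#       messages = []
#       for path, data in files_to_check.items():
#           if 'TODO' in data:
#               messages.append('%s: contains TODO' % path)
#       return len(messages), '\n'.join(messages)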
| lgpl-3.0 | 8,400,114,941,443,210,000 | 33.578431 | 77 | 0.575276 | false | 4.239183 | false | false | false |
raphaelvalentin/QTModel | UI_Widgets/QCursor/__init__.py | 1 | 1437 | from PyQt4 import QtCore, QtGui
import Ui_Cursor
class Ui_QCursorWindow(QtCore.QObject):
valueChanged = QtCore.Signal(str, float)
def setupUi(self, Form, data=[]):
Form.setObjectName( ("Form"))
Form.resize(900, 900)
#self.centralwidget = QtGui.QWidget(Form)
#self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName("verticalLayout")
if len(data):
self.setupCursor(Form, data)
spacerItem = QtGui.QSpacerItem(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
QtCore.QMetaObject.connectSlotsByName(Form)
def valueChangedSlot(self, key, value):
self.valueChanged.emit(key, value)
def setupCursor(self, Form, data):
for key, vmin, value, vmax in data:
self.parent = QtGui.QWidget(Form)
ui_widget = Ui_Cursor.Ui_Cursor()
ui_widget.setupUi(self.parent)
ui_widget.setLabel(key)
ui_widget.valueChanged.connect(self.valueChangedSlot)
ui_widget.setRange(vmin, vmax)
ui_widget.setValue(value)
self.verticalLayout.addWidget(self.parent)
| gpl-2.0 | 5,900,187,781,599,456,000 | 35.846154 | 154 | 0.654836 | false | 4.070822 | false | false | false |
jaumeg3/WebPractDjango | FilmRevolutionApp/migrations/0009_auto_20170521_1048.py | 1 | 1477 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FilmRevolutionApp', '0008_auto_20170521_1046'),
]
operations = [
migrations.AlterField(
model_name='actor',
name='age',
field=models.IntegerField(
validators=[django.core.validators.MaxValueValidator(120),
django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='actor',
name='birthday',
field=models.DateField(),
),
migrations.AlterField(
model_name='actor',
name='deathday',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='director',
name='age',
field=models.IntegerField(
validators=[django.core.validators.MaxValueValidator(120),
django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='director',
name='birthday',
field=models.DateField(),
),
migrations.AlterField(
model_name='director',
name='deathday',
field=models.DateField(null=True, blank=True),
),
]
| gpl-3.0 | 4,179,603,250,492,839,000 | 29.770833 | 74 | 0.544347 | false | 4.826797 | false | false | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/paho/mqtt/matcher.py | 2 | 2776 | class MQTTMatcher(object):
"""Intended to manage topic filters including wildcards.
Internally, MQTTMatcher use a prefix tree (trie) to store
values associated with filters, and has an iter_match()
method to iterate efficiently over all filters that match
some topic name."""
class Node(object):
__slots__ = '_children', '_content'
def __init__(self):
self._children = {}
self._content = None
def __init__(self):
self._root = self.Node()
def __setitem__(self, key, value):
"""Add a topic filter :key to the prefix tree
and associate it to :value"""
node = self._root
for sym in key.split('/'):
node = node._children.setdefault(sym, self.Node())
node._content = value
def __getitem__(self, key):
"""Retrieve the value associated with some topic filter :key"""
try:
node = self._root
for sym in key.split('/'):
node = node._children[sym]
if node._content is None:
raise KeyError(key)
return node._content
except KeyError:
raise KeyError(key)
def __delitem__(self, key):
"""Delete the value associated with some topic filter :key"""
lst = []
try:
parent, node = None, self._root
for k in key.split('/'):
parent, node = node, node._children[k]
lst.append((parent, k, node))
# TODO
node._content = None
except KeyError:
raise KeyError(key)
else: # cleanup
for parent, k, node in reversed(lst):
if node._children or node._content is not None:
break
del parent._children[k]
def iter_match(self, topic):
"""Return an iterator on all values associated with filters
that match the :topic"""
lst = topic.split('/')
normal = not topic.startswith('$')
def rec(node, i=0):
if i == len(lst):
if node._content is not None:
yield node._content
else:
part = lst[i]
if part in node._children:
for content in rec(node._children[part], i + 1):
yield content
if '+' in node._children and (normal or i > 0):
for content in rec(node._children['+'], i + 1):
yield content
if '#' in node._children and (normal or i > 0):
content = node._children['#']._content
if content is not None:
yield content
return rec(self._root)
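if __name__ == "__main__":
    # Small usage example; the topic filters and stored values are illustrative.
    matcher = MQTTMatcher()
    matcher["sensors/+/temperature"] = "temperature handler"
    matcher["sensors/#"] = "catch-all handler"
    for value in matcher.iter_match("sensors/kitchen/temperature"):
        print(value)  # both stored values match this topic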
| gpl-2.0 | 3,540,635,674,852,173,300 | 34.589744 | 71 | 0.505403 | false | 4.769759 | false | false | false |
whausen/part | src/adhocracy/controllers/milestone.py | 1 | 9009 | from datetime import date, datetime, time
import logging
import formencode
from formencode import htmlfill, Invalid, validators
from pylons import request, tmpl_context as c
from pylons.controllers.util import redirect
from pylons.decorators import validate
from pylons.i18n import _
from adhocracy import forms, model
from adhocracy.lib import helpers as h, pager, tiles, watchlist
from adhocracy.lib.auth import csrf, require
from adhocracy.lib.base import BaseController
from adhocracy.lib.instance import RequireInstance
from adhocracy.lib.templating import render, render_json
from adhocracy.lib.util import get_entity_or_abort
import adhocracy.lib.text as text
log = logging.getLogger(__name__)
class MilestoneNewForm(formencode.Schema):
allow_extra_fields = True
class MilestoneCreateForm(MilestoneNewForm):
title = validators.String(max=2000, min=4, not_empty=True)
text = validators.String(max=60000, min=4, not_empty=True)
category = forms.ValidCategoryBadge(if_missing=None, if_empty=None)
time = forms.ValidDate()
class MilestoneEditForm(formencode.Schema):
allow_extra_fields = True
class MilestoneUpdateForm(MilestoneEditForm):
title = validators.String(max=2000, min=4, not_empty=True)
text = validators.String(max=60000, min=4, not_empty=True)
category = forms.ValidCategoryBadge(if_missing=None, if_empty=None)
time = forms.ValidDate()
class MilestoneController(BaseController):
def __init__(self):
super(MilestoneController, self).__init__()
c.active_subheader_nav = 'milestones'
@RequireInstance
def index(self, format="html"):
require.milestone.index()
milestones = model.Milestone.all(instance=c.instance)
broken = [m for m in milestones if m.time is None]
for milestone in broken:
log.warning('Time of Milestone is None: %s' %
h.entity_url(milestone))
milestones = [m for m in milestones if m.time is not None]
today = datetime.combine(date.today(), time(0, 0))
past_milestones = [m for m in milestones if m.time < today]
c.show_past_milestones = len(past_milestones)
c.past_milestones_pager = pager.milestones(past_milestones)
current_milestones = [m for m in milestones if m not in
past_milestones]
c.show_current_milestones = len(current_milestones)
c.current_milestones_pager = pager.milestones(current_milestones)
c.milestones = past_milestones + current_milestones # for the timeline
if format == 'json':
return render_json(c.milestones_pager)
c.tile = tiles.instance.InstanceTile(c.instance)
c.tutorial = 'milestone_index'
c.tutorial_intro = _('tutorial_milestones_tab')
return render("/milestone/index.html")
@RequireInstance
@validate(schema=MilestoneNewForm(), form='bad_request',
post_only=False, on_get=True)
def new(self, errors=None):
require.milestone.create()
c.categories = model.CategoryBadge.all(instance=c.instance)
defaults = dict(request.params)
defaults['watch'] = defaults.get('watch', True)
return htmlfill.render(render("/milestone/new.html"),
defaults=defaults, errors=errors,
force_defaults=False)
@RequireInstance
@csrf.RequireInternalRequest(methods=['POST'])
def create(self, format='html'):
require.milestone.create()
c.categories = model.CategoryBadge.all(instance=c.instance)
try:
self.form_result = MilestoneCreateForm().to_python(request.params)
except Invalid, i:
return self.new(errors=i.unpack_errors())
category = self.form_result.get('category')
milestone = model.Milestone.create(c.instance, c.user,
self.form_result.get("title"),
self.form_result.get('text'),
self.form_result.get('time'),
category=category)
model.meta.Session.commit()
watchlist.check_watch(milestone)
#event.emit(event.T_PROPOSAL_CREATE, c.user, instance=c.instance,
# topics=[proposal], proposal=proposal, rev=description.head)
redirect(h.entity_url(milestone, format=format))
@RequireInstance
@validate(schema=MilestoneEditForm(), form="bad_request",
post_only=False, on_get=True)
def edit(self, id, errors={}):
c.categories = model.CategoryBadge.all(instance=c.instance)
c.milestone = get_entity_or_abort(model.Milestone, id)
require.milestone.edit(c.milestone)
defaults = {'category': (str(c.milestone.category.id) if
c.milestone.category else None)}
defaults.update(dict(request.params))
return htmlfill.render(render("/milestone/edit.html"),
defaults=defaults,
errors=errors, force_defaults=False)
@RequireInstance
@csrf.RequireInternalRequest(methods=['POST'])
def update(self, id, format='html'):
try:
c.milestone = get_entity_or_abort(model.Milestone, id)
self.form_result = MilestoneUpdateForm().to_python(request.params)
except Invalid, i:
return self.edit(id, errors=i.unpack_errors())
require.milestone.edit(c.milestone)
c.milestone.title = self.form_result.get('title')
c.milestone.text = self.form_result.get('text')
c.milestone.category = self.form_result.get('category')
c.milestone.time = self.form_result.get('time')
model.meta.Session.commit()
watchlist.check_watch(c.milestone)
#event.emit(event.T_PROPOSAL_EDIT, c.user, instance=c.instance,
# topics=[c.proposal], proposal=c.proposal, rev=_text)
redirect(h.entity_url(c.milestone))
@RequireInstance
def show(self, id, format='html'):
c.milestone = get_entity_or_abort(model.Milestone, id)
require.milestone.show(c.milestone)
if format == 'json':
return render_json(c.milestone)
c.tile = tiles.milestone.MilestoneTile(c.milestone)
# proposals .. directly assigned
by_milestone = model.Proposal.by_milestone(c.milestone,
instance=c.instance)
# proposals .. with the same category
by_category = []
if c.milestone.category:
by_category = [d for d in c.milestone.category.delegateables
if isinstance(d, model.Proposal)
and not d.is_deleted()]
proposals = list(set(by_milestone + by_category))
c.proposals_pager = pager.proposals(proposals, size=20,
enable_sorts=False)
c.show_proposals_pager = len(proposals)
# pages
pages = model.Page.by_milestone(c.milestone,
instance=c.instance,
include_deleted=False,
functions=[model.Page.NORM])
c.pages_pager = pager.pages(pages, size=20, enable_sorts=False)
c.show_pages_pager = len(pages) and c.instance.use_norms
self._common_metadata(c.milestone)
c.tutorial_intro = _('tutorial_milestone_details_tab')
c.tutorial = 'milestone_show'
return render("/milestone/show.html")
@RequireInstance
def ask_delete(self, id):
c.milestone = get_entity_or_abort(model.Milestone, id)
require.milestone.delete(c.milestone)
c.tile = tiles.milestone.MilestoneTile(c.milestone)
return render('/milestone/ask_delete.html')
@RequireInstance
@csrf.RequireInternalRequest()
def delete(self, id):
c.milestone = get_entity_or_abort(model.Milestone, id)
require.milestone.delete(c.milestone)
#event.emit(event.T_milestone_DELETE, c.user, instance=c.instance,
# topics=[c.milestone], milestone=c.milestone)
c.milestone.delete()
model.meta.Session.commit()
h.flash(_("The milestone %s has been deleted.") % c.milestone.title,
'success')
redirect(h.entity_url(c.instance))
def _common_metadata(self, milestone):
h.add_meta("description",
text.meta_escape(milestone.text,
markdown=False)[0:160])
h.add_meta("dc.title",
text.meta_escape(milestone.title, markdown=False))
h.add_meta("dc.date",
(milestone.time and milestone.time.strftime("%Y-%m-%d") or
''))
h.add_meta("dc.author",
text.meta_escape(milestone.creator.name, markdown=False))
| agpl-3.0 | -5,108,843,781,474,056,000 | 40.136986 | 79 | 0.615607 | false | 3.927201 | false | false | false |
albertaleksieiev/zpy | Zpy/storage/SuffixTree.py | 1 | 1964 | class SuffixTree:
"""
Suffix tree
https://en.wikipedia.org/wiki/Suffix_tree
"""
def __init__(self, nodes):
self.root = {"EOW":False, "next": {} }
for node in nodes:
self.addNode(node['word'], node['leaf_data'])
def addNode(self,word, leaf_data):
"""
        Add a word to the tree, storing leaf_data at the word's final node
:param word: word
:param leaf_data: Leaf data
:return: None
>>> st = SuffixTree([])
>>> st.addNode('abc','1M$')
>>> st.addNode('qwe', '2M$')
>>> st.addNode('abd', '3M$')
>>> st.root['next']['a']['next']['b']['next']['c']['EOW']
'1M$'
>>> st.root['next']['a']['next']['b']['next']['d']['EOW']
'3M$'
>>> 'c' in st.root['next']['a']['next']['b']['next'] and 'd' in st.root['next']['a']['next']['b']['next']
True
>>> 'q' not in st.root['next']['q']['next']['w']['next']
True
"""
node = self.root
for ch in word:
if ch not in node['next']:
node['next'] [ch] = {"EOW":False, "next": {} }
node = node['next'][ch]
node['EOW'] = leaf_data
def find(self, word):
"""
        Find a stored word that matches the beginning of `word`
        (the match must end at the end of `word` or at a space)
        :param word: query string to look up in the tree
        :return: leaf data if such a word exists, else False
>>> st = SuffixTree([{'word':'Peppa','leaf_data':'PIG'}, {'word':'Mike', 'leaf_data':'Pro'}, \
{'word':'Peppa','leaf_data':'OVERRITE'}])
>>> st.find('Peppa')
'OVERRITE'
>>> st.find('Mike john')
'Pro'
>>> st.find('Mikeing')
False
"""
node = self.root
for index, ch in enumerate(word):
if ch not in node['next']:
return False
node = node['next'][ch]
if node['EOW'] and (index + 1 == len(word) or word[index + 1] == " "):
return node['EOW']
return False
| mit | 7,206,471,455,957,749,000 | 33.45614 | 113 | 0.440428 | false | 3.583942 | false | false | false |
PMBio/mtSet | mtSet/pycore/utils/simPhenoCore.py | 1 | 2315 | import sys
sys.path.append('./../../..')
from optparse import OptionParser
import scipy as SP
import os
import mtSet.pycore.utils.simulator as sim
from mtSet.pycore.utils.read_utils import readCovarianceMatrixFile
from mtSet.pycore.utils.read_utils import readBimFile
from mtSet.pycore.external.limix import plink_reader
def genPhenoCube(sim,Xr,vTotR=4e-3,nCausalR=10,pCommonR=0.8,vTotBg=0.4,pHidd=0.6,pCommon=0.8):
# region
nCommonR = int(SP.around(nCausalR*pCommonR))
# background
vCommonBg = pCommon*vTotBg
# noise
vTotH = pHidd*(1-vTotR-vTotBg)
vTotN = (1-pHidd)*(1-vTotR-vTotBg)
vCommonH = pCommon*vTotH
all_settings = {
'vTotR':vTotR,'nCommonR':nCommonR,'nCausalR':nCausalR,
'vTotBg':vTotBg,'vCommonBg':vCommonBg,'pCausalBg':1.,'use_XX':True,
'vTotH':vTotH,'vCommonH':vCommonH,'nHidden':10,
'vTotN':vTotN,'vCommonN':0.}
Y,info = sim.genPheno(Xr,**all_settings)
return Y,info
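# Note: the variance components above always satisfy
# vTotR + vTotBg + vTotH + vTotN == 1, because vTotH and vTotN split the
# remainder (1 - vTotR - vTotBg) according to pHidd.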
def simPheno(options):
print 'importing covariance matrix'
if options.cfile is None: options.cfile=options.bfile
XX = readCovarianceMatrixFile(options.cfile,readEig=False)['K']
print 'simulating phenotypes'
SP.random.seed(options.seed)
simulator = sim.CSimulator(bfile=options.bfile,XX=XX,P=options.nTraits)
Xr,region = simulator.getRegion(chrom_i=options.chrom,size=options.windowSize,min_nSNPs=options.nCausalR,pos_min=options.pos_min,pos_max=options.pos_max)
Y,info = genPhenoCube(simulator,Xr,vTotR=options.vTotR,nCausalR=options.nCausalR,pCommonR=options.pCommonR,vTotBg=options.vTotBg,pHidd=options.pHidden,pCommon=options.pCommon)
print 'exporting pheno file'
if options.pfile is not None:
outdir = os.path.split(options.pfile)[0]
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
identifier = '_seed%d_nTraits%d_wndSize%d_vTotR%.2f_nCausalR%d_pCommonR%.2f_vTotBg%.2f_pHidden%.2f_pCommon%.2f'%(options.seed,options.nTraits,options.windowSize,options.vTotR,options.nCausalR,options.pCommonR,options.vTotBg,options.pHidden,options.pCommon)
options.pfile = os.path.split(options.bfile)[-1] + '%s'%identifier
pfile = options.pfile + '.phe'
rfile = options.pfile + '.phe.region'
SP.savetxt(pfile,Y)
SP.savetxt(rfile,region)
| apache-2.0 | 5,678,166,294,457,854,000 | 41.090909 | 264 | 0.713607 | false | 2.560841 | false | false | false |
Bolt64/my_code | euler/square_remainders.py | 1 | 1168 | #!/usr/bin/env python3
from fractions import gcd
from time import sleep
"""
Use the frickin' binomial expansion. Problem solved.
"""
def get_cyclic_group(seed, modifier):
current=seed+modifier
cyclic_group=[1, current]
mod=seed**2
counter=2
while True:
if counter%2==0:
next_element=(cyclic_group[-2]+2*current)%mod
else:
next_element=(cyclic_group[-2]-2*current)%mod
if next_element in (cyclic_group[0], cyclic_group[1]):
return cyclic_group
cyclic_group.append(next_element)
counter+=1
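# By the binomial expansion, working modulo a**2:
#   (a-1)**n + (a+1)**n == 2                 when n is even
#   (a-1)**n + (a+1)**n == (2*n*a) % a**2    when n is odd
# so the maximum remainder for a given a (see get_r_max below) is attained at
# an odd exponent.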
def lcm(a,b):
if a%b==0:
return a
elif b%a==0:
return b
else:
return (a*b)//gcd(a,b)
def combine_groups(group_a, group_b, mod):
a=len(group_a)
b=len(group_b)
n=lcm(a, b)
combined_group=[]
for i in range(n):
combined_group.append((group_a[i%a]+group_b[i%b])%mod)
return combined_group
def get_r_max(n):
mod=n**2
a=get_cyclic_group(n-1, -1)
b=get_cyclic_group(n+1, 1)
return max(combine_groups(a,b,mod))
def main(lower=3, upper=1000):
return sum(get_r_max(i) for i in range(lower, upper+1))
| mit | -1,983,755,178,335,347,700 | 22.836735 | 62 | 0.596747 | false | 2.912718 | false | false | false |
perimosocordiae/sparray | bench/benchmarks/construction.py | 1 | 1438 | import numpy as np
import scipy.sparse as ss
from sparray import FlatSparray
class Construction2D(object):
def setup(self):
num_rows, num_cols = 3000, 4000
self.spm = ss.rand(num_rows, num_cols, density=0.1, format='coo')
self.arr = self.spm.A
self.data = self.spm.data
self.indices = self.spm.row * num_cols + self.spm.col
self.spm_csr = self.spm.tocsr()
def time_init(self):
FlatSparray(self.indices, self.data, shape=self.arr.shape)
def time_from_ndarray(self):
FlatSparray.from_ndarray(self.arr)
def time_from_spmatrix_coo(self):
FlatSparray.from_spmatrix(self.spm)
def time_from_spmatrix_csr(self):
FlatSparray.from_spmatrix(self.spm_csr)
class ConstructionND(object):
params = [[(1200000,), (1200,1000), (120,100,100), (20,30,40,50)]]
param_names = ['shape']
def setup(self, shape):
nnz = 10000
size = np.prod(shape)
self.indices = np.random.choice(size, nnz, replace=False)
self.sorted_indices = np.sort(self.indices)
self.data = np.ones(nnz, dtype=float)
arr = np.zeros(size, dtype=float)
arr[self.sorted_indices] = 1
self.arr = arr.reshape(shape)
def time_init(self, shape):
FlatSparray(self.indices, self.data, shape=shape)
def time_canonical_init(self, shape):
FlatSparray(self.sorted_indices, self.data, shape=shape, is_canonical=True)
def time_from_ndarray(self, shape):
FlatSparray.from_ndarray(self.arr)
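# Note: airspeed velocity (asv) runs setup() before timing each time_* method,
# so the timed bodies above measure only the FlatSparray construction calls.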
| mit | -3,325,464,078,937,303,000 | 27.76 | 79 | 0.685675 | false | 2.910931 | false | false | false |
xhava/hippyvm | hippy/module/spl/spl.py | 2 | 43355 | import os
from hippy.builtin import (wrap_method, Optional, ThisUnwrapper,
handle_as_exception, StreamContextArg, Nullable)
from hippy.objects.instanceobject import W_InstanceObject
from hippy.objects.intobject import W_IntObject
from hippy.objects.resources.file_resource import W_FileResource
from hippy.objects.resources.dir_resource import W_DirResource
from hippy.error import PHPException
from hippy.builtin_klass import def_class, GetterSetterWrapper
from hippy.module.spl.interface import k_SeekableIterator, k_RecursiveIterator
from hippy.module.standard.file.funcs import (_is_dir, _is_file, _is_link,
_is_executable, _is_readable, _is_writable, _filetype, _fseek, _fstat,
_fopen, _basename, FopenError)
from hippy import rpath
from hippy import consts
from hippy.module.spl.exception import (
k_LogicException, k_RuntimeException, k_UnexpectedValueException)
class W_SplFileInfo(W_InstanceObject):
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
assert isinstance(w_res, W_SplFileInfo)
w_res.file_name = self.file_name
w_res.path_name = self.path_name
return w_res
class W_SplFileObject(W_SplFileInfo):
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
assert isinstance(w_res, W_SplFileObject)
w_res.file_name = self.file_name
w_res.path_name = self.path_name
w_res.delimiter = self.delimiter
w_res.enclosure = self.enclosure
w_res.open_mode = self.open_mode
return w_res
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), str],
name='SplFileInfo::__construct')
def construct(interp, this, file_name):
this.file_name = file_name
this.path_name = rpath.realpath(file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::__toString')
def spl_toString(interp, this):
return interp.space.wrap(this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), Optional(str)],
name='SplFileInfo::getBasename')
def get_basename(interp, this, suffix=''):
return _basename(interp.space, this.file_name, suffix)
def _extension(interp, filename):
name_split = filename.rsplit('.', 1)
if len(name_split) == 2:
filename, extension = name_split
else:
extension = ''
return interp.space.wrap(extension)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getExtension')
def get_extension(interp, this):
path = this.file_name
filename = rpath.split(path)[1]
return _extension(interp, filename)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getFilename')
def get_filename(interp, this):
return _get_filename(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPath')
def get_path(interp, this):
parts = this.file_name.split('/')
parts.pop()
path = ''
for i in parts:
path += i + '/'
path = path.rstrip('/')
return interp.space.wrap(path)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPathname')
def get_pathname(interp, this):
return interp.space.wrap(this.file_name)
def _get_group(interp, filename):
res = os.stat(filename).st_gid
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getGroup', error_handler=handle_as_exception)
def get_group(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _get_group(interp, filename)
except OSError:
interp.throw("SplFileInfo::getGroup(): stat failed for %s" % filename,
klass=k_RuntimeException)
def _get_inode(interp, filename):
res = os.stat(filename).st_ino
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getInode', error_handler=handle_as_exception)
def get_inode(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _get_inode(interp, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getInode(): stat failed for %s" % filename)]))
def _get_owner(interp, filename):
res = os.stat(filename).st_uid
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getOwner', error_handler=handle_as_exception)
def get_owner(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _get_owner(interp, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getOwner(): stat failed for %s" % filename)]))
def _get_perms(interp, filename):
res = os.stat(filename).st_mode
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getPerms', error_handler=handle_as_exception)
def get_perms(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _get_perms(interp, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getPerms(): stat failed for %s" % filename)]))
def _get_size(interp, filename):
res = os.stat(filename).st_size
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getSize', error_handler=handle_as_exception)
def get_size(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _get_size(interp, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getSize(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getType', error_handler=handle_as_exception)
def get_type(interp, this):
filename = this.file_name
if not filename:
return interp.space.w_False
try:
return _filetype(interp.space, filename)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getType(): stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isDir')
def is_dir(interp, this):
filename = this.file_name
assert filename is not None
return _is_dir(interp.space, filename)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isLink')
def is_link(interp, this):
filename = this.file_name
assert filename is not None
return _is_link(interp.space, filename)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isExecutable')
def is_executable(interp, this):
return _is_executable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isFile')
def is_file(interp, this):
return _is_file(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isReadable')
def is_readable(interp, this):
return _is_readable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::isWritable')
def is_writable(interp, this):
return _is_writable(interp.space, this.file_name)
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getATime', error_handler=handle_as_exception)
def getatime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_atime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getATime(): "
"stat failed for %s" % filename)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getCTime', error_handler=handle_as_exception)
def getctime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_ctime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getCTime(): "
"stat failed for %s" % this.file_name)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getMTime', error_handler=handle_as_exception)
def getmtime(interp, this):
filename = this.file_name
assert filename is not None
try:
res = os.stat(filename).st_mtime
return interp.space.wrap(int(res))
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getMTime(): "
"stat failed for %s" % this.file_name)]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getRealPath')
def get_realpath(interp, this):
try:
path = rpath.realpath(this.file_name)
return interp.space.wrap(path)
except OSError:
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo)],
name='SplFileInfo::getLinkTarget',
error_handler=handle_as_exception)
def get_linktarget(interp, this):
filename = this.file_name
assert filename is not None
try:
return interp.space.wrap(os.readlink(filename))
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::getLinkTarget(): %s" % os.strerror(e.errno))]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileInfo), Optional(str),
Optional(bool), Optional(Nullable(StreamContextArg(None)))],
name='SplFileInfo::openFile', error_handler=handle_as_exception)
def openfile(interp, this, open_mode='r', use_include_path=False, w_ctx=None):
if open_mode == '':
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile(): Invalid parameters")]))
args = [interp.space.wrap(this.file_name), interp.space.wrap(open_mode),
interp.space.wrap(use_include_path)]
if w_ctx:
if not interp.space.is_resource(w_ctx):
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile() expects "
"parameter 3 to be resource, %s given"
% interp.space.get_type_name(w_ctx.tp).lower())]))
args.append(w_ctx)
try:
file_object = SplFileObjectClass.call_args(interp, args)
return file_object
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileInfo::openFile(): %s" % os.strerror(e.errno))]))
def _get_pathname(interp, this):
return interp.space.wrap(this.path_name)
def _set_pathname(interp, this, value):
raise NotImplementedError()
def _get_filename(interp, this):
if this.file_name:
i = this.file_name.rfind('/') + 1
assert i >= 0
return interp.space.wrap(this.file_name[i:])
def _set_filename(interp, this, value):
raise NotImplementedError()
k_SplFileInfo = def_class(
'SplFileInfo',
methods=[construct,
spl_toString,
get_basename,
get_extension,
get_filename,
get_path,
get_pathname,
get_group,
get_inode,
get_owner,
get_perms,
get_size,
get_type,
is_dir,
is_link,
is_executable,
is_file,
is_readable,
is_writable,
getatime,
getctime,
getmtime,
get_realpath,
get_linktarget,
openfile],
properties=[GetterSetterWrapper(_get_pathname, _set_pathname,
"pathName", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_filename, _set_filename,
"fileName", consts.ACC_PRIVATE), ],
instance_class=W_SplFileInfo)
SFO_DROP_NEW_LINE = 1
SFO_READ_AHEAD = 2
SFO_SKIP_EMPTY = 4
SFO_READ_CSV = 8
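# These mirror PHP's SplFileObject class constants (DROP_NEW_LINE, READ_AHEAD,
# SKIP_EMPTY, READ_CSV) registered on the class below; they are OR-ed together
# into the per-object `flags` bit mask.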
def _sfo_readline(interp, sfo):
if sfo.open_mode not in ('w', 'a', 'x', 'c'):
return sfo.w_res.readline(sfo.flags & SFO_DROP_NEW_LINE)
else:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap("SplFileObject: File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), str, Optional(str),
Optional(bool), Optional(Nullable(StreamContextArg(None)))],
name='SplFileObject::__construct',
error_handler=handle_as_exception)
def sfo_construct(interp, this, filename, open_mode='r',
use_include_path=False, w_ctx=None):
this.file_name = filename
this.path_name = rpath.realpath(filename)
this.delimiter = ","
this.enclosure = '"'
this.flags = 0
this.open_mode = open_mode
this.use_include_path = use_include_path
this.w_res = None
this.max_line_len = 0
if w_ctx:
if not interp.space.is_resource(w_ctx):
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::__construct() expects "
"parameter 4 to be resource, %s given"
% interp.space.get_type_name(w_ctx.tp).lower())]))
assert filename is not None
if os.path.isdir(filename):
raise interp.throw("Cannot use SplFileObject with directories",
klass=k_LogicException)
try:
this.w_res = _fopen(interp.space, filename, this.open_mode,
use_include_path, w_ctx)
if this.w_res == interp.space.w_False:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::__construct(): Failed to open stream")]))
except FopenError as e:
raise PHPException(k_RuntimeException.call_args(interp,
[interp.space.wrap(e.reasons.pop())]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::rewind')
def sfo_rewind(interp, this):
try:
this.w_res.rewind()
except OSError, e:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::rewind(): %s" % os.strerror(e.errno))]))
if this.flags & SFO_READ_AHEAD:
_sfo_readline(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::valid')
def sfo_valid(interp, this):
return interp.space.newbool(not this.w_res.feof())
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::seek')
def sfo_seek(interp, this, line_pos):
if line_pos < 0:
raise interp.throw("SplFileObject::seek(): Can't seek file %s to "
"negative line %d" % (this.file_name, line_pos),
klass=k_LogicException)
this.w_res.seek_to_line(line_pos, this.flags & SFO_DROP_NEW_LINE)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getChildren')
def sfo_get_children(interp, this):
return interp.space.w_Null
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::hasChildren')
def sfo_has_children(interp, this):
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), str, Optional(int)],
name='SplFileObject::fwrite')
def sfo_fwrite(interp, this, data, length=-1):
try:
if length > 0:
n = this.w_res.write(data, length)
else:
n = this.w_res.writeall(data)
this.w_res.flush()
return interp.space.newint(n)
except IOError:
return interp.space.w_Null
except ValueError:
return interp.space.w_Null
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetc')
def sfo_fgetc(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
res = w_res.read(1)
if w_res.feof():
return interp.space.w_False
if res == os.linesep:
w_res.cur_line_no += 1
return interp.space.newstr(res)
def _fgets(interp, this):
line = _sfo_readline(interp, this)
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
if not line:
w_res.eof = True
return interp.space.w_False
return interp.space.newstr(line)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), 'args_w'],
name='SplFileObject::fgets',
error_handler=handle_as_exception)
def sfo_fgets(interp, this, args_w=[]):
if len(args_w) != 0:
interp.space.ec.warn("SplFileObject::fgets() expects exactly 0 "
"parameters, %d given" % len(args_w))
return interp.space.w_Null
try:
return _fgets(interp, this)
except IOError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::fgets(): File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getCurrentLine',
error_handler=handle_as_exception)
def sfo_get_current_line(interp, this):
try:
return _fgets(interp, this)
except IOError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"SplFileObject::fgets(): File cannot be read")]))
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::key')
def sfo_key(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
return interp.space.newint(w_res.cur_line_no)
def _current(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
res = w_res.cur_line
if not res:
res = _sfo_readline(interp, this)
return interp.space.wrap(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::current')
def sfo_current(interp, this):
return _current(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::__toString')
def sfo_tostring(interp, this):
return _current(interp, this)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::next')
def sfo_next(interp, this):
w_res = this.w_res
assert isinstance(w_res, W_FileResource)
w_res.cur_line = None
if this.flags & SFO_READ_AHEAD:
_sfo_readline(interp, this)
w_res.cur_line_no += 1
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::eof')
def sfo_eof(interp, this):
return interp.space.newbool(this.w_res.feof())
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fflush')
def sfo_fflush(interp, this):
res = this.w_res.flush()
return interp.space.newbool(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fstat')
def sfo_fstat(interp, this):
return _fstat(interp.space, this.w_res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::ftell')
def sfo_ftell(interp, this):
pos = this.w_res.tell()
return interp.space.newint(pos)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::ftruncate')
def sfo_ftruncate(interp, this, size):
res = this.w_res.truncate(size)
return interp.space.newbool(res)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int, Optional(int)],
name='SplFileObject::fseek')
def sfo_fseek(interp, this, offset, whence=0):
return _fseek(interp.space, this.w_res, offset, whence)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int, Optional(int)],
name='SplFileObject::fpassthru')
def sfo_fpassthru(interp, this, offset, whence=0):
bytes_thru = this.w_res.passthru()
return interp.space.newint(bytes_thru)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getMaxLineLen')
def sfo_get_max_line_len(interp, this):
return interp.space.newint(this.max_line_len)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::setMaxLineLen',
error_handler=handle_as_exception)
def sfo_set_max_line_len(interp, this, max_len):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetss')
def sfo_fgetss(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fgetcsv')
def sfo_fgetcsv(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fputcsv')
def sfo_fputcsv(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::flock')
def sfo_flock(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::fscanf')
def sfo_fscanf(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getCsvControl')
def sfo_get_csv_control(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::setCsvControl')
def sfo_set_csv_control(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject)],
name='SplFileObject::getFlags')
def sfo_get_flags(interp, this):
return interp.space.wrap(this.flags)
@wrap_method(['interp', ThisUnwrapper(W_SplFileObject), int],
name='SplFileObject::setFlags')
def sfo_set_flags(interp, this, flags):
this.flags = flags
def _get_openmode(interp, this):
return interp.space.wrap(this.open_mode)
def _set_openmode(interp, this, w_value):
raise NotImplementedError()
def _get_delimiter(interp, this):
return interp.space.wrap(this.delimiter)
def _set_delimiter(interp, this, w_value):
raise NotImplementedError()
def _get_enclosure(interp, this):
return interp.space.wrap(this.enclosure)
def _set_enclosure(interp, this, w_value):
raise NotImplementedError()
SplFileObjectClass = def_class(
'SplFileObject',
[sfo_construct, sfo_rewind, sfo_valid, sfo_key, sfo_current, sfo_next,
sfo_seek, sfo_get_children, sfo_has_children, sfo_fwrite, sfo_eof,
sfo_fgets, sfo_fgetc, sfo_tostring, sfo_get_max_line_len, sfo_fgetss,
sfo_set_max_line_len, sfo_fflush, sfo_fgetcsv, sfo_flock, sfo_fputcsv,
sfo_fscanf, sfo_fseek, sfo_fstat, sfo_ftell, sfo_ftruncate,
sfo_get_csv_control, sfo_set_csv_control, sfo_get_flags, sfo_set_flags,
sfo_get_current_line, sfo_fpassthru],
properties=[GetterSetterWrapper(_get_openmode, _set_openmode,
"openMode", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_delimiter, _set_delimiter,
"delimiter", consts.ACC_PRIVATE),
GetterSetterWrapper(_get_enclosure, _set_enclosure,
"enclosure", consts.ACC_PRIVATE)],
constants=[
('DROP_NEW_LINE', W_IntObject(SFO_DROP_NEW_LINE)),
('READ_AHEAD', W_IntObject(SFO_READ_AHEAD)),
('SKIP_EMPTY', W_IntObject(SFO_SKIP_EMPTY)),
('READ_CSV', W_IntObject(SFO_READ_CSV))],
implements=[k_RecursiveIterator, k_SeekableIterator],
instance_class=W_SplFileObject,
extends=k_SplFileInfo,)
class W_DirectoryIterator(W_SplFileInfo):
w_dir_res = None
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
assert isinstance(w_res, W_DirectoryIterator)
w_res.path_name = self.path_name
w_res.w_dir_res = self.w_dir_res
return w_res
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator), str],
name='DirectoryIterator::__construct',
error_handler=handle_as_exception)
def di_construct(interp, this, path):
if path == "":
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"Directory name must not be empty.")]))
this.path = path
this.file_name = path
this.index = 0
if not os.path.isdir(path):
raise PHPException(k_UnexpectedValueException.call_args(
interp, [interp.space.wrap(
"DirectoryIterator::__construct(%s): failed to open dir: No "
"such file or directory" % path)]))
try:
w_dir = W_DirResource(interp.space, path)
w_dir_res = w_dir.open()
if not isinstance(w_dir_res, W_DirResource):
raise OSError # rare case, but annotation fix
this.w_dir_res = w_dir_res
this.path_name = _di_pathname(this)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"DirectoryIterator::__construct(): error while opening stream"
)]))
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::__toString')
def di_tostring(interp, this):
return interp.space.newstr(this.w_dir_res.items[this.w_dir_res.index])
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::current')
def di_current(interp, this):
return this
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::key')
def di_key(interp, this):
return interp.space.newint(this.w_dir_res.index)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::next')
def di_next(interp, this):
this.w_dir_res.read()
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::rewind')
def di_rewind(interp, this):
return this.w_dir_res.rewind()
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator), int],
name='DirectoryIterator::seek')
def di_seek(interp, this, pos):
this.w_dir_res.seek_to_item(pos)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::valid')
def di_valid(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
return interp.space.w_True
else:
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getFilename')
def di_get_filename(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
res = this.w_dir_res.items[this.w_dir_res.index]
else:
res = ''
return interp.space.newstr(res)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator), Optional(str)],
name='DirectoryIterator::getBasename')
def di_get_basename(interp, this, suffix=''):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
filename = this.w_dir_res.items[this.w_dir_res.index]
return _basename(interp.space, filename, suffix)
else:
return interp.space.newstr('')
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getExtension')
def di_get_extension(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
filename = this.w_dir_res.items[this.w_dir_res.index]
return _extension(interp, filename)
else:
return interp.space.newstr('')
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getPath')
def di_get_path(interp, this):
path = this.path
return interp.space.newstr(path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getPathname')
def di_get_pathname(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
return interp.space.newstr(_di_pathname(this))
else:
return interp.space.w_False
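# Helper: full path (directory + name of the entry the iterator currently points at).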
def _di_pathname(di):
return di.path + '/' + di.w_dir_res.items[di.w_dir_res.index]
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getGroup')
def di_get_group(interp, this):
path = this.path
assert path is not None
return _get_group(interp, path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getInode')
def di_get_inode(interp, this):
path = this.path
assert path is not None
return _get_inode(interp, path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getOwner')
def di_get_owner(interp, this):
path = this.path
assert path is not None
return _get_owner(interp, path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getPerms')
def di_get_perms(interp, this):
path = this.path
assert path is not None
return _get_perms(interp, path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getSize')
def di_get_size(interp, this):
path = this.path
assert path is not None
return _get_size(interp, path)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getType')
def di_get_type(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
return _filetype(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isDir')
def di_is_dir(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
assert item is not None
return _is_dir(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isDot')
def di_is_dot(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
if this.w_dir_res.items[this.w_dir_res.index] in ('.', '..'):
return interp.space.w_True
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isFile')
def di_is_file(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
return _is_file(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isExecutable')
def di_is_executable(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
return _is_executable(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isReadable')
def di_is_readable(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
return _is_readable(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isWritable')
def di_is_writable(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
return _is_writable(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::isLink')
def di_is_link(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
assert item is not None
return _is_link(interp.space, item)
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getATime')
def di_getatime(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
assert item is not None
try:
res = os.stat(item).st_atime
return interp.space.wrap(int(res))
except OSError:
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getCTime')
def di_getctime(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
assert item is not None
try:
res = os.stat(item).st_ctime
return interp.space.wrap(int(res))
except OSError:
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_DirectoryIterator)],
name='DirectoryIterator::getMTime')
def di_getmtime(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
item = _di_pathname(this)
else:
item = this.path
assert item is not None
try:
res = os.stat(item).st_mtime
return interp.space.wrap(int(res))
except OSError:
return interp.space.w_False
k_DirectoryIterator = def_class(
'DirectoryIterator',
[di_construct,
di_current,
di_key,
di_next,
di_rewind,
di_seek,
di_valid,
di_get_filename,
di_get_basename,
di_get_extension,
di_get_path,
di_get_pathname,
di_get_group,
di_get_inode,
di_get_owner,
di_get_perms,
di_get_size,
di_get_type,
di_is_dir,
di_is_dot,
di_is_file,
di_is_link,
di_is_executable,
di_is_readable,
di_is_writable,
di_getatime,
di_getctime,
di_getmtime,
di_tostring],
implements=[k_SeekableIterator],
instance_class=W_DirectoryIterator,
extends=k_SplFileInfo,)
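# Flag values mirroring PHP's FilesystemIterator constants; they are registered as
# class constants on the FilesystemIterator class defined below.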
FI_CURRENT_AS_PATHNAME = 32
FI_CURRENT_AS_FILEINFO = 0
FI_CURRENT_AS_SELF = 16
FI_CURRENT_MODE_MASK = 240
FI_KEY_AS_PATHNAME = 0
FI_KEY_AS_FILENAME = 256
FI_FOLLOW_SYMLINKS = 512
FI_KEY_MODE_MASK = 3840
FI_NEW_CURRENT_AND_KEY = 256
FI_SKIP_DOTS = 4096
FI_UNIX_PATHS = 8192
FI_OTHER_MODE_MASK = 12288
class W_FilesystemIterator(W_DirectoryIterator):
w_dir_res = None
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
assert isinstance(w_res, W_FilesystemIterator)
w_res.path_name = self.path_name
w_res.w_dir_res = self.w_dir_res
w_res.flags = self.flags
w_res.path = self.path
return w_res
@wrap_method(['interp', ThisUnwrapper(W_FilesystemIterator), str,
Optional(int)], name='FilesystemIterator::__construct',
error_handler=handle_as_exception)
def fi_construct(interp, this, path, flags=
FI_KEY_AS_PATHNAME | FI_CURRENT_AS_FILEINFO | FI_SKIP_DOTS):
if not os.path.isdir(path):
raise PHPException(k_UnexpectedValueException.call_args(
interp, [interp.space.wrap(
"FilesystemIterator::__construct(%s): failed to open dir: No "
"such file or directory" % path)]))
this.flags = flags | FI_SKIP_DOTS # PHP wants us to do this.
this.path = path
this.file_name = path
this.index = 0
try:
w_dir = W_DirResource(interp.space, path, this.flags & FI_SKIP_DOTS)
w_dir_res = w_dir.open()
if not isinstance(w_dir_res, W_DirResource):
raise OSError
this.w_dir_res = w_dir_res
this.path_name = _di_pathname(this)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"FilesystemIterator::__construct(): error while opening stream"
)]))
@wrap_method(['interp', ThisUnwrapper(W_FilesystemIterator)],
name='FilesystemIterator::current')
def fi_current(interp, this):
if this.flags & FI_CURRENT_AS_SELF:
return this
if this.flags & FI_CURRENT_AS_PATHNAME:
pathname = _di_pathname(this)
return interp.space.newstr(pathname)
else:
filename = _di_pathname(this)
file_info = k_SplFileInfo.call_args(interp,
[interp.space.wrap(filename)])
return file_info
@wrap_method(['interp', ThisUnwrapper(W_FilesystemIterator)],
name='FilesystemIterator::key')
def fi_key(interp, this):
if this.flags & FI_KEY_AS_FILENAME:
filename = this.w_dir_res.items[this.w_dir_res.index]
return interp.space.wrap(filename)
else:
pathname = _di_pathname(this)
return interp.space.newstr(pathname)
@wrap_method(['interp', ThisUnwrapper(W_FilesystemIterator)],
name='FilesystemIterator::getFlags')
def fi_get_flags(interp, this):
flags = this.flags & (FI_KEY_MODE_MASK | FI_CURRENT_MODE_MASK |
FI_OTHER_MODE_MASK)
return interp.space.newint(flags)
@wrap_method(['interp', ThisUnwrapper(W_FilesystemIterator), int],
name='FilesystemIterator::setFlags')
def fi_set_flags(interp, this, flags):
this.flags &= ~(FI_KEY_MODE_MASK | FI_CURRENT_MODE_MASK |
FI_OTHER_MODE_MASK)
this.flags |= ((FI_KEY_MODE_MASK | FI_CURRENT_MODE_MASK |
FI_OTHER_MODE_MASK) & flags)
k_FilesystemIterator = def_class(
'FilesystemIterator',
[fi_construct,
fi_current,
fi_key,
fi_get_flags,
fi_set_flags],
constants=[
('CURRENT_AS_PATHNAME', W_IntObject(FI_CURRENT_AS_PATHNAME)),
('CURRENT_AS_FILEINFO', W_IntObject(FI_CURRENT_AS_FILEINFO)),
('CURRENT_AS_SELF', W_IntObject(FI_CURRENT_AS_SELF)),
('CURRENT_MODE_MASK', W_IntObject(FI_CURRENT_MODE_MASK)),
('KEY_AS_PATHNAME', W_IntObject(FI_KEY_AS_PATHNAME)),
('KEY_AS_FILENAME', W_IntObject(FI_KEY_AS_FILENAME)),
('FOLLOW_SYMLINKS', W_IntObject(FI_FOLLOW_SYMLINKS)),
('KEY_MODE_MASK', W_IntObject(FI_KEY_MODE_MASK)),
('NEW_CURRENT_AND_KEY', W_IntObject(FI_NEW_CURRENT_AND_KEY)),
('SKIP_DOTS', W_IntObject(FI_SKIP_DOTS)),
('UNIX_PATHS', W_IntObject(FI_UNIX_PATHS)),
('OTHER_MODE_MASK', W_IntObject(FI_OTHER_MODE_MASK))],
implements=[k_SeekableIterator],
instance_class=W_FilesystemIterator,
extends=k_DirectoryIterator,)
class W_RecursiveDirectoryIterator(W_FilesystemIterator):
w_dir_res = None
def clone(self, interp, contextclass):
w_res = W_InstanceObject.clone(self, interp, contextclass)
assert isinstance(w_res, W_RecursiveDirectoryIterator)
w_res.path_name = self.path_name
w_res.w_dir_res = self.w_dir_res
w_res.flags = self.flags
w_res.path = self.path
return w_res
@wrap_method(['interp', ThisUnwrapper(W_RecursiveDirectoryIterator), str,
Optional(int)], name='RecursiveDirectoryIterator::__construct',
error_handler=handle_as_exception)
def rdi_construct(interp, this, path, flags=
FI_KEY_AS_PATHNAME | FI_CURRENT_AS_FILEINFO):
if not os.path.isdir(path):
raise PHPException(k_UnexpectedValueException.call_args(
interp, [interp.space.wrap(
"RecursiveDirectoryIterator::__construct(%s): failed to open dir: No "
"such file or directory" % path)]))
this.flags = flags
this.path = path
this.file_name = path
this.index = 0
try:
w_dir = W_DirResource(interp.space, path, this.flags & FI_SKIP_DOTS)
w_dir_res = w_dir.open()
if not isinstance(w_dir_res, W_DirResource):
raise OSError
this.w_dir_res = w_dir_res
this.path_name = _di_pathname(this)
except OSError:
raise PHPException(k_RuntimeException.call_args(
interp, [interp.space.wrap(
"RecursiveDirectoryIterator::__construct(): error while opening stream"
)]))
@wrap_method(['interp', ThisUnwrapper(W_RecursiveDirectoryIterator)],
name='RecursiveDirectoryIterator::hasChildren')
def rdi_has_children(interp, this):
if this.w_dir_res.index < this.w_dir_res.no_of_items:
if this.w_dir_res.items[this.w_dir_res.index] not in ('.', '..'):
item = _di_pathname(this)
assert item is not None
return _is_dir(interp.space, item)
return interp.space.w_False
@wrap_method(['interp', ThisUnwrapper(W_RecursiveDirectoryIterator)],
name='RecursiveDirectoryIterator::getChildren')
def rdi_get_children(interp, this):
if this.flags & FI_CURRENT_AS_PATHNAME:
if this.w_dir_res.index < this.w_dir_res.no_of_items:
pathname = _di_pathname(this)
return interp.space.newstr(pathname)
else:
return interp.space.newstr(this.path + '/')
else:
if this.w_dir_res.index < this.w_dir_res.no_of_items:
filename = _di_pathname(this)
sub_dir_iter = k_RecursiveDirectoryIterator.call_args(interp,
[interp.space.wrap(filename)])
return sub_dir_iter
else:
return this
@wrap_method(['interp', ThisUnwrapper(W_RecursiveDirectoryIterator)],
name='RecursiveDirectoryIterator::getSubPath')
def rdi_get_subpath(interp, this):
raise NotImplementedError
@wrap_method(['interp', ThisUnwrapper(W_RecursiveDirectoryIterator)],
name='RecursiveDirectoryIterator::getSubPathname')
def rdi_get_subpathname(interp, this):
raise NotImplementedError
k_RecursiveDirectoryIterator = def_class(
'RecursiveDirectoryIterator',
[rdi_construct,
rdi_has_children,
rdi_get_children,
rdi_get_subpath,
rdi_get_subpathname, ],
implements=[k_SeekableIterator, k_RecursiveIterator],
instance_class=W_RecursiveDirectoryIterator,
extends=k_FilesystemIterator,)
| mit | -7,211,609,275,790,048,000 | 31.969582 | 87 | 0.637597 | false | 3.407073 | false | false | false |
maxplanck-ie/Megamapper | mpileup_VCF.py | 1 | 13822 | #!/usr/bin/env python
"""
Creates an mpileup or VCF file from a BAM file and a reference.
usage: %prog [options]
-p, --input1=p: bam file
-o, --output1=o: Output pileup
-R, --ref=R: Reference file type
-n, --ownFile=n: User-supplied fasta reference file
-d, --dbkey=d: dbkey of user-supplied file
-x, --indexDir=x: Index directory
-b, --bamIndex=b: BAM index file
-B, --baq=B: use BAQ model or not
    -C, --mapCo=C: coefficient for downgrading mapping quality
-M, --mapCap=M: Cap mapping quality
-d, --readCap=d: Cap read quality
-q, --mapq=q: min map quality threshold
-Q, --baseq=Q: min base quality threshold
-I, --callindels=I: call indels or not
-i, --indels=i: Only output lines containing indels
-c, --consensus=c: Call the consensus sequence using MAQ consensus model ("10-column pileup")
    -u, --fformat=u: bcf or vcf format
-T, --theta=T: Theta parameter (error dependency coefficient)
-N, --hapNum=N: Number of haplotypes in sample
-r, --fraction=r: Expected fraction of differences between a pair of haplotypes
    -P, --phredProb=P: Phred probability of an indel in sequencing/prep
    -X, --cmdline=X: additional command line options
-f, --fileName=f: filename to appear in the vcf
"""
import os, shutil, subprocess, sys, tempfile
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def check_seq_file( dbkey, GALAXY_DATA_INDEX_DIR ):
seqFile = '%s/sam_fa_indices.loc' % GALAXY_DATA_INDEX_DIR
seqPath = ''
for line in open( seqFile ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ) and line.startswith( 'index' ):
fields = line.split( '\t' )
if len( fields ) < 3:
continue
if fields[1] == dbkey:
seqPath = fields[2].strip()
break
return seqPath
def __main__():
#Parse Command Line
options, args = doc_optparse.parse( __doc__ )
seqPath = check_seq_file( options.dbkey, options.indexDir )
# output version # of tool
try:
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='samtools 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( 'Samtools %s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine Samtools version\n' )
#prepare file names
tmpDir = tempfile.mkdtemp()
tmpf0 = tempfile.NamedTemporaryFile( dir=tmpDir )
tmpf0_name = tmpf0.name
tmpf0.close()
tmpf0bam_name = '%s.bam' % tmpf0_name
tmpf0bambai_name = '%s.bam.bai' % tmpf0_name
tmpf1 = tempfile.NamedTemporaryFile( dir=tmpDir )
tmpf1_name = tmpf1.name
tmpf1.close()
tmpf1fai_name = '%s.fai' % tmpf1_name
#link bam and bam index to working directory (can't move because need to leave original)
os.symlink( options.input1, tmpf0bam_name )
os.symlink( options.bamIndex, tmpf0bambai_name )
#get parameters for mpileup command
if options.baq == 'yes':
baq = ''
else:
baq = '-B'
if options.callindels == 'yes':
callindels = ''
else:
callindels = '-I'
if options.indels == 'yes':
indels = '-i'
else:
indels = ''
if options.fformat == 'pileup':
fformat = ''
else:
fformat = '-u'
opts = '%s -C %s -M %s -d %s -q %s -Q %s %s %s %s %s' % ( baq, options.mapCo, options.mapCap, options.readCap, options.mapq, options.baseq, callindels, indels, fformat, options.cmdline )
# use for debugging #
# opts = '-B -C 50 -q 30 -Q 30 -u '# -r 10:20,000,000-35,000,000'
# print options.cmdline #use for debugging
if options.consensus == 'yes':
opts += ' -c'
# print opts #use for debugging
# else:
# print opts #use for debugging
# if options.chs_cmdline == 'yes':
# opts = opts
# print cmdline #use for debugging
# else:
# opts = opts
# print opts #use for debugging
# opts # += ' -c -T %s -N %s -r %s -I %s' % ( options.theta, options.hapNum, options.fraction, options.phredProb )
# pileup only subset for troubleshooting: opts += -r 2:100,000-150,000
# samtools mpileup -C50 -d24 -q20 -Q30 -uf /media/DATA1/galaxy/reference_genomes/danrer7/sam_index/danrer7.fa test.bam | /home/ian/samtools-0.1.18/bcftools/bcftools view -bvcg - > var.raw.bcf
#/home/ian/samtools-0.1.18/bcftools/bcftools view var.raw.bcf | vcfutils.pl varFilter -D50 > var.flt.vcf'
# samtools mpileup -B -C50 -M60 -d24 -q20 -Q30 -I -i -c -T -N -r -uf /media/DATA1/galaxy/reference_genomes/danrer7/sam_index/danrer7.fa test.bam | /home/ian/samtools-0.1.18/bcftools/bcftools view -bvcg - > var.raw.bcf
# -C, --mapCo=$mapCo: coefficient for downgrading mapping quality
# -M, --mapCap=M: Cap mapping quality
# -d, --readCap=d: Cap read quality
# -q, --mapq=q: min map quality threshold
# -Q, --baseq=Q: min base quality threshold
# -c, --consensus=c: Call the consensus sequence using MAQ consensus model
# -T, --theta=T: Theta parameter (error dependency coefficient)
# -N, --hapNum=N: Number of haplotypes in sample
# -r, --fraction=r: Expected fraction of differences between a pair of haplotypes
# -P, --phredProb=I: Phred probability of an indel in sequencing/prep
# Input Options:
# -6 Assume the quality is in the Illumina 1.3+ encoding. -A Do not skip anomalous read pairs in variant calling.
# -B Disable probabilistic realignment for the computation of base alignment quality (BAQ). BAQ is the Phred-scaled probability of a read # base being misaligned. Applying this option greatly helps to reduce false SNPs caused by misalignments.
# -b FILE List of input BAM files, one file per line [null]
# -C INT Coefficient for downgrading mapping quality for reads containing excessive mismatches. Given a read with a phred-scaled probability q of being generated from the mapped position, the new mapping quality is about sqrt((INT-q)/INT)*INT. A zero value disables this functionality; if enabled, the recommended value for BWA is 50. [0]
# -d INT At a position, read maximally INT reads per input BAM. [250]
# -E Extended BAQ computation. This option helps sensitivity especially for MNPs, but may hurt specificity a little bit.
# -f FILE The faidx-indexed reference file in the FASTA format. The file can be optionally compressed by razip. [null]
# -l FILE BED or position list file containing a list of regions or sites where pileup or BCF should be generated [null]
# -q INT Minimum mapping quality for an alignment to be used [0]
# -Q INT Minimum base quality for a base to be considered [13]
# -r STR Only generate pileup in region STR [all sites]
# Output Options:
# -D Output per-sample read depth
# -g Compute genotype likelihoods and output them in the binary call format (BCF).
# -S Output per-sample Phred-scaled strand bias P-value
# -u Similar to -g except that the output is uncompressed BCF, which is preferred for piping.
# Options for Genotype Likelihood Computation (for -g or -u):
# -e INT Phred-scaled gap extension sequencing error probability. Reducing INT leads to longer indels. [20]
# -h INT Coefficient for modeling homopolymer errors. Given an l-long homopolymer run, the sequencing error of an indel of size s is modeled as INT*s/l. [100]
# -I Do not perform INDEL calling
# -L INT Skip INDEL calling if the average per-sample depth is above INT. [250]
# -o INT Phred-scaled gap open sequencing error probability. Reducing INT leads to more indel calls. [40]
# -P STR Comma delimited list of platforms (determined by @RG-PL) from which indel candidates are obtained. It is recommended to collect indel candidates from sequencing technologies that have low indel error rate such as ILLUMINA. [all]
#where the -D option sets the maximum read depth to call a SNP. SAMtools acquires sample information from the SM tag in the @RG header lines. One alignment file can contain multiple samples; reads from one sample can also be distributed in different alignment files. SAMtools will regroup the reads anyway. In addition, if no @RG lines are present, each alignment file is taken as one sample.
# Tuning the parameters
#One should consider to apply the following parameters to mpileup in different scenarios:
# Apply -C50 to reduce the effect of reads with excessive mismatches. This aims to fix overestimated mapping quality and appears to be preferred for BWA-short.
# Given multiple technologies, apply -P to specify which technologies to use for collecting initial INDEL candidates. It is recommended to find INDEL candidates from technologies with low INDEL error rate, such as Illumina. When this option is in use, the value(s) following the option must appear in the PL tag in the @RG header lines.
# Apply -D and -S to keep per-sample read depth and strand bias. This is preferred if there are more than one samples at high coverage.
# Adjust -m and -F to control when to initiate indel realignment (requiring r877+). Samtools only finds INDELs where there are sufficient reads containing the INDEL at the same position. It does this to avoid excessive realignment that is computationally demanding. The default works well for many low-coverage samples but not for, say, 500 exomes. In the latter case, using -m 3 -F 0.0002 (3 supporting reads at minimum 0.02% frequency) is necessary to find singletons.
# Apply -A to use anomalous read pairs in mpileup, which are not used by default (requiring r874+).
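# For example, combining the recommendations above into the two-step workflow already
# sketched earlier in this file (illustrative only; not executed by this wrapper, and the
# file names are placeholders):
#   samtools mpileup -C50 -q20 -Q30 -uf ref.fa sample.bam | bcftools view -bvcg - > var.raw.bcf
#   bcftools view var.raw.bcf | vcfutils.pl varFilter -D50 > var.flt.vcf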
#prepare basic mpileup command
    if options.fformat == 'vcf':
        cmd = 'samtools mpileup %s -f %s %s | bcftools view -vcg - > %s' #| bcftools view -bvcg - > RAL_samtools.raw.bcf % ( opts, tmpf1_name, tmpf0bam_name, options.output1 )
        #print cmd # use for debugging
        if options.fileName != "":
            cmd = "samtools mpileup %s -f %s %s | bcftools view -vcg - | sed 's|" + tmpf0bam_name + "|" + options.fileName + "|' > %s"
    else:
        cmd = 'samtools mpileup %s -f %s %s > %s'
try:
# have to nest try-except in try-finally to handle 2.4
try:
#index reference if necessary and prepare mpileup command
if options.ref == 'indexed':
if not os.path.exists( "%s.fai" % seqPath ):
raise Exception, "No sequences are available for '%s', request them by reporting this error." % options.dbkey
cmd = cmd % ( opts, seqPath, tmpf0bam_name, options.output1 )
print cmd # use for debugging
elif options.ref == 'history':
os.symlink( options.ownFile, tmpf1_name )
cmdIndex = 'samtools faidx %s' % ( tmpf1_name )
tmp = tempfile.NamedTemporaryFile( dir=tmpDir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmdIndex, shell=True, cwd=tmpDir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
#did index succeed?
if returncode != 0:
raise Exception, 'Error creating index file\n' + stderr
cmd = cmd % ( opts, tmpf1_name, tmpf0bam_name, options.output1 )
print cmd # use for debugging
#perform mpileup command
tmp = tempfile.NamedTemporaryFile( dir=tmpDir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd, shell=True, cwd=tmpDir, stderr=tmp_stderr.fileno() )
            proc.communicate()
            returncode = proc.returncode
tmp_stderr.close()
#did it succeed?
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
print returncode
# raise Exception, stderr
except Exception, e:
stop_err( 'Error running Samtools mpileup tool\n' + str( e ) )
finally:
#clean up temp files
if os.path.exists( tmpDir ):
shutil.rmtree( tmpDir )
# check that there are results in the output file
if os.path.getsize( options.output1 ) > 0:
sys.stdout.write( 'Converted BAM to pileup' )
else:
stop_err( 'The output file is empty. Your input file may have had no matches, or there may be an error with your input file or settings.' )
if __name__ == "__main__" : __main__()
| bsd-3-clause | 6,976,894,021,865,176,000 | 50.962406 | 473 | 0.638403 | false | 3.493933 | false | false | false |
mementum/backtrader | backtrader/stores/vchartfile.py | 1 | 2700 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os.path
import backtrader as bt
class VChartFile(bt.Store):
'''Store provider for Visual Chart binary files
Params:
- ``path`` (default:``None``):
If the path is ``None`` and running under *Windows*, the registry will
be examined to find the root directory of the *Visual Chart* files.
'''
params = (
('path', None),
)
def __init__(self):
self._path = self.p.path
if self._path is None:
self._path = self._find_vchart()
@staticmethod
def _find_vchart():
# Find VisualChart registry key to get data directory
# If not found returns ''
VC_KEYNAME = r'SOFTWARE\VCG\Visual Chart 6\Config'
VC_KEYVAL = 'DocsDirectory'
VC_DATADIR = ['Realserver', 'Data', '01']
VC_NONE = ''
from backtrader.utils.py3 import winreg
if winreg is None:
return VC_NONE
vcdir = None
# Search for Directory in the usual root keys
for rkey in (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE,):
try:
vckey = winreg.OpenKey(rkey, VC_KEYNAME)
except WindowsError as e:
continue
# Try to get the key value
try:
vcdir, _ = winreg.QueryValueEx(vckey, VC_KEYVAL)
except WindowsError as e:
continue
else:
break # found vcdir
if vcdir is not None: # something was found
vcdir = os.path.join(vcdir, *VC_DATADIR)
else:
vcdir = VC_NONE
return vcdir
def get_datapath(self):
return self._path
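# Minimal usage sketch (an illustration under assumptions, not a tested recipe; the
# explicit path below is a placeholder):
#   store = VChartFile()                  # on Windows, locates the data dir via the registry
#   store = VChartFile(path=r'C:\VChart\Realserver\Data\01')  # or pass the root explicitly
#   print(store.get_datapath())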
| gpl-3.0 | -3,481,570,702,733,509,000 | 30.034483 | 79 | 0.572593 | false | 4.251969 | false | false | false |
christophevg/codecanvas | src/codecanvas/instructions.py | 1 | 17240 | # instructions.py
# abstract instruction set, implemented as Code
# author: Christophe VG
from util.check import isstring, isidentifier
from util.visitor import visits, novisiting
from util.types import TypedList, Any
from codecanvas.base import Code, WithoutChildren, WithoutChildModification, List
# Mixins
class Identified(object):
def get_name(self): return self.id.name
name = property(get_name)
class Identifier(Code):
def __init__(self, name):
assert isidentifier(name), "Not an Identifier: " + name
self.name = name
def __repr__(self): return self.name
# Declarations
class Constant(Identified, Code):
def __init__(self, id, value, type=None):
# name
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier), "Name should be an identifier, not" + \
id.__class__.__name__
# TODO: add some value-checking ? (to avoid havoc)
if isstring(value): value = Identifier(value)
if type is None: type = VoidType()
assert isinstance(type, Type), "Type should be a Type, not " + \
type.__class__.__name__
super(Constant, self).__init__({"id": id, "value": value, "type": type})
self.id = id
self.value = value
self.type = type
class Function(Identified, Code):
def __init__(self, name, type=None, params=[]):
# name
assert not name is None, "A function needs at least a name." # TODO: extend
if isstring(name): name = Identifier(name)
assert isinstance(name, Identifier), "Name should be an identifier, not" + \
name.__class__.__name__
# type
if type is None: type = VoidType()
assert isinstance(type, Type), "Return-type should be a Type, not " + \
type.__class__.__name__
# params
if isinstance(params, list): params = TypedList(Parameter, params)
super(Function, self).__init__({"id":name, "type":type, "params": params})
self.id = name
self.type = type
self.params = params
class Prototype(WithoutChildren, Function):
@classmethod
def from_Function(clazz, function):
return Prototype(function.name, type=function.type, params=function.params)
class Parameter(Identified, Code):
def __init__(self, id, type=None, default=None):
# name
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier)
# type
if type is None: type = VoidType()
assert isinstance(type, Type)
assert default == None or isinstance(default, Expression)
super(Parameter, self).__init__({"id": id, "type": type, "default": default})
self.id = id
self.type = type
self.default = default
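# Example (an illustrative sketch; it only uses constructors defined in this module,
# some of which appear further below): declaring an "inc" function that takes an int
# parameter and returns n + 1 could look like
#   Function("inc", type=IntegerType(), params=[Parameter("n", IntegerType())])
# with a body statement such as
#   Return(Plus(SimpleVariable("n"), IntegerLiteral(1)))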
# Statements
class Statement(Code):
def __init__(self, data):
super(Statement, self).__init__(data)
class IfStatement(WithoutChildModification, Statement):
def __init__(self, expression, true_clause, false_clause=[]):
assert isinstance(expression, Expression)
assert isinstance(true_clause, list)
assert isinstance(false_clause, list)
super(IfStatement, self).__init__({"expression": expression})
self.expression = expression
self.true_clause = true_clause
self.false_clause = false_clause
def _children(self): return [self.true_clause, self.false_clause]
children = property(_children)
class CaseStatement(WithoutChildModification, Statement):
def __init__(self, expression, cases, consequences, case_else=None):
assert isinstance(expression, Expression)
assert isinstance(cases, list)
assert isinstance(consequences, list)
super(CaseStatement, self).__init__({"expression": expression})
self.expression = expression
self.cases = cases
self.consequences = consequences
self.case_else = case_else
def _children(self): return [self.cases, self.consequences]
children = property(_children)
@novisiting
class MutUnOp(WithoutChildren, Statement):
def __init__(self, operand):
assert isinstance(operand, Variable)
super(MutUnOp, self).__init__({"op": operand})
self.operand = operand
def ends(self):
return True
class Inc(MutUnOp): pass
class Dec(MutUnOp): pass
@novisiting
class ImmutUnOp(WithoutChildren, Statement): pass
class Print(WithoutChildren, Statement):
def __init__(self, string, *args):
# string
if isstring(string): string = StringLiteral(string)
assert isinstance(string, StringLiteral)
# TODO: assert args to be expressions
super(Print, self).__init__({"string": string, "args": args})
self.string = string
self.args = args
class Import(Statement):
def __init__(self, imported):
# TODO: checking
super(Import, self).__init__({"imported": imported})
self.imported = imported
class Raise(ImmutUnOp): pass
class Comment(ImmutUnOp):
def __init__(self, comment):
assert isstring(comment)
super(Comment, self).__init__({"comment": comment})
self.comment = comment
def __str__(self):
return "# " + self.comment
@novisiting
class VarExpOp(Statement):
def __init__(self, operand, expression):
if isstring(operand): operand = SimpleVariable(operand)
assert isinstance(operand, Variable)
assert isinstance(expression, Expression)
Statement.__init__(self, {"operand": operand, "expression": expression})
self.operand = operand
self.expression = expression
def ends(self):
return True
class Assign(VarExpOp): pass
class Add(VarExpOp): pass
class Sub(VarExpOp): pass
class Return(Statement):
def __init__(self, expression=None):
assert expression == None or isinstance(expression, Expression)
super(Return, self).__init__({})
self.expression = expression
def ends(self):
return True
@novisiting
class CondLoop(Statement):
def __init__(self, condition):
assert isinstance(condition, Expression)
super(CondLoop, self).__init__({"condition": condition})
self.condition = condition
class WhileDo(CondLoop): pass
class RepeatUntil(CondLoop): pass
class For(Statement):
def __init__(self, init, check, change):
assert isinstance(init, Statement) and not isinstance(init, Block)
assert isinstance(check, Expression)
assert isinstance(change, Statement) and not isinstance(change, Block)
super(For, self).__init__({"init": init, "check": check, "change": change})
self.init = init
self.check = check
self.change = change
class StructuredType(Statement):
def __init__(self, name, properties=[]):
if isstring(name): name = Identifier(name)
assert isinstance(name, Identifier)
super(StructuredType, self).__init__({"name":name})
self.name = name
def __repr__(self):
return "struct " + self.name + \
"(" + ",".join(",", [prop for prop in self]) + ")"
class Property(WithoutChildModification, Code):
def __init__(self, name, type):
if isstring(name): name = Identifier(name)
assert isinstance(name, Identifier)
assert isinstance(type, Type), "expected Type but got " + type.__class__.__name__
super(Property, self).__init__({"name": name, "type": type})
self.name = name
self.type = type
def __repr__(self): return "property " + self.name + ":" + self.type
# Expressions
@novisiting
class Expression(Code):
def as_label(self):
return str(self)
@novisiting
class Variable(Expression): pass
class SimpleVariable(Identified, Variable):
# TODO: info here is a small hack to allow semantic typing information :-(
def __init__(self, id, info=None):
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier)
super(SimpleVariable, self).__init__({"id": id, "info": info})
self.id = id
self.info = info
# TODO: rename to indexer or something like that
class ListVariable(Identified, Variable):
def __init__(self, id, index):
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier) or isinstance(id, Variable)
super(ListVariable, self).__init__({"id": id, "index": index})
self.id = id
self.index = index
class Object(Identified, Variable):
def __init__(self, id, type=None):
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier)
if type is None: type = VoidType()
assert isinstance(type, Type)
super(Object, self).__init__({"id": id, "type": type})
self.id = id
self.type = type
def __repr__(self):
return "Object(" + repr(self.id) + ":" + repr(self.type) + ")"
class ObjectProperty(Variable):
def __init__(self, obj, prop, type=None):
if isstring(obj): obj = Object(obj)
assert isinstance(obj, Object), "got " + obj.__class__.__name__
if isstring(prop): prop = Identifier(prop)
assert isinstance(prop, Identifier)
if type is None: type = VoidType()
assert isinstance(type, Type)
super(ObjectProperty, self).__init__({"obj" : obj, "prop": prop})
self.obj = obj
self.prop = prop
self.type = type
def __repr__(self):
return "ObjectProperty(" + repr(self.obj) + "." + repr(self.prop) + ":" + repr(self.type) + ")"
class StructProperty(Variable):
def __init__(self, obj, prop):
if isstring(obj): obj = Object(obj)
assert isinstance(obj, Object), "got " + obj.__class__.__name__
if isstring(prop): prop = Identifier(prop)
assert isinstance(prop, Identifier)
super(StructProperty, self).__init__({"obj" : obj, "prop": prop})
self.obj = obj
self.prop = prop
def __repr__(self):
return "ObjectProperty(" + repr(self.obj) + "." + repr(self.prop) + ")"
@novisiting
class UnOp(Expression):
def __init__(self, operand):
assert isinstance(operand, Expression)
super(UnOp, self).__init__({})
self.operand = operand
class Not(UnOp): pass
# TODO: extend this a bit ;-)
class ShiftLeft(Expression):
def __init__(self, var, amount):
self.var = var
self.amount = amount
super(ShiftLeft, self).__init__({"var": var, "amount": amount})
@novisiting
class BinOp(Expression):
def __init__(self, left, right):
assert isinstance(left, Expression)
assert isinstance(right, Expression)
super(BinOp, self).__init__({"left": left, "right": right})
self.left = left
self.right = right
class And(BinOp): pass
class Or(BinOp): pass
class Equals(BinOp): pass
class NotEquals(BinOp): pass
class LT(BinOp): pass
class LTEQ(BinOp): pass
class GT(BinOp): pass
class GTEQ(BinOp): pass
class Plus(BinOp): pass
class Minus(BinOp): pass
class Mult(BinOp): pass
class Div(BinOp): pass
class Modulo(BinOp): pass
class Call(Expression):
def __init__(self, info, arguments=[]):
info["arguments"] = len(arguments)
super(Call, self).__init__(info)
self.arguments = TypedList(Expression, arguments)
def ends(self):
return True
class FunctionCall(Call):
def __init__(self, function, arguments=[], type=None):
if isstring(function): function = Identifier(function)
assert isinstance(function, Identifier)
if type is None: type = VoidType()
assert isinstance(type, Type), "but got " + type.__class__.__name__
super(FunctionCall, self).__init__({"function": function}, arguments)
self.function = function
self.type = type
def as_label(self):
return self.function.name
class MethodCall(Call):
def __init__(self, obj, method, arguments=[], type=None):
assert isinstance(obj, Object) or isinstance(obj, ObjectProperty), \
"Expected Object(Property), but got " + obj.__class__.__name__
if isstring(method): method = Identifier(method)
if type is None: type = VoidType()
assert isinstance(type, Type), "but got " + type.__class__.__name__
assert isinstance(method, Identifier)
super(MethodCall, self).__init__({"obj": obj, "method": method}, arguments)
self.obj = obj
self.method = method
self.type = type
# Literals
@novisiting
class Literal(Expression): pass
class StringLiteral(Literal):
def __init__(self, data):
self.data = data
def __repr__(self):
return '"' + self.data.replace("\n", "\\n") + '"'
class BooleanLiteral(Literal):
def __init__(self, value):
assert isinstance(value, bool)
super(BooleanLiteral, self).__init__({"value": value})
self.value = value
def __repr__(self):
return "true" if self.value else "false"
class IntegerLiteral(Literal):
def __init__(self, value):
assert isinstance(value, int)
super(IntegerLiteral, self).__init__({"value": value})
self.value = value
def __repr__(self):
return str(self.value)
class ByteLiteral(Literal):
def __init__(self, value):
assert isinstance(value, int) and value < 256
super(ByteLiteral, self).__init__({"value": value})
self.value = value
def __repr__(self):
return "0x%02x" % self.value
def as_label(self):
return "0x%02x" % self.value
class FloatLiteral(Literal):
def __init__(self, value):
assert isinstance(value, float)
self.value = value
def __repr__(self):
return str(self.value)
class ListLiteral(Literal):
def __init__(self):
super(ListLiteral, self).__init__({})
def __repr__(self):
return "[]"
class TupleLiteral(Literal):
def __init__(self, expressions=[]):
self.expressions = TypedList(Expression, expressions)
def __repr__(self):
return "(" + ",".join([expr for expr in self.expressions]) + ")"
class AtomLiteral(Identified, Literal):
def __init__(self, id):
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier)
super(AtomLiteral, self).__init__({"name": id.name})
self.id = id
def __repr__(self):
return "atom " + self.name
# Types
class Type(Code): pass
class NamedType(Type):
def __init__(self, name):
assert isstring(name)
super(NamedType, self).__init__({"name": name})
self.name = name
def __repr__(self): return "type " + self.name
class VoidType(Type):
def __repr__(self): return "void"
class ManyType(Type):
def __init__(self, type):
assert isinstance(type, Type), \
"Expected Type but got " + type.__class__.__name__
super(ManyType, self).__init__({})
self.type = type
def __repr__(self): return "many " + str(self.type)
class AmountType(Type):
def __init__(self, type, size):
assert isinstance(type, Type), \
"Expected Type but got " + type.__class__.__name__
super(AmountType, self).__init__({})
self.type = type
self.size = size
def __repr__(self): return str(self.type) + "[" + str(self.size) + "]"
class TupleType(Type):
def __init__(self, types):
for type in types:
assert isinstance(type, Type)
super(TupleType, self).__init__({})
self.types = types
def __repr__(self): return "tuple " + ",".join([repr(type) for type in self.types])
class ObjectType(Type):
def __init__(self, name):
assert isidentifier(name), name + " is no identifier"
super(ObjectType, self).__init__({"name": name})
self.name = name
def __repr__(self): return "object " + self.name
class ByteType(Type):
def __repr__(self): return "byte"
class IntegerType(Type):
def __repr__(self): return "int"
class BooleanType(Type):
def __repr__(self): return "bool"
class FloatType(Type):
def __repr__(self): return "float"
class LongType(Type):
def __repr__(self): return "long"
class UnionType(Type):
def __init__(self, name, properties=[]):
if isstring(name): name = Identifier(name)
assert isinstance(name, Identifier)
super(UnionType, self).__init__({"name":name})
self.name = name
def __repr__(self):
return "union " + self.name.name + \
"(" + ",".join([str(prop) for prop in self]) + ")"
# Matching
class Match(Expression):
def __init__(self, comp, expression=None):
if isstring(comp): comp = Comparator(comp)
assert isinstance(comp, Comparator), \
"Expected Comparator but got " + comp.__class__.__module__ + ":" + comp.__class__.__name__
assert expression == None or isinstance(expression, Expression)
super(Match, self).__init__({"comp": comp, "exp": expression})
self.comp = comp
self.expression = expression
def as_label(self):
if not self.expression is None:
return self.comp.as_label() + "_" + self.expression.as_label()
else:
return self.comp.as_label()
class Comparator(Code):
def __init__(self, operator):
assert operator in [ "<", "<=", ">", ">=", "==", "!=", "!", "*" ]
super(Comparator, self).__init__({"operator": operator})
self.operator = operator
def as_label(self):
return {
"<" : "lt", "<=" : "lteq",
">" : "gt", ">=" : "gteq",
"==" : "eq", "!=" : "nq",
"!" : "not", "*" : "anything"
}[self.operator]
class Anything(Comparator):
def __init__(self):
super(Anything, self).__init__("*")
class VariableDecl(Identified, Variable):
def __init__(self, id, type):
if isstring(id): id = Identifier(id)
assert isinstance(id, Identifier)
assert isinstance(type, Type), "got " + type.__class__.__name__
super(VariableDecl, self).__init__({"id":id, "type":type})
self.id = id
self.type = type
# A visitor for instructions (= Code)
@visits([Code])
class Visitor(): pass
| bsd-3-clause | -544,667,000,444,343,700 | 30.633028 | 101 | 0.638399 | false | 3.627946 | false | false | false |
PatrickSpieker/ThePyStrikesBack | scrapers/journalscrapers.py | 2 | 12546 |
# imports for class implementation
import csv
import itertools
import logging
import re
import urllib2
from datetime import date
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from exceptions import MissingAttributeException
from base import BaseJournalScraper
class BioMedCentralScraper(BaseJournalScraper):
"""Web scraper for publisher BioMed Central
Attributes:
http_address (str): Address of the BioMed Central webpage with journal information
"""
paid_for_patt = re.compile("do not need to pay")
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_price(soup):
for tag in soup.find_all(class_="CmsArticle_body"):
text = tag.get_text()
price_matches = BioMedCentralScraper.PRICE_PATT.findall(text)
paid_for_matches = BioMedCentralScraper.paid_for_patt.findall(text)
if price_matches:
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "").replace("'", "")))))
elif paid_for_matches:
return 0
raise MissingAttributeException
@staticmethod
def __get_journal_name(soup):
journal_name_tag = soup.find(class_="identity__title-link")
if not journal_name_tag:
raise MissingAttributeException
return journal_name_tag.string
@staticmethod
def __get_issn(soup):
issn_tag = soup.find(class_="SideBox_defList")
if not issn_tag:
raise MissingAttributeException
issn_matches = BioMedCentralScraper.ISSN_PATT.findall(issn_tag.get_text())
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in self.soup.find_all(class_="list-stacked__item"):
link = tag.find("a")["href"]
try:
g = urllib2.urlopen(link + "about", timeout=5)
about_soup = BeautifulSoup(g, 'lxml')
except Exception:
print link + ": Connection problems, continuing to the next entry"
continue
try:
price = BioMedCentralScraper.__get_price(about_soup)
except MissingAttributeException:
print link
print "\n\tNo price could be found"
continue # skipping to the next entry
try:
journal_name = BioMedCentralScraper.__get_journal_name(about_soup)
except MissingAttributeException:
print link
print "\n\tNo journal name could be found"
continue # skipping to the next entry
try:
issn = BioMedCentralScraper.__get_issn(about_soup)
except MissingAttributeException:
print link
print "\n\tNo ISSN could be found"
continue
yield self.to_unicode_row(["BioMed Central", journal_name, str(date.today()), "OA", issn, str(price)])
class ElsevierScraper(BaseJournalScraper):
def __init__(self, csv_filepath):
f = open(csv_filepath, "r")
self.reader = csv.reader(f)
next(self.reader)
def get_entries(self):
for row in self.reader:
row = [BaseJournalScraper.clean_string(i) for i in row]
yield BaseJournalScraper.to_unicode_row(["Elsevier", row[1], str(date.today()),
'Hybrid' if row[2] == 'Hybrid' else 'OA',
row[0], str(int(round(float(row[4]))))])
class ExistingScraper(BaseJournalScraper):
def __init__(self, csv_filepath):
f = open(csv_filepath, "rU")
self.reader = csv.reader(f, dialect=csv.excel_tab)
next(self.reader)
@staticmethod
def __get_row(row):
if not row[2]:
raise MissingAttributeException
return BaseJournalScraper.to_unicode_row((row[0], row[1], row[6],
"OA" if row[4] else "Hybrid", row[2], str(int(round(float(row[4]))))))
def get_entries(self):
for row in self.reader:
try:
yield ExistingScraper.__get_row(row)
except MissingAttributeException as e:
logging.warning(str(row) + str(e))
class HindawiScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_title(tag):
return tag.find("a").string.strip()
@staticmethod
def __get_price(results):
price_matches = BaseJournalScraper.PRICE_PATT.findall(results[1])
if not price_matches:
raise MissingAttributeException
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "")))))
@staticmethod
def __get_issn(results):
issn_matches = BaseJournalScraper.ISSN_PATT.findall(results[0])
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in itertools.chain(self.soup.find_all(class_="subscription_table_plus"),
self.soup.find_all(class_="subscription_table_minus")):
journal_title = HindawiScraper.__get_title(tag)
results = [i.string for i in tag.find_all("td") if i.string]
if not results or (len(results) != 2):
print "ERROR:"
print "\t" + str(tag.contents)
continue
try:
price = HindawiScraper.__get_price(results)
issn = HindawiScraper.__get_issn(results)
except MissingAttributeException:
print "ERROR:"
print "\t" + str(tag.contents)
continue
yield BaseJournalScraper.to_unicode_row(["Hindawi", journal_title, str(date.today()), "OA", issn, price])
class PLOSScraper(BaseJournalScraper):
"""
Scraper isn't actually finished yet. Can't port it
"""
def __init__(self, http_address):
driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
driver.set_window_size(1120, 550)
driver.get("https://www.plos.org/publication-fees")
a = driver.find_elements_by_class_name("feature-block-text")
for i in a:
# print i.text
pass
def get_entries(self):
raise StopIteration
class SageHybridScraper(BaseJournalScraper):
"""
Scraper isn't actually finished yet. Can't port it
"""
def __init__(self, http_address):
pass
def get_entries(self):
raise StopIteration
class SpringerHybridScraper(BaseJournalScraper):
def __init__(self, csv_path):
f = open(csv_path, "r")
self.reader = csv.reader(f)
for i in range(9):
next(self.reader)
def get_entries(self):
for row in self.reader:
if row[11] == "Hybrid (Open Choice)":
yield BaseJournalScraper.to_unicode_row(["Springer", BaseJournalScraper.clean_string(row[1]),
str(date.today()), "Hybrid", row[5], str(3000)])
class SpringerOpenScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address, timeout=5)
self.soup = BeautifulSoup(f, 'lxml')
@staticmethod
def __get_price(soup):
for tag in soup.find_all(class_="CmsArticle_body"):
text = tag.get_text()
price_matches = SpringerOpenScraper.PRICE_PATT.findall(text)
if price_matches:
return str(int(round(float(price_matches[0].replace(",", "").replace("$", "").replace("'", "")))))
raise MissingAttributeException
@staticmethod
def __get_journal_name(soup):
journal_name_tag = soup.find(id="journalTitle")
if not journal_name_tag:
raise MissingAttributeException
return journal_name_tag.string
@staticmethod
def __get_issn(soup):
issn_tag = soup.find(class_="SideBox_defList")
if not issn_tag:
raise MissingAttributeException
issn_matches = SpringerOpenScraper.ISSN_PATT.findall(issn_tag.get_text())
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def get_entries(self):
for tag in self.soup.find_all(class_="list-stacked__item"):
link = tag.find("a")["href"]
if "springeropen.com" not in link:
print link + ": Not valid"
continue
try:
g = urllib2.urlopen(link + "about", timeout=5).read()
about_soup = BeautifulSoup(g, 'lxml')
except Exception:
print link + ": Connection problems, continuing to the next entry"
continue
try:
price = SpringerOpenScraper.__get_price(about_soup)
except MissingAttributeException:
print link + ": No price could be found"
continue # skipping to the next entry
try:
journal_name = SpringerOpenScraper.__get_journal_name(about_soup)
except MissingAttributeException:
print link + ": No journal name could be found"
continue # skipping to the next entry
try:
issn = SpringerOpenScraper.__get_issn(about_soup)
except MissingAttributeException:
print link + ": No ISSN could be found"
continue
yield self.to_unicode_row(["Springer", journal_name, str(date.today()), "OA", issn, str(price)])
class WileyScraper(BaseJournalScraper):
def __init__(self, http_address):
f = urllib2.urlopen(http_address)
self.soup = BeautifulSoup(f, 'lxml')
self.driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
self.driver.set_window_size(1120, 550)
self.driver.get(http_address)
@staticmethod
def __get_child_tag_strings(tag):
for child in tag.children:
if not (str(child) == "\n"):
yield child.string
def __get_issn(self):
issn_matches = (WileyScraper.ISSN_PATT
.findall(self.driver
.find_element_by_xpath("//div[@id='displayJAPCL']/a[1]")
.get_attribute("href")))
if not issn_matches:
raise MissingAttributeException
return issn_matches[0]
def __get_price(self):
try:
price = str(int(round(float(self.driver.find_element_by_id("displayJAPC")
.text.replace(",", "").replace("$", "")))))
except ValueError as e:
raise MissingAttributeException
return price
def get_entries(self):
selected = self.soup.find(class_="journal")
journal_select = Select(self.driver.find_element_by_id("journal"))
# getting rid of first "description" row
journal_gen = WileyScraper.__get_child_tag_strings(selected)
next(journal_gen)
for journal in journal_gen:
try:
journal_select.select_by_visible_text(journal)
except NoSuchElementException:
print "Couldn't find matching journal for input: " + str(journal)
continue
oa_option_element = self.driver.find_element_by_id("displayJOAP")
if (oa_option_element.text == "Fully Open Access") or (oa_option_element.text == "OpenChoice"):
try:
price = self.__get_price()
except MissingAttributeException:
print journal + ": Unable to find price"
continue
try:
issn_matches = self.__get_issn()
except MissingAttributeException:
print "Error: " + journal + "\n\t" + oa_option_element.text
continue
journal_type = "OA" if oa_option_element.text == "Fully Open Access" else "Hybrid"
yield self.to_unicode_row(["Wiley", journal, str(date.today()), journal_type, issn_matches, price])
| mit | 2,909,259,211,513,953,300 | 35.15562 | 117 | 0.581301 | false | 3.958978 | false | false | false |
azogue/enerpi | tests/enerpiweb/test_webserver.py | 1 | 9770 | # -*- coding: utf-8 -*-
"""
ENERPIWEB tests - Routes
"""
from io import BytesIO
import json
import jsondiff
import os
import re
from tests.conftest import TestCaseEnerpiWebServer
class TestEnerpiWebServerRoutes(TestCaseEnerpiWebServer):
# Enerpi test scenario:
subpath_test_files = 'test_context_2probes'
cat_check_integrity = True
def test_0_routes(self):
routes_defined = {
"api_endpoints": [
"{}/api/stream/realtime".format(self.url_prefix),
"{}/api/stream/bokeh".format(self.url_prefix),
"{}/api/email/status".format(self.url_prefix),
"{}/api/editconfig/".format(self.url_prefix),
"{}/api/bokehplot".format(self.url_prefix),
"{}/api/showfile".format(self.url_prefix),
"{}/api/monitor".format(self.url_prefix),
"{}/api/billing".format(self.url_prefix),
"{}/api/bills".format(self.url_prefix),
"{}/api/last".format(self.url_prefix),
"{}/api/help".format(self.url_prefix),
"{}/control".format(self.url_prefix),
"{}/index".format(self.url_prefix),
"{}/".format(self.url_prefix),
"{}/api/stream/bokeh/from/<start>/to/<end>".format(self.url_prefix),
'{}/api/consumption/from/<start>/to/<end>'.format(self.url_prefix),
'{}/api/billing/from/<start>/to/<end>'.format(self.url_prefix),
'{}/api/power/from/<start>/to/<end>'.format(self.url_prefix),
"{}/api/stream/bokeh/last/<last_hours>".format(self.url_prefix),
"{}/api/stream/bokeh/from/<start>".format(self.url_prefix),
'{}/api/consumption/from/<start>'.format(self.url_prefix),
'{}/api/billing/from/<start>'.format(self.url_prefix),
"{}/api/email/status/<recipients>".format(self.url_prefix),
'{}/api/power/from/<start>'.format(self.url_prefix),
"{}/api/filedownload/<file_id>".format(self.url_prefix),
"{}/api/editconfig/<file>".format(self.url_prefix),
"{}/api/uploadfile/<file>".format(self.url_prefix),
"{}/api/hdfstores/<relpath_store>".format(self.url_prefix),
"{}/api/showfile/<file>".format(self.url_prefix),
"{}/api/restart/<service>".format(self.url_prefix)
]
}
self.endpoint_request('api/help')
result = self.endpoint_request('api/help?json=True')
routes = json.loads(result.data.decode())
print(routes)
print(routes_defined)
print(jsondiff.diff(routes, routes_defined))
self.assertEqual(routes, routes_defined)
# assert 0
from enerpiweb import app
endpoints = [rule.rule for rule in app.url_map.iter_rules() if rule.endpoint != 'static']
self.assertEqual(routes["api_endpoints"], endpoints)
self.assertEqual(routes_defined, json.loads(result.data.decode()))
self.endpoint_request('notexistent', status_check=404)
def test_1_index(self):
self.endpoint_request('', status_check=302, verbose=True)
self.endpoint_request("index")
self.endpoint_request("control")
alerta = '?alerta=%7B%22texto_alerta%22%3A+%22LOGFILE+%2FHOME%2FPI%2FENERPIDATA%2FENERPI.LOG'
alerta += '+DELETED%22%2C+%22alert_type%22%3A+%22warning%22%7D'
self.endpoint_request("control" + alerta)
self.endpoint_request("api/monitor")
def test_2_filehandler(self):
from enerpi.editconf import ENERPI_CONFIG_FILES
self.endpoint_request("api/editconfig/")
self.endpoint_request("api/editconfig/flask", status_check=404)
self.endpoint_request("api/editconfig/rsc", status_check=404)
self.endpoint_request("api/editconfig/nginx_err", status_check=404)
self.endpoint_request("api/editconfig/nginx", status_check=404)
self.endpoint_request("api/editconfig/enerpi", status_check=404)
self.endpoint_request("api/editconfig/uwsgi", status_check=404)
self.endpoint_request("api/editconfig/daemon_out", status_check=404)
self.endpoint_request("api/editconfig/daemon_err", status_check=404)
self.endpoint_request("api/editconfig/raw_store", status_check=404)
self.endpoint_request("api/editconfig/catalog", status_check=404)
self.endpoint_request("api/editconfig/notexistent", status_check=404)
        rg_pre = re.compile(r'<pre>(.*)</pre>', flags=re.DOTALL)
for k, checks in zip(sorted(ENERPI_CONFIG_FILES.keys()), [('[ENERPI_DATA]', 'DATA_PATH', '[BROADCAST]'),
('=',),
('analog_channel', 'is_rms', 'name')]):
print('Config file "{}". Checking for {}'.format(k, checks))
r = self.endpoint_request("api/editconfig/{}".format(k))
r2 = self.endpoint_request("api/showfile/{}".format(k))
test = r.data.decode()
test_2 = r2.data.decode()
lookin = rg_pre.findall(test)
lookin_2 = rg_pre.findall(test_2)
print(lookin_2)
if not lookin:
print(test)
if not lookin_2:
print(test_2)
for c in checks:
self.assertIn(c, lookin[0], 'No se encuentra "{}" en "{}"'.format(c, lookin))
self.assertIn(c, lookin_2[0], 'No se encuentra "{}" en "{}"'.format(c, lookin))
alerta_js = json.dumps({'alert_type': 'success', 'texto_alerta': 'testing enerpi...'})
self.endpoint_request("api/editconfig/config?alerta={}".format(alerta_js))
self.endpoint_request("api/showfile/enerpi?alerta={}".format(alerta_js))
self.endpoint_request("api/showfile/enerpi?delete=true", status_check=302)
self.endpoint_request("api/showfile/notexistent", status_check=404)
# TODO tests edit configuration files + POST changes
def test_3_download_files(self):
from enerpi.editconf import ENERPI_CONFIG_FILES
for file in ENERPI_CONFIG_FILES.keys():
print('downloading id_file={}'.format(file))
self.endpoint_request("api/filedownload/{}".format(file))
self.endpoint_request("api/filedownload/notexistent", status_check=404)
self.endpoint_request("api/filedownload/{}?as_attachment=true".format('config'))
print(os.listdir(self.DATA_PATH))
print(self.raw_file)
self.endpoint_request("api/filedownload/{}?as_attachment=true".format('raw_store'),
status_check=302, verbose=True)
self.endpoint_request("api/hdfstores/DATA_2016_MONTH_11.h5", status_check=200, verbose=True)
self.endpoint_request("api/hdfstores/TODAY.h5", status_check=404, verbose=True)
self.endpoint_request("api/hdfstores/TODAY.h5?as_attachment=true", status_check=404)
def test_4_upload_files(self):
print('test_upload_files:')
file_bytes = BytesIO(open(os.path.join(self.DATA_PATH, 'sensors_enerpi.json'), 'rb').read())
filename = 'other_sensors.json'
r = self.post_file('api/uploadfile/sensors', file_bytes, filename, mimetype_check='text/html',
status_check=302, verbose=True)
self.assertIn('success', r.location)
self.assertIn('editconfig/sensors', r.location)
file_bytes = BytesIO(open(os.path.join(self.DATA_PATH, 'config_enerpi.ini'), 'rb').read())
filename = 'other_config.ini'
r = self.post_file('api/uploadfile/config', file_bytes, filename, mimetype_check='text/html',
status_check=302, verbose=True)
self.assertIn('success', r.location)
self.assertIn('editconfig/config', r.location)
file_bytes = BytesIO(open(os.path.join(self.DATA_PATH, 'secret_key_for_test'), 'rb').read())
filename = 'secret_key'
r = self.post_file('api/uploadfile/encryption_key', file_bytes, filename, mimetype_check='text/html',
status_check=302, verbose=True)
self.assertIn('success', r.location)
self.assertIn('editconfig/encryption_key', r.location)
file_bytes = BytesIO(open(os.path.join(self.DATA_PATH, 'secret_key_for_test'), 'rb').read())
self.post_file('api/uploadfile/secret_key', file_bytes, filename, status_check=500, verbose=True)
file_bytes = BytesIO(open(os.path.join(self.DATA_PATH, 'secret_key_for_test'), 'rb').read())
self.post_file('api/uploadfile/flask', file_bytes, filename, status_check=404, verbose=True)
self.endpoint_request("api/uploadfile/lala", status_check=405)
def test_5_last_broadcast(self):
print('LAST ENTRY:')
self.endpoint_request("api/last", mimetype_check='application/json')
def test_6_bokeh_plots(self):
self.endpoint_request("api/bokehplot")
self.endpoint_request("api/stream/bokeh", mimetype_check='text/event-stream')
self.endpoint_request("api/stream/bokeh/last/5", mimetype_check='text/event-stream')
self.endpoint_request("api/stream/bokeh/from/today", mimetype_check='text/event-stream')
self.endpoint_request("api/stream/bokeh/from/2016-08-10/to/2016-08-20/?use_median=true&rs_data=2h",
status_check=404)
self.endpoint_request("api/stream/bokeh/from/2016-08-01/to/2016-09-01/?rs_data=2h&kwh=true", status_check=404)
self.endpoint_request("api/stream/bokeh/from/yesterday/to/today", mimetype_check='text/event-stream')
if __name__ == '__main__':
import unittest
unittest.main()
| mit | -7,385,280,991,985,692,000 | 49.621762 | 118 | 0.608393 | false | 3.538573 | true | false | false |
venth/aws-adfs | test/test_credential_process_json.py | 1 | 1893 | import datetime
import json
from aws_adfs import login
from mock import patch
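# Checks that login._emit_json emits the credential_process JSON contract expected
# by the AWS CLI (Version, access keys, session token and an ISO 8601 expiration).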
class TestCredentialProcessJson:
def setup_method(self, method):
self.access_key = 'AKIAIOSFODNN7EXAMPLE'
self.secret_key = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY'
self.session_token = 'AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA=='
self.expiration = datetime.datetime(2020,6,20)
self.aws_session_token = {
'Credentials': {
'AccessKeyId': self.access_key,
'SecretAccessKey': self.secret_key,
'SessionToken': self.session_token,
'Expiration': self.expiration
}
}
capture = ''
def _replace_echo(self, value):
self.capture = value
def test_json_is_valid_credential_process_format(self):
with patch('click.echo', side_effect = self._replace_echo):
login._emit_json(self.aws_session_token)
result = json.loads(self.capture)
print(result)
# Version is currently hardlocked at 1, see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html
assert result["Version"] == 1
assert result["AccessKeyId"] == self.access_key
assert result["SecretAccessKey"] == self.secret_key
assert result["SessionToken"] == self.session_token
# Expiration must be ISO8601, see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html
assert result["Expiration"] == self.expiration.isoformat()
| mit | -6,768,541,995,889,286,000 | 45.170732 | 367 | 0.684628 | false | 2.953198 | false | false | false |
bewest/glucodump | glucodump/stream.py | 1 | 3981 | #!/usr/bin/python
import sys, os
import select, socket
import usbcomm
import usb
_default_host = 'localhost'
_default_port = 23200
_READ_ONLY = select.POLLIN | select.POLLPRI
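# Stream bridges a Bayer Contour USB meter to TCP clients: meter data is fanned
# out to every connected client and client data is written back to the meter.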
class Stream(object):
def __init__(self,
host=_default_host,
port=_default_port):
self.host = host
self.port = port
self.usb = usbcomm.USBComm(idVendor=usbcomm.ids.Bayer, idProduct=usbcomm.ids.Bayer.Contour)
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setblocking(0)
self.poller = select.poll()
self.fd_to_socket = {}
self.clients = []
def close(self):
print >>sys.stderr, '\nMUX > Closing...'
for client in self.clients:
client.close()
self.usb.close()
self.server.close()
print >>sys.stderr, 'MUX > Done! =)'
def add_client(self, client):
print >>sys.stderr, 'MUX > New connection from', client.getpeername()
client.setblocking(0)
self.fd_to_socket[client.fileno()] = client
self.clients.append(client)
self.poller.register(client, _READ_ONLY)
def remove_client(self, client, why='?'):
try:
name = client.getpeername()
except:
name = 'client %d' % client.fileno()
print >>sys.stderr, 'MUX > Closing %s: %s' % (name, why)
self.poller.unregister(client)
self.clients.remove(client)
client.close()
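  # Polls the meter for data; anything read is stashed in self.sink until flush().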
def read(self):
self.sink = None
try:
data = self.usb.read( )
self.sink = data
except usb.core.USBError, e:
if e.errno != 110:
print e, dir(e), e.backend_error_code, e.errno
raise
return self.sink is not None
def flush(self):
if self.sink is not None:
for client in self.clients:
client.send(self.sink)
self.sink = None
def run(self):
try:
# self.tty.setTimeout(0) # Non-blocking
# self.tty.flushInput()
# self.tty.flushOutput()
# self.poller.register(self.usb.epout.bEndpointAddress, _READ_ONLY)
# self.fd_to_socket[self.usb.epout.bEndpointAddress] = self.usb
# print >>sys.stderr, 'MUX > Serial port: %s @ %s' % (self.device, self.baudrate)
print >>sys.stderr, 'MUX > usb port: %s' % (self.usb)
self.server.bind((self.host, self.port))
self.server.listen(5)
self.poller.register(self.server, _READ_ONLY)
self.fd_to_socket[self.server.fileno()] = self.server
print >>sys.stderr, 'MUX > Server: %s:%d' % self.server.getsockname()
print >>sys.stderr, 'MUX > Use ctrl+c to stop...\n'
while True:
events = self.poller.poll(500)
if self.read( ):
self.flush( )
for fd, flag in events:
# Get socket from fd
s = self.fd_to_socket[fd]
print fd, flag, s
if flag & select.POLLHUP:
self.remove_client(s, 'HUP')
elif flag & select.POLLERR:
self.remove_client(s, 'Received error')
elif flag & (_READ_ONLY):
# A readable server socket is ready to accept a connection
if s is self.server:
connection, client_address = s.accept()
self.add_client(connection)
# Data from serial port
elif s is self.usb:
data = s.read( )
for client in self.clients:
client.send(data)
# Data from client
else:
data = s.recv(80)
# Client has data
print "send to usb"
if data: self.usb.write(data)
# Interpret empty result as closed connection
else: self.remove_client(s, 'Got no data')
except usb.core.USBError, e:
print >>sys.stderr, '\nMUX > USB error: "%s". Closing...' % e
except socket.error, e:
print >>sys.stderr, '\nMUX > Socket error: %s' % e.strerror
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.close()
if __name__ == '__main__':
s = Stream( )
s.run( )
| gpl-2.0 | 4,263,681,791,854,187,000 | 26.455172 | 95 | 0.581261 | false | 3.458732 | false | false | false |
dburdick/fhirbase | ql/__init__.py | 1 | 4521 | import re
import os
import sha
import subprocess
from . import prepr
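# Safe nested lookup: walks the keys in ks through nested dicts, returning None on a miss.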
def getin(d, ks):
for p in ks:
if p not in d:
return None
d = d[p]
return d
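# Dependency resolution: depth-first topological sort of the {file: [imports]} map,
# raising on circular imports; returns files ordered so imports come first.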
def resolve(t):
acc = dict(idx=dict(), guard=dict(), deps=[])
for k in t:
if k not in acc['idx']:
resolve_recur(k, t, acc)
return acc['deps']
def resolve_recur(k, t, acc):
if getin(acc, ['guard',k]): raise Exception('Cycle dep %s guard %s' % (k,acc['guard'].keys()))
if getin(acc, ['idx',k]): return acc
acc['guard'][k] = True
for d in (t[k] or []):
resolve_recur(d, t, acc)
acc['guard'][k] = False
acc['deps'].append(k)
acc['idx'][k] = True
return acc
def normalize_path(pth):
return os.path.abspath(pth)
def resolve_import(fl, pth):
flpath = os.path.split(fl)
return normalize_path('/'.join(flpath[:-1]) + '/' + pth)
# TODO: support relative paths
def extract_import(fl, l):
if not re.search("^\s?--\s?#import",l): return None
pth = l.split('#import')[1].strip()
return resolve_import(fl, pth)
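# Recursively reads a SQL file and everything it pulls in via "-- #import <path>"
# lines, recording file contents in idx['files'] and edges in idx['deps'].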
def read_imports(flr, idx):
fl = normalize_path(flr)
if fl in idx['files']: return idx
if not os.path.isfile(fl):
raise Exception('Could not find file: %s' % fl)
f = open(fl, 'r')
idx['files'][fl] = f.read()
idx['deps'][fl] = []
f.seek(0)
for l in f:
dep = extract_import(fl, l)
if dep:
idx['deps'][fl].append(dep)
if dep not in idx['files']: read_imports(dep, idx)
f.close()
return idx
def silent_pgexec(db, sql):
    return subprocess.Popen("psql -d %s -c \"%s\" 2> /dev/null" % (db, sql), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.read()
def pgexec(db, sql):
pr = subprocess.Popen('psql -v ON_ERROR_STOP=1 -d %s' % db, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pr.stdin.write(sql)
pr.stdin.write("\\q\r")
pr.stdin.close()
pr.wait()
returncode = pr.returncode
err = pr.stderr and pr.stderr.read()
out = pr.stdout and pr.stdout.read()
if err and pr.returncode != 0:
print '\x1b[31m%s\x1b[0m' % err
elif err and pr.returncode == 0:
print '\x1b[33m%s\x1b[0m' % err
return dict(returncode=returncode, stderr=err, stdout=out)
def shell(cmd):
pr = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
pr.communicate()
return pr
def is_changed(db, fl, content):
    s = sha.new(content)
    print 'Digest ' + s.hexdigest()
    res = pgexec(db, 'SELECT digest FROM modules WHERE file=\'%s\'' % fl)
    return not res['stdout'] or res['stdout'].find(s.hexdigest()) == -1
def is_test_file(fl):
return fl.find('_spec.sql') > 0
def should_reload(db, fl, digest):
    # Spec files are always reloaded; other files only when the stored digest differs.
    res = pgexec(db, 'SELECT digest FROM modules WHERE file=\'%s\'' % fl)
    if is_test_file(fl): return True
    return not res['stdout'] or res['stdout'].find(digest) == -1
def hl(cl, txt):
colors = dict(red=31,green=32,yellow=33)
code = colors[cl]
return '\x1b[%sm%s\x1b[0m' % (code,txt)
def load_to_pg(db, fl, content, force=False):
s = sha.new(content).hexdigest()
if force or should_reload(db, fl, s):
print '\t<- %s' % fl
sql = prepr.process(fl, content)
res = pgexec(db, sql)
#print res['stdout']
if res['returncode'] == 0:
pgexec(db, 'DELETE FROM modules WHERE file=\'%s\'' % fl)
pgexec(db, 'INSERT INTO modules (file,digest) VALUES (\'%s\',\'%s\')' % (fl, s))
if res['stderr'] and res['returncode'] != 0:
raise Exception(res['stderr'])
def reload(db, fl, force=False):
idx = dict(files=dict(),deps=dict())
read_imports(fl, idx)
deps = resolve(idx['deps'])
silent_pgexec(db, 'CREATE table IF NOT EXISTS modules (file text primary key, digest text);')
print 'Load %s' % fl
for f in deps:
load_to_pg(db, f, idx['files'][f], force)
def reload_test(db, fl, force=False):
idx = dict(files=dict(),deps=dict())
read_imports(fl, idx)
deps = resolve(idx['deps'])
silent_pgexec(db, 'CREATE table IF NOT EXISTS modules (file text primary key, digest text);')
print 'Load %s' % fl
for f in deps:
load_to_pg(db, f, idx['files'][f])
def pgdump(db):
print("mkdir -p dist && pg_dump %s --format=plain --no-acl --no-owner --file=dist/fhirbase.sql" % db)
os.system("mkdir -p dist && pg_dump %s --format=plain --no-acl --no-owner --file=dist/fhirbase.sql" % db)
def test():
deps = dict(a=['b','c','z'], c=['d','z'], b=['d','e'], x=['y','z'])
print resolve(deps)
| mit | -389,168,492,244,733,900 | 30.615385 | 147 | 0.588144 | false | 3.003987 | false | false | false |
golgoth31/pybroker | drivers/external/inputs.py | 1 | 3092 | # -*- coding: utf-8 -*-
# pybroker
# Copyright (c) 2016 David Sabatie <[email protected]>
#
# This file is part of Pybroker.
#
# Foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import logging
import socket
import zmq
import json
import threading
from zmq.devices import Device
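# Work exposes a broker worker to the outside world: it binds an external ROUTER
# socket and proxies traffic between it and the worker's internal socket.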
class Work():
"""ServerWorker"""
def __init__(self, options, worker):
self.options = options
self.worker = worker['out']
self.logger = logging.getLogger(
'pybroker.driver.external.inputs.' + self.worker.identity)
self.begin = 0
# Initiate the external socket
self.logger.debug("Starting external socket")
self._external = zmq.Context.instance()
# bind to external socket
self.worker_external = self._external.socket(zmq.ROUTER)
self.worker_external.bind(
self.options['type']+"://" + self.options['bind_address'] + ":"+str(self.options['bind_port']))
# poll_all = zmq.Poller()
# poll_all.register(self.worker_external, zmq.POLLIN)
def run(self):
# Startup message sequence
# self.logger.debug("Saying hello to the server from "+self.worker.identity)
zmq.proxy(self.worker_external, self.worker)
# self.worker.send_multipart(['toto'])
# while True:
# try:
# self.logger.debug('Entering main loop ...')
#
# self.logger.debug("Polling queues ...")
# socks = dict(poll_all.poll(timeout=0))
#
# self.logger.debug("Looking for data from the outside world ...")
# # Handle worker activity on self.broker_input
# if socks.get(self.worker_external) == zmq.POLLIN:
# self.logger.debug("getting external message")
# # Get client request
# msg = self.worker_external.recv_multipart()
# self.logger.debug(msg)
# if not msg:
# break
#
# # Stop action !
# if msg[3].lower() == 'stop':
# self.logger.info("Stop message received ! Stoping ...")
# break
#
# time.sleep(2)
# except KeyboardInterrupt:
# self.logger.critical(
# "Keyboard interrupt received, stopping...")
# break
        self._external.destroy()
exit(1)
| gpl-3.0 | -7,790,567,582,930,434,000 | 34.54023 | 107 | 0.581177 | false | 4.122667 | false | false | false |
chapmanb/cwltool | cwltool/load_tool.py | 1 | 11202 | # pylint: disable=unused-import
"""Loads a CWL document."""
import logging
import os
import re
import uuid
import requests.sessions
import schema_salad.schema as schema
from avro.schema import Names
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from schema_salad.ref_resolver import Loader, Fetcher, file_uri
from schema_salad.sourceline import cmap
from schema_salad.validate import ValidationException
from typing import Any, Callable, cast, Dict, List, Text, Tuple, Union
from six.moves import urllib
from six import itervalues, string_types
from . import process
from . import update
from .errors import WorkflowException
from .process import Process, shortname
_logger = logging.getLogger("cwltool")
def fetch_document(argsworkflow, # type: Union[Text, dict[Text, Any]]
resolver=None, # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
fetcher_constructor=None
# type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
):
# type: (...) -> Tuple[Loader, CommentedMap, Text]
"""Retrieve a CWL document."""
document_loader = Loader({"cwl": "https://w3id.org/cwl/cwl#", "id": "@id"},
fetcher_constructor=fetcher_constructor)
uri = None # type: Text
workflowobj = None # type: CommentedMap
if isinstance(argsworkflow, string_types):
split = urllib.parse.urlsplit(argsworkflow)
if split.scheme:
uri = argsworkflow
elif os.path.exists(os.path.abspath(argsworkflow)):
uri = file_uri(str(os.path.abspath(argsworkflow)))
elif resolver:
uri = resolver(document_loader, argsworkflow)
if uri is None:
raise ValidationException("Not found: '%s'" % argsworkflow)
if argsworkflow != uri:
_logger.info("Resolved '%s' to '%s'", argsworkflow, uri)
fileuri = urllib.parse.urldefrag(uri)[0]
workflowobj = document_loader.fetch(fileuri)
elif isinstance(argsworkflow, dict):
uri = "#" + Text(id(argsworkflow))
workflowobj = cast(CommentedMap, cmap(argsworkflow, fn=uri))
else:
raise ValidationException("Must be URI or object: '%s'" % argsworkflow)
return document_loader, workflowobj, uri
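# Rewrites the stdout/stderr/stdin shortcuts of a CommandLineTool into explicit
# File outputs/inputs so later processing only deals with regular files.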
def _convert_stdstreams_to_files(workflowobj):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> None
if isinstance(workflowobj, dict):
if workflowobj.get('class') == 'CommandLineTool':
for out in workflowobj.get('outputs', []):
for streamtype in ['stdout', 'stderr']:
if out.get('type') == streamtype:
if 'outputBinding' in out:
raise ValidationException(
"Not allowed to specify outputBinding when"
" using %s shortcut." % streamtype)
if streamtype in workflowobj:
filename = workflowobj[streamtype]
else:
filename = Text(uuid.uuid4())
workflowobj[streamtype] = filename
out['type'] = 'File'
out['outputBinding'] = {'glob': filename}
for inp in workflowobj.get('inputs', []):
if inp.get('type') == 'stdin':
if 'inputBinding' in inp:
raise ValidationException(
"Not allowed to specify inputBinding when"
" using stdin shortcut.")
if 'stdin' in workflowobj:
raise ValidationException(
"Not allowed to specify stdin path when"
" using stdin type shortcut.")
else:
workflowobj['stdin'] = \
"$(inputs.%s.path)" % \
inp['id'].rpartition('#')[2]
inp['type'] = 'File'
else:
for entry in itervalues(workflowobj):
_convert_stdstreams_to_files(entry)
if isinstance(workflowobj, list):
for entry in workflowobj:
_convert_stdstreams_to_files(entry)
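# Gives embedded "run" processes that lack an id a freshly generated UUID.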
def _add_blank_ids(workflowobj):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> None
if isinstance(workflowobj, dict):
if ("run" in workflowobj and
isinstance(workflowobj["run"], dict) and
"id" not in workflowobj["run"] and
"$import" not in workflowobj["run"]):
workflowobj["run"]["id"] = Text(uuid.uuid4())
for entry in itervalues(workflowobj):
_add_blank_ids(entry)
if isinstance(workflowobj, list):
for entry in workflowobj:
_add_blank_ids(entry)
def validate_document(document_loader, # type: Loader
workflowobj, # type: CommentedMap
uri, # type: Text
enable_dev=False, # type: bool
strict=True, # type: bool
preprocess_only=False, # type: bool
fetcher_constructor=None
# type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
):
# type: (...) -> Tuple[Loader, Names, Union[Dict[Text, Any], List[Dict[Text, Any]]], Dict[Text, Any], Text]
"""Validate a CWL document."""
if isinstance(workflowobj, list):
workflowobj = {
"$graph": workflowobj
}
if not isinstance(workflowobj, dict):
raise ValueError("workflowjobj must be a dict, got '%s': %s" % (type(workflowobj), workflowobj))
jobobj = None
if "cwl:tool" in workflowobj:
jobobj, _ = document_loader.resolve_all(workflowobj, uri)
uri = urllib.parse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
del cast(dict, jobobj)["https://w3id.org/cwl/cwl#tool"]
workflowobj = fetch_document(uri, fetcher_constructor=fetcher_constructor)[1]
fileuri = urllib.parse.urldefrag(uri)[0]
if "cwlVersion" in workflowobj:
if not isinstance(workflowobj["cwlVersion"], (str, Text)):
raise Exception("'cwlVersion' must be a string, got %s" % type(workflowobj["cwlVersion"]))
workflowobj["cwlVersion"] = re.sub(
r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "",
workflowobj["cwlVersion"])
else:
_logger.warn("No cwlVersion found, treating this file as draft-2.")
workflowobj["cwlVersion"] = "draft-2"
if workflowobj["cwlVersion"] == "draft-2":
workflowobj = cast(CommentedMap, cmap(update._draft2toDraft3dev1(
workflowobj, document_loader, uri, update_steps=False)))
if "@graph" in workflowobj:
workflowobj["$graph"] = workflowobj["@graph"]
del workflowobj["@graph"]
(sch_document_loader, avsc_names) = \
process.get_schema(workflowobj["cwlVersion"])[:2]
if isinstance(avsc_names, Exception):
raise avsc_names
processobj = None # type: Union[CommentedMap, CommentedSeq, unicode]
document_loader = Loader(sch_document_loader.ctx, schemagraph=sch_document_loader.graph,
idx=document_loader.idx, cache=sch_document_loader.cache,
fetcher_constructor=fetcher_constructor)
_add_blank_ids(workflowobj)
workflowobj["id"] = fileuri
processobj, metadata = document_loader.resolve_all(workflowobj, fileuri)
if not isinstance(processobj, (CommentedMap, CommentedSeq)):
raise ValidationException("Workflow must be a dict or list.")
if not metadata:
if not isinstance(processobj, dict):
raise ValidationException("Draft-2 workflows must be a dict.")
metadata = cast(CommentedMap, cmap({"$namespaces": processobj.get("$namespaces", {}),
"$schemas": processobj.get("$schemas", []),
"cwlVersion": processobj["cwlVersion"]},
fn=fileuri))
_convert_stdstreams_to_files(workflowobj)
if preprocess_only:
return document_loader, avsc_names, processobj, metadata, uri
schema.validate_doc(avsc_names, processobj, document_loader, strict)
if metadata.get("cwlVersion") != update.LATEST:
processobj = cast(CommentedMap, cmap(update.update(
processobj, document_loader, fileuri, enable_dev, metadata)))
if jobobj:
metadata[u"cwl:defaults"] = jobobj
return document_loader, avsc_names, processobj, metadata, uri
def make_tool(document_loader, # type: Loader
avsc_names, # type: Names
metadata, # type: Dict[Text, Any]
uri, # type: Text
makeTool, # type: Callable[..., Process]
kwargs # type: dict
):
# type: (...) -> Process
"""Make a Python CWL object."""
resolveduri = document_loader.resolve_ref(uri)[0]
if isinstance(resolveduri, list):
if len(resolveduri) == 1:
processobj = resolveduri[0]
else:
raise WorkflowException(
u"Tool file contains graph of multiple objects, must specify "
"one of #%s" % ", #".join(
urllib.parse.urldefrag(i["id"])[1] for i in resolveduri
if "id" in i))
elif isinstance(resolveduri, dict):
processobj = resolveduri
else:
raise Exception("Must resolve to list or dict")
kwargs = kwargs.copy()
kwargs.update({
"makeTool": makeTool,
"loader": document_loader,
"avsc_names": avsc_names,
"metadata": metadata
})
tool = makeTool(processobj, **kwargs)
if "cwl:defaults" in metadata:
jobobj = metadata["cwl:defaults"]
for inp in tool.tool["inputs"]:
if shortname(inp["id"]) in jobobj:
inp["default"] = jobobj[shortname(inp["id"])]
return tool
def load_tool(argsworkflow, # type: Union[Text, Dict[Text, Any]]
makeTool, # type: Callable[..., Process]
kwargs=None, # type: dict
enable_dev=False, # type: bool
strict=True, # type: bool
resolver=None, # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
fetcher_constructor=None # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
):
# type: (...) -> Process
document_loader, workflowobj, uri = fetch_document(argsworkflow, resolver=resolver,
fetcher_constructor=fetcher_constructor)
document_loader, avsc_names, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, enable_dev=enable_dev,
strict=strict, fetcher_constructor=fetcher_constructor)
return make_tool(document_loader, avsc_names, metadata, uri,
makeTool, kwargs if kwargs else {})
| apache-2.0 | 5,150,466,595,201,390,000 | 40.335793 | 118 | 0.577218 | false | 4.181411 | false | false | false |
neiesc/Problem-solving | Implementation/Sort Algorithms/timsort.py | 1 | 2733 | #!/bin/python3
# https://hackernoon.com/timsort-the-fastest-sorting-algorithm-youve-never-heard-of-36b28417f399
# https://pt.wikipedia.org/wiki/Timsort
# based off of this code https://gist.github.com/nandajavarma/a3a6b62f34e74ec4c31674934327bbd3
# Brandon Skerritt
# https://skerritt.tech
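# Binary search for the index where `item` should be inserted in the sorted
# slice the_array[start:end] (used by the insertion sort below).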
def binary_search(the_array, item, start, end):
if start == end:
if the_array[start] > item:
return start
else:
return start + 1
if start > end:
return start
mid = round((start + end)/ 2)
if the_array[mid] < item:
return binary_search(the_array, item, mid + 1, end)
elif the_array[mid] > item:
return binary_search(the_array, item, start, mid - 1)
else:
return mid
"""
Insertion sort that timsort uses if the array size is small or if
the size of the "run" is small
"""
def insertion_sort(the_array):
l = len(the_array)
for index in range(1, l):
value = the_array[index]
pos = binary_search(the_array, value, 0, index - 1)
the_array = the_array[:pos] + [value] + the_array[pos:index] + the_array[index+1:]
return the_array
def merge(left, right):
"""Takes two sorted lists and returns a single sorted list by comparing the
elements one at a time.
    e.g. merge([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]
"""
if not left:
return right
if not right:
return left
if left[0] < right[0]:
return [left[0]] + merge(left[1:], right)
return [right[0]] + merge(left, right[1:])
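# Simplified timsort: split the input into runs, insertion-sort each run, then
# merge the sorted runs one after another.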
def timsort(the_array):
runs, sorted_runs = [], []
length = len(the_array)
new_run = [the_array[0]]
# for every i in the range of 1 to length of array
for i in range(1, length):
# if i is at the end of the list
if i == length - 1:
new_run.append(the_array[i])
runs.append(new_run)
break
# if the i'th element of the array is less than the one before it
if the_array[i] < the_array[i-1]:
            # close the current run and start the next one at the current element
            if not new_run:
                new_run = [the_array[i]]
            else:
                runs.append(new_run)
                new_run = [the_array[i]]
# else if its equal to or more than
else:
new_run.append(the_array[i])
# for every item in runs, append it using insertion sort
for item in runs:
sorted_runs.append(insertion_sort(item))
# for every run in sorted_runs, merge them
sorted_array = []
for run in sorted_runs:
sorted_array = merge(sorted_array, run)
    print(sorted_array)
    return sorted_array
list = [2, 3, 1, 5, 6, 7]
print(timsort(list))
print(sorted(list))
krzysztof/invenio-pidrelations | invenio_pidrelations/api.py | 1 | 9263 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""API for PID relations concepts."""
from __future__ import absolute_import, print_function
from invenio_db import db
from invenio_pidstore.models import PersistentIdentifier
from sqlalchemy.exc import IntegrityError
from .models import PIDRelation
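# PIDConcept wraps a (parent, child, relation_type) triple from the PIDRelation
# table and exposes queries over parents, children, ordering and siblings.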
class PIDConcept(object):
"""API for PID version relations."""
def __init__(self, child=None, parent=None, relation_type=None,
relation=None):
"""Create a PID concept API object."""
if relation:
self.relation = relation
self.child = relation.child
self.parent = relation.parent
self.relation_type = relation.relation_type
else:
self.child = child
self.parent = parent
self.relation_type = relation_type
# If child and parent (primary keys) are not None,
# try to set the relation
if child and parent:
self.relation = PIDRelation.query.get(
(self.parent.id, self.child.id))
else:
self.relation = None
@property
def parents(self):
"""Return the PID parents for given relation."""
filter_cond = [PIDRelation.child_id == self.child.id, ]
if self.relation_type is not None:
filter_cond.append(PIDRelation.relation_type == self.relation_type)
return db.session.query(PersistentIdentifier).join(
PIDRelation,
PIDRelation.parent_id == PersistentIdentifier.id
).filter(*filter_cond)
@property
def exists(self):
"""Determine if a PID Concept exists.
Determine if constructed API object describes an existing PID Concept.
The definition of that will vary across different PID Concepts, but
it's intended use is to check if given child/parent PIDs are in
the described relation.
"""
return bool(self.relation)
@property
def is_ordered(self):
"""Determine if the concept is an ordered concept."""
return all(val is not None for val in self.children.with_entities(
PIDRelation.index))
@property
def has_parents(self):
"""Determine if there are any parents in this relationship."""
return self.parents.count() > 0
@property
def parent(self):
"""Return the parent of the PID in given relation.
NOTE: Not supporting relations, which allow for multiple parents,
e.g. Collection.
None if not found
Raises 'sqlalchemy.orm.exc.MultipleResultsFound' for multiple parents.
"""
if self._parent is None:
parent = self.parents.one_or_none()
self._parent = parent
return self._parent
@parent.setter
def parent(self, parent):
self._parent = parent
@property
def is_parent(self):
"""Determine if the provided parent is a parent in the relation."""
return self.has_children
def get_children(self, ordered=False, pid_status=None):
"""Get all children of the parent."""
filter_cond = [PIDRelation.parent_id == self.parent.id, ]
if pid_status is not None:
filter_cond.append(PersistentIdentifier.status == pid_status)
if self.relation_type is not None:
filter_cond.append(PIDRelation.relation_type == self.relation_type)
q = db.session.query(PersistentIdentifier).join(
PIDRelation,
PIDRelation.child_id == PersistentIdentifier.id
).filter(*filter_cond)
if ordered:
return q.order_by(PIDRelation.index.asc())
else:
return q
@property
def index(self):
"""Index of the child in the relation."""
return self.relation.index
@property
def children(self):
"""Children of the parent."""
return self.get_children()
@property
def has_children(self):
"""Determine if there are any children in this relationship."""
return self.children.count() > 0
@property
def is_last_child(self):
"""
Determine if 'pid' is the latest version of a resource.
        Resolves True for versioned PIDs that are the last of their siblings,
        False otherwise (including for Head PIDs).
"""
last_child = self.last_child
if last_child is None:
return False
return last_child == self.child
@property
def last_child(self):
"""
Get the latest PID as pointed by the Head PID.
If the 'pid' is a Head PID, return the latest of its children.
If the 'pid' is a Version PID, return the latest of its siblings.
Return None for the non-versioned PIDs.
"""
return self.get_children(ordered=False).filter(
PIDRelation.index.isnot(None)).order_by(
PIDRelation.index.desc()).first()
@property
def next(self):
"""Get the next sibling in the PID relation."""
if self.relation.index is not None:
return self.children.filter_by(
index=self.relation.index + 1).one_or_none()
else:
return None
@property
def previous(self):
"""Get the previous sibling in the PID relation."""
if self.relation.index is not None:
return self.children.filter_by(
index=self.relation.index - 1).one_or_none()
else:
return None
@property
def is_child(self):
"""
Determine if 'pid' is a Version PID.
Resolves as True for any PID which has a Head PID, False otherwise.
"""
return self.has_parents
def insert_child(self, child, index=None):
"""Insert a new child into a PID concept.
Argument 'index' can take the following values:
0,1,2,... - insert child PID at the specified position
-1 - insert the child PID at the last position
None - insert child without order (no re-ordering is done)
NOTE: If 'index' is specified, all sibling relations should
have PIDRelation.index information.
"""
try:
with db.session.begin_nested():
if index is not None:
child_relations = self.parent.child_relations.filter(
PIDRelation.relation_type ==
self.relation_type).order_by(PIDRelation.index).all()
relation_obj = PIDRelation.create(
self.parent, child, self.relation_type, None)
if index == -1:
child_relations.append(relation_obj)
else:
child_relations.insert(index, relation_obj)
for idx, c in enumerate(child_relations):
c.index = idx
else:
relation_obj = PIDRelation.create(
self.parent, child, self.relation_type, None)
except IntegrityError:
raise Exception("PID Relation already exists.")
def remove_child(self, child, reorder=False):
"""Remove a child from a PID concept."""
with db.session.begin_nested():
relation = PIDRelation.query.filter_by(
parent_id=self.parent.id,
child_id=child.id,
relation_type=self.relation_type).one()
db.session.delete(relation)
if reorder:
child_relations = self.parent.child_relations.filter(
PIDRelation.relation_type == self.relation_type).order_by(
PIDRelation.index).all()
for idx, c in enumerate(child_relations):
c.index = idx
class PIDConceptOrdered(PIDConcept):
"""Standard PID Concept with childred ordering."""
@property
def children(self):
"""Overwrite the children property to always return them ordered."""
return self.get_children(ordered=True)
@property
def is_ordered(self):
"""Determine if the concept is an ordered concept."""
return True
__all__ = (
'PIDConcept',
'PIDConceptOrdered',
)
| gpl-2.0 | -7,715,778,306,867,326,000 | 33.823308 | 79 | 0.603476 | false | 4.490063 | false | false | false |
baiyubin/python_practice | StronglyConnectedComponents.py | 2 | 1530 | '''
Created on Jun 22, 2013
@author: Yubin Bai
All rights reserved.
'''
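# Tarjan's algorithm: a single DFS assigns each node a visit number and a low-link
# number; whenever a node's low-link equals its own number, the nodes above it on
# the stack form one strongly connected component and are printed.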
def stronglyConnectedComponents(graph):
DFS_WHITE = 0
dfsNum = {} # number of a node
dfsLowNum = {} # lowest number met before this node
dfsNumCounter = [0]
dfsSCC = []
inStack = set()
for v in graph:
dfsNum[v] = DFS_WHITE
resultStr = []
def tarjanSCC(u):
dfsNumCounter[0] += 1
dfsLowNum[u] = dfsNum[u] = dfsNumCounter[0]
dfsSCC.append(u)
inStack.add(u) # stores u based on order of visitation
for v in graph[u]:
if dfsNum[v] == DFS_WHITE: # a tree edge
tarjanSCC(v)
if v in inStack: # condition for update
dfsLowNum[u] = min(
dfsLowNum[u], dfsLowNum[v]) # update dfsLowNum[u]
# after dfs for the branch
if dfsLowNum[u] == dfsNum[u]: # if this is a root of SCC
resultStr.append("SCC: ")
while (dfsSCC and dfsSCC[-1] != u):
v = dfsSCC[-1]
resultStr.append("%d " % v)
inStack.remove(v)
dfsSCC.pop()
v = dfsSCC[-1]
resultStr.append("%d\n" % v)
inStack.remove(v)
dfsSCC.pop()
for v in graph:
if dfsNum[v] == DFS_WHITE:
tarjanSCC(v)
print(''.join(resultStr))
if __name__ == '__main__':
graph = {0: [1], 1: [3], 2: [1], 3: [2, 4], 4: [5], 5: [7], 6: [4], 7: [6]}
stronglyConnectedComponents(graph)
| apache-2.0 | 4,697,648,897,265,538,000 | 26.321429 | 79 | 0.503268 | false | 3.029703 | false | false | false |
jlmadurga/listenclosely | listenclosely/models.py | 1 | 7496 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_fsm import FSMField, transition
from django.utils.encoding import python_2_unicode_compatible
from listenclosely import managers
from django.utils.timezone import now
import datetime
class NoAgentFound(Exception):
"""
Raised when strategy can not find agent to attend chat
"""
class AbstractContact(models.Model):
id_service = models.CharField(_("Id Service"), unique=True, db_index=True, max_length=128)
created = models.DateTimeField(_("Date created"), auto_now_add=True)
class Meta:
abstract = True
verbose_name = _("Contact")
verbose_name_plural = _("Contacts")
@python_2_unicode_compatible
class Asker(AbstractContact):
"""
Customer, client, ... the who ask a question and starts a chat
"""
class Meta:
verbose_name = _("Asker")
verbose_name_plural = _("Askers")
def __str__(self):
return _(u"Asker(id_service: %(id_service)s") % {'id_service': self.id_service}
@python_2_unicode_compatible
class Agent(AbstractContact):
"""
One who answer chat
"""
OFFLINE, ONLINE, BUSY = "Offline", "Online", "Busy"
STATE_CHOICES = (
(OFFLINE, _("Offline")),
(ONLINE, _("Online")),
(BUSY, _("Busy")))
state = FSMField(default=OFFLINE, choices=STATE_CHOICES)
class Meta:
verbose_name = _("Agent")
verbose_name_plural = _("Agents")
objects = models.Manager()
offline = managers.OfflineAgentManager()
online = managers.OnlineAgentManager()
busy = managers.BusyAgentManager()
def __str__(self):
return _(u"Agent(id_service: %(id_service)s, state:%(state)s") % {'id_service': self.id_service,
'state': self.state}
@transition(field=state, source=OFFLINE, target=ONLINE)
def register(self):
"""
Agent is registered into the system so now is online to answers
"""
@transition(field=state, source=ONLINE, target=OFFLINE)
def unregister(self):
"""
Agent is not online anymore
"""
@transition(field=state, source=ONLINE, target=BUSY)
def attend(self, chat):
"""
Agent is assigned to a chat so it is busy answering
"""
chat.agent = self
@transition(field=state, source=BUSY, target=ONLINE)
def release(self, chat):
"""
Agent finishes chat
"""
@python_2_unicode_compatible
class Chat(models.Model):
asker = models.ForeignKey(Asker, verbose_name=_("Asker"), related_name="chats")
agent = models.ForeignKey(Agent, null=True, blank=True, verbose_name=_("Agent"), related_name="chats")
created = models.DateTimeField(_("Date created"), auto_now_add=True)
last_modified = models.DateTimeField(_("Last modified"), auto_now=True)
PENDING, LIVE, TERMINATED = "Pending", "Live", "Terminated"
STATE_CHOICES = (
(PENDING, _("Pending")),
(LIVE, _("Live")),
(TERMINATED, _("Terminated")))
state = FSMField(default=PENDING, choices=STATE_CHOICES)
class Meta:
verbose_name = _("Chat")
verbose_name_plural = _("Chats")
objects = models.Manager()
pending = managers.PendingChatsManager()
live = managers.LiveChatsManager()
terminated = managers.TerminatedChatsManager()
def __str__(self):
return _(u"Chat(asker: %(asker)s, agent: %(agent)s, state: %(state)s") % {'asker': self.asker,
'agent': self.agent,
'state': self.state}
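    # Incoming service messages drive the chat FSM: the first message tries to grab
    # a free agent (raising NoAgentFound if none is available), and every message is
    # relayed through the backend to the other side of the conversation.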
@transition(field=state, source=[PENDING, LIVE], target=LIVE)
def handle_message(self, message_id_service, contact_id_service, content, listenclosely_app):
message = Message(id_service_in=message_id_service,
chat=self,
content=content,
type=Message.INCOMING if contact_id_service == self.asker.id_service else Message.OUTGOING)
if not self.agent:
# get free online agents
free_agent = listenclosely_app.strategy.free_agent()
if free_agent:
free_agent.attend(self)
free_agent.save()
else:
message.save()
                # TODO: raise a specific exception when no free agent can attend. Send auto message
raise NoAgentFound("No agent to attend %s created by %s" % (self.id, contact_id_service))
sent_id = listenclosely_app.service_backend.send_message(message.chat.agent.id_service if message.incoming()
else message.chat.asker.id_service,
content)
if sent_id:
message.id_service_out = sent_id
message.t_sent = now()
message.save()
@transition(field=state, source=PENDING, target=LIVE)
def attend_pending(self, agent, listenclosely_app):
agent.attend(self)
agent.save()
for message in self.messages.all():
sent = listenclosely_app.service_backend.send_message(message.chat.agent.id_service if message.incoming()
else message.chat.asker.id_service,
message.content)
if sent:
message.t_sent = now()
message.save()
@transition(field=state, source=LIVE, target=TERMINATED)
def terminate(self):
"""
Chat is finished and Agent is free
"""
self.agent.release(self)
self.agent.save()
def is_obsolete(self, time_offset):
"""
Check if chat is obsolete
"""
return now() > datetime.timedelta(seconds=time_offset) + self.last_modified
@python_2_unicode_compatible
class Message(models.Model):
id_service_in = models.CharField(_("Id Service In"), unique=True, db_index=True, max_length=128)
id_service_out = models.CharField(_("Id service Out"), null=True, blank=True, max_length=128)
chat = models.ForeignKey(Chat, verbose_name=_("Chat"), related_name="messages")
created = models.DateTimeField(_("Date created"), auto_now_add=True)
t_sent = models.DateTimeField(_("Date sent"), null=True, blank=True)
content = models.TextField(_("Content"))
INCOMING, OUTGOING = "Incoming", "Outgoing"
TYPE_CHOICES = ((INCOMING, _("Incoming")),
(OUTGOING, _("Outgoing")),
)
type = models.CharField(_("Type"), max_length=128, default=INCOMING, choices=TYPE_CHOICES)
class Meta:
verbose_name = _("Message")
verbose_name_plural = _("Messages")
def incoming(self):
return self.type == self.INCOMING
def outgoing(self):
return self.type == self.OUTGOING
def __str__(self):
return _(u"Chat(id_service: %(id_service)s, chat: %(chat)s") % {'id_service': self.id_service_in,
'chat': self.chat} | bsd-3-clause | -1,435,934,371,155,922,200 | 37.64433 | 117 | 0.562834 | false | 4.173719 | false | false | false |
Jacques-Florence/schedSim | src/analysis/temperature.py | 1 | 1287 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import sys
filename = "reports/configuration.conftemperatureReport.txt"
if (len(sys.argv) > 1):
filename = sys.argv[1]
with open(filename) as f:
print f.readline()
time = []
temp = []
for line in f:
entry = line.split(":")
#if (float(entry[0]) > 90000000):
time.append(float(entry[0]))
temp.append(float(entry[1]))
#f = open("reports/energyReport.txt")
#line = f.readline()
#time2 = []
#power = []
#for line in f:
# entry = line.split(":")
# time2.append(float(entry[0]))
# power.append(float(entry[1]))
#f.close()
#avg = []
#buf = []
#bufindex = 0
#for i in range(0,20001):
# buf.append(temp[i])
#for i in range (0,10000):
# avg.append(0.0)
#sum = 0
#for i in range(0,20001):
# sum += buf[i]
#avg.append(sum/20001)
#for i in range(10001, len(temp)-10000):
# sum -= buf[bufindex]
# buf[bufindex] = temp[i+10000]
# sum += buf[bufindex]
# avg.append(sum/20001)
# bufindex += 1
# if (bufindex == 20000):
# bufindex = 0
#for i in range (len(temp)-10001,len(temp)-1):
# avg.append(0)
#avg = []
#alpha = 0.0001
#avg.append(0.0)
#for i in range(1,len(temp)):
# a = temp[i]*alpha + (1-alpha)*avg[i-1]
# avg.append(a)
plt.plot(time, temp, 'ro')
#plt.plot(time2, power, 'bo')
#plt.plot(time, avg, 'b-')
plt.show()
| bsd-3-clause | 5,558,092,028,682,520,000 | 17.385714 | 60 | 0.624709 | false | 2.34854 | false | false | false |
yeatmanlab/BrainTools | projects/NLR_MEG/summer_interv_session2.py | 1 | 12224 | # -*- coding: utf-8 -*-
# Authors: Sung Jun Joo; Jason Yeatman; Kambiz Tavabi <[email protected]>
#
#
# License: BSD (3-clause)
import numpy as np
import mnefun
import os
#import glob
os.chdir('/home/sjjoo/git/BrainTools/projects/NLR_MEG')
from score import score
from nlr_organizeMEG_mnefun import nlr_organizeMEG_mnefun
import mne
import time
#import pycuda.driver
#import pycuda.autoinit
t0 = time.time()
mne.set_config('MNE_USE_CUDA', 'true')
# At Possum projects folder mounted in the local disk
raw_dir = '/mnt/diskArray/projects/MEG/nlr/raw'
# At local hard drive
out_dir = '/mnt/scratch/NLR_MEG4'
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
subs = ['NLR_102_RS','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_132_WP','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF', # 'NLR_162_EF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_GB355'] # 'NLR_187_NB',
# tmin, tmax: sets the epoch
# bmin, bmax: sets the prestim duration for baseline correction. baseline is set
# as individual as default. Refer to _mnefun.py bmax is 0.0 by default
# hp_cut, lp_cut: set cutoff frequencies for highpass and lowpass
# I found that hp_cut of 0.03 is problematic because the tansition band is set
# to 5 by default, which makes the negative stopping frequency (0.03-5).
# It appears that currently data are acquired (online) using bandpass filter
# (0.03 - 326.4 Hz), so it might be okay not doing offline highpass filtering.
# It's worth checking later though. However, I think we should do baseline
# correction by setting bmin and bmax. I found that mnefun does baseline
# correction by default.
# sjjoo_20160809: Commented
#params = mnefun.Params(tmin=-0.1, tmax=0.9, n_jobs=18, # t_adjust was -39e-3
# decim=2, n_jobs_mkl=1, proj_sfreq=250,
# n_jobs_fir='cuda', n_jobs_resample='cuda',
# filter_length='5s', epochs_type='fif', lp_cut=40.,
## hp_cut=0.15,hp_trans=0.1,
# bmin=-0.1, auto_bad=20., plot_raw=False,
# bem_type = '5120-5120-5120')
# This sets the position of the head relative to the sensors. These values a
# A typical head position. So now in sensor space everyone is aligned. However
# We should also note that for source analysis it is better to leave this as
# the mne-fun default ==> Let's put None!!!
""" Organize subjects """
#out,ind = nlr_organizeMEG_mnefun(raw_dir=raw_dir,out_dir=out_dir,subs=subs)
""" The directory structure is really messy -- let's not use this function. """
os.chdir(out_dir)
#
#print(out)
#params.subjects.sort() # Sort the subject list
#print("Done sorting subjects.\n")
""" Attention!!!
164_sf160707_4_raw.fif: continuous HPI was not active in this file!
170_gm160613_5_raw.fif: in _fix_raw_eog_cals...non equal eog arrays???
172_th160825_6_raw.fif: origin of head out of helmet
201_gs150729_2_raw.fif: continuous HPI was not active in this file!
174_hs160620_1_raw.fif: Too many bad channels (62 based on grad=4000e-13, mag=4.0e-12)
174_hs160829_1_raw.fif: Too many bad channels (62 based on grad=4000e-13, mag=4.0e-12)
163_lf160707 : Too many bad channels --> Use grad=5000e-13, mag=5.0e-12
163_lf160920 : : Too many bad channels --> Use grad=5000e-13, mag=5.0e-12
"""
#for n, s in enumerate(badsubs):
# subnum = out.index(s)
# print('Removing subject ' + str(subnum) + ' ' + out[subnum])
# out.remove(s)
# ind[subnum] = []
# ind.remove([])
out = ['102_rs160815','105_bb161011','110_hh160809','127_am161004',
'132_wp161122','145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','163_lf160920', #'162_ef160829',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925','203_am151029',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
'nlr_hb275170828','nlr_gb355170907'] # 187_nb161205: EOG channel number suddenly changes at run4
#%%
out = ['170_gm160822'] #'162_ef160829',
# '164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
# '180_zd160826','201_gs150925','203_am151029',
# '204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
# 'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
# 'nlr_hb275170828','nlr_gb355170907']
for n, s in enumerate(out):
print(s)
for n, s in enumerate(out):
params = mnefun.Params(tmin=-0.1, tmax=0.9, n_jobs=18, # t_adjust was -39e-3
decim=2, n_jobs_mkl=1, proj_sfreq=250,
n_jobs_fir='cuda', n_jobs_resample='cuda',
filter_length='5s', epochs_type='fif', lp_cut=40.,
# hp_cut=0.15,hp_trans=0.1,
bmin=-0.1, auto_bad=20., plot_raw=False)
# bem_type = '5120-5120-5120')
params.subjects = [s]
params.sss_type = 'python'
params.sss_regularize = 'in' # 'in' by default
params.tsss_dur = 8. # 60 for adults with not much head movements. This was set to 6.
params.st_correlation = 0.9
params.auto_bad_meg_thresh = 10 # THIS SHOULD NOT BE SO HIGH!
params.trans_to = None #'median'
params.t_adjust = -39e-3 # time delay from the trigger. It's due to set trigger function. I don't know why...
#print("Running " + str(len(params.subjects)) + ' Subjects')
# print("\n\n".join(params.subjects))
print("\n\n")
print("Running " + str(params.subjects))
print("\n\n")
params.subject_indices = np.arange(0,len(params.subjects))
params.structurals =[None] * len(params.subjects)
if s == '187_nb161205':
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_5','%s_6']
elif s == '172_th160825':
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_4', '%s_5']
else:
params.run_names = ['%s_1', '%s_2', '%s_3', '%s_4', '%s_5', '%s_6']
#params.subject_run_indices = np.array([
# np.arange(0,ind[0]),np.arange(0,ind[1]),np.arange(0,ind[2]),np.arange(0,ind[3]),
# np.arange(0,ind[4]),np.arange(0,ind[5]),np.arange(0,ind[6]),np.arange(0,ind[7]),
# np.arange(0,ind[8]),np.arange(0,ind[9])#,np.arange(0,ind[11])
## np.arange(0,ind[12]),np.arange(0,ind[13]),np.arange(0,ind[14]),np.arange(0,ind[15]),
## np.arange(0,ind[16]),np.arange(0,ind[17]),np.arange(0,ind[18]),np.arange(0,ind[19]),
## np.arange(0,ind[20]),np.arange(0,ind[21]),np.arange(0,ind[22]),np.arange(0,ind[23]),
## np.arange(0,ind[24])
#])
params.dates = [(2014, 0, 00)] * len(params.subjects)
#params.subject_indices = [0]
params.score = score # scoring function to use
params.plot_drop_logs = False
params.on_missing = 'warning'
#params.acq_ssh = '[email protected]' # minea - 172.28.161.8
#params.acq_dir = '/sinuhe/data03/jason_words'
# params.acq_ssh = '[email protected]' # minea - 172.28.161.8
# params.acq_dir = '/sinuhe/data03/jason_words'
params.sws_ssh = '[email protected]' # kasga - 172.28.161.8
params.sws_dir = '/data05/jason/NLR'
#params.mf_args = '-hpie 30 -hpig .8 -hpicons' # sjjoo-20160826: We are doing SSS using python
# epoch
if s == '174_hs160620':
params.reject = dict(grad=3000e-13, mag=4.0e-12)
else:
params.reject = dict(grad=3000e-13, mag=4.0e-12)
# params.reject = dict(grad=4000e-13, mag=4.0e-12)
params.ssp_eog_reject = dict(grad=params.reject['grad'], mag=params.reject['mag'], eog=np.inf)
params.ssp_ecg_reject = dict(grad=params.reject['grad'], mag=params.reject['mag'], ecg=np.inf)
params.flat = dict(grad=1e-13, mag=1e-15)
params.auto_bad_reject = dict(grad=2*params.reject['grad'], mag=2*params.reject['mag'])
params.auto_bad_flat = params.flat
params.cov_method = 'shrunk'
params.get_projs_from = range(len(params.run_names))
params.inv_names = ['%s']
params.inv_runs = [range(0, len(params.run_names))]
params.runs_empty = []
params.proj_nums = [[0, 0, 0], # ECG: grad/mag/eeg
[1, 1, 0], # EOG # sjjoo-20160826: was 3
[0, 0, 0]] # Continuous (from ERM)
# The scoring function needs to produce an event file with these values
params.in_names = ['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot',
'bigram_c254_p20_dot', 'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word',
'bigram_c254_p20_word', 'bigram_c254_p50_word', 'bigram_c137_p20_word']
params.in_numbers = [101, 102, 103, 104, 105, 106, 107, 108,
201, 202, 203, 204, 205, 206, 207, 208]
# These lines define how to translate the above event types into evoked files
params.analyses = [
'All',
'Conditions'
]
params.out_names = [
['ALL'],
['word_c254_p20_dot', 'word_c254_p50_dot', 'word_c137_p20_dot',
'word_c254_p80_dot', 'word_c137_p80_dot',
'bigram_c254_p20_dot', 'bigram_c254_p50_dot', 'bigram_c137_p20_dot',
'word_c254_p20_word', 'word_c254_p50_word', 'word_c137_p20_word',
'word_c254_p80_word', 'word_c137_p80_word',
'bigram_c254_p20_word', 'bigram_c254_p50_word', 'bigram_c137_p20_word']
]
params.out_numbers = [
[1] * len(params.in_numbers),
[101, 102, 103, 104, 105, 106, 107, 108,
201, 202, 203, 204, 205, 206, 207, 208]
]
params.must_match = [
[],
[],
]
# Set what will run
mnefun.do_processing(
params,
fetch_raw=False, # Fetch raw recording files from acquisition machine
do_score=False, # Do scoring to slice data into trials
# Before running SSS, make SUBJ/raw_fif/SUBJ_prebad.txt file with
# space-separated list of bad MEG channel numbers
push_raw=False, # Push raw files and SSS script to SSS workstation
do_sss=False, # Run SSS remotely (on sws) or locally with mne-python
fetch_sss=False, # Fetch SSSed files from SSS workstation
do_ch_fix=False, # Fix channel ordering
# Before running SSP, examine SSS'ed files and make
# SUBJ/bads/bad_ch_SUBJ_post-sss.txt; usually, this should only contain EEG
# channels.
gen_ssp=True, # Generate SSP vectors
apply_ssp=True, # Apply SSP vectors and filtering
plot_psd=False, # Plot raw data power spectra
write_epochs=True, # Write epochs to disk
gen_covs=True, # Generate covariances
# Make SUBJ/trans/SUBJ-trans.fif using mne_analyze; needed for fwd calc.
gen_fwd=False, # Generate forward solutions (and src space if needed)
gen_inv=False, # Generate inverses
gen_report=False, # Write mne report html of results to disk
print_status=False, # Print completeness status update
# params,
# fetch_raw=False,
# do_score=True, # True
# push_raw=False,
# do_sss=True, # True
# fetch_sss=False,
# do_ch_fix=True, # True
# gen_ssp=True, # True
# apply_ssp=True, # True
# write_epochs=True, # True
# plot_psd=False,
# gen_covs=False,
# gen_fwd=False,
# gen_inv=False,
# print_status=False,
# gen_report=True # true
)
print('%i sec' % (time.time() - t0))
| bsd-3-clause | -1,400,178,077,217,708,300 | 41.297578 | 113 | 0.600213 | false | 2.772511 | false | false | false |
mendax-grip/cfdemUtilities | couette/averageRadialScalar.py | 2 | 2000 | # This program averages a variable for each value of r for each file specified by the user
# This program must be launched from the main folder of a case from which you can access ./CFD/ and ./voidFraction/
# A FOLDER ./voidFraction/averaged must exist!
# Author : Bruno Blais
# Last modified : 15-01-2014
#Python imports
#----------------
import os
import sys
import numpy
import math
import matplotlib.pyplot as plt
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Initial time of simulation, final time and time increment must be specified by user
t0=2.0
tf=100.
dT=0.5
#Number of z and r cells must be specified
nz=1
nr = 10
#======================
# MAIN
#======================
# Directory to work within
os.chdir("./voidFraction") # go to directory
nt=int((tf-t0)/dT)
t=t0
for i in range(0,nt):
#Current case
print "Radially averaging time ", t
fname='voidfraction_' + str(t)
x,y,z,phi = numpy.loadtxt(fname, unpack=True)
#Pre-allocate
phiAvg=numpy.zeros([4*nr*nr])
r = numpy.zeros([4*nr*nr])
lr = numpy.zeros([nr]) # list of possible radiuses
phiR = numpy.zeros([nr]) # radially averaged variable
#Calculate radiuses
r = numpy.sqrt(x*x + y * y)
#Establish list of possible radiuses
nlr = 0 # counter on how many radiuses were found
for j in range (0,len(r)):
cr = r[j] # current radius
present = 0;
for k in range(0,nlr):
if (numpy.abs(cr - lr[k]) < 1e-5):
present =1
phiR[k] += phi[j]
if (present == 0):
lr[nlr] = cr
nlr +=1
#Do the final average
for j in range (0,nr):
phiR[j] = phiR[j] / (4*nr)
#Create output file back in the averaged folder
outname='./averaged/radialVoidFraction_' + str(t)
outfile=open(outname,'w')
for i in range(0,nr):
outfile.write("%5.5e %5.5e\n" %(lr[i],phiR[i]))
outfile.close()
t = t+dT
#Go back to main dir
os.chdir("..")
print "Post-processing over"
| lgpl-3.0 | 788,407,417,079,574,300 | 21.727273 | 116 | 0.6045 | false | 3.125 | false | false | false |
gborri/SickRage | sickrage/providers/torrent/elitetorrent.py | 1 | 5577 | # coding=utf-8
# Author: CristianBB
#
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import sickrage
from sickrage.core.caches.tv_cache import TVCache
from sickrage.core.helpers import bs4_parser, try_int
from sickrage.providers import TorrentProvider
class EliteTorrentProvider(TorrentProvider):
def __init__(self):
super(EliteTorrentProvider, self).__init__('EliteTorrent', 'https://www.elitetorrent.eu', False)
self.urls.update({
'search': '{base_url}/torrents.php'.format(**self.urls)
})
self.onlyspasearch = None
self.minseed = None
self.minleech = None
self.cache = TVCache(self)
def search(self, search_strings, age=0, ep_obj=None):
results = []
lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang
"""
Search query:
http://www.elitetorrent.net/torrents.php?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe
cat = 4 => Shows
modo = listado => display results mode
orden = fecha => order
buscar => Search show
pag = 1 => page number
"""
search_params = {
'cat': 4,
'modo': 'listado',
'orden': 'fecha',
'pag': 1,
'buscar': ''
}
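        # Illustrative request (hypothetical values): with buscar='fringe' the
        # dict above is sent as ?cat=4&modo=listado&orden=fecha&pag=1&buscar=fringe,
        # matching the sample URL in the docstring.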
for mode in search_strings:
sickrage.app.log.debug("Search Mode: {}".format(mode))
# Only search if user conditions are true
if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
sickrage.app.log.debug("Show info is not spanish, skipping provider search")
continue
for search_string in search_strings[mode]:
if mode != 'RSS':
sickrage.app.log.debug("Search string: {0}".format(search_string))
search_string = re.sub(r'S0*(\d*)E(\d*)', r'\1x\2', search_string)
search_params['buscar'] = search_string.strip() if mode != 'RSS' else ''
try:
data = sickrage.app.wsession.get(self.urls['search'], params=search_params).text
results += self.parse(data, mode)
except Exception:
sickrage.app.log.debug("No data returned from provider")
return results
def parse(self, data, mode):
"""
Parse search results from data
:param data: response data
:param mode: search mode
:return: search results
"""
results = []
def _process_title(title):
title = title.encode('latin-1').decode('utf8')
# Quality, if no literal is defined it's HDTV
if 'calidad' not in title:
title += ' HDTV x264'
title = title.replace('(calidad baja)', 'HDTV x264')
title = title.replace('(Buena calidad)', '720p HDTV x264')
title = title.replace('(Alta calidad)', '720p HDTV x264')
title = title.replace('(calidad regular)', 'DVDrip x264')
title = title.replace('(calidad media)', 'DVDrip x264')
# Language, all results from this provider have spanish audio, we append it to title (avoid to download undesired torrents)
title += ' SPANISH AUDIO'
title += '-ELITETORRENT'
return title.strip()
with bs4_parser(data) as html:
torrent_table = html.find('table', class_='fichas-listado')
torrent_rows = torrent_table('tr') if torrent_table else []
if len(torrent_rows) < 2:
sickrage.app.log.debug("Data returned from provider does not contain any torrents")
return results
for row in torrent_rows[1:]:
try:
title = _process_title(row.find('a', class_='nombre')['title'])
download_url = self.urls['base_url'] + row.find('a')['href']
if not all([title, download_url]):
continue
seeders = try_int(row.find('td', class_='semillas').get_text(strip=True))
leechers = try_int(row.find('td', class_='clientes').get_text(strip=True))
# seeders are not well reported. Set 1 in case of 0
seeders = max(1, seeders)
# Provider does not provide size
size = -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders,
'leechers': leechers, 'hash': ''}
if mode != 'RSS':
sickrage.app.log.debug("Found result: {}".format(title))
results.append(item)
except Exception:
sickrage.app.log.error("Failed parsing provider")
return results | gpl-3.0 | 3,331,113,339,922,290,700 | 34.987097 | 135 | 0.565896 | false | 4.018012 | false | false | false |
victor-o-silva/db_file_storage | demo_and_tests/model_filefields_example/migrations/0001_initial.py | 1 | 2942 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-17 18:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('book_pk', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('index', models.FileField(blank=True, null=True, upload_to=b'model_filefields_example.BookIndex/bytes/filename/mimetype')),
('pages', models.FileField(blank=True, null=True, upload_to=b'model_filefields_example.BookPages/bytes/filename/mimetype')),
('cover', models.ImageField(blank=True, null=True, upload_to=b'model_filefields_example.BookCover/bytes/filename/mimetype')),
],
),
migrations.CreateModel(
name='BookCover',
fields=[
('book_cover_pk', models.AutoField(primary_key=True, serialize=False)),
('bytes', models.TextField()),
('filename', models.CharField(max_length=255)),
('mimetype', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='BookIndex',
fields=[
('book_index_pk', models.AutoField(primary_key=True, serialize=False)),
('bytes', models.TextField()),
('filename', models.CharField(max_length=255)),
('mimetype', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='BookPages',
fields=[
('book_pages_pk', models.AutoField(primary_key=True, serialize=False)),
('bytes', models.TextField()),
('filename', models.CharField(max_length=255)),
('mimetype', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='SoundDevice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('instruction_manual', models.FileField(blank=True, null=True, upload_to=b'model_filefields_example.SoundDeviceInstructionManual/bytes/filename/mimetype')),
],
),
migrations.CreateModel(
name='SoundDeviceInstructionManual',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bytes', models.TextField()),
('filename', models.CharField(max_length=255)),
('mimetype', models.CharField(max_length=50)),
],
),
]
| mit | 4,143,665,049,642,220,500 | 41.028571 | 172 | 0.557784 | false | 4.437406 | false | false | false |
brownian/frescobaldi | frescobaldi_app/midiinput/widget.py | 2 | 7812 | """
MIDI input controls
"""
import weakref
from PyQt5.QtCore import QSettings, Qt
from PyQt5.QtWidgets import (QCheckBox, QComboBox, QGridLayout, QGroupBox,
QHBoxLayout, QLabel, QRadioButton, QToolButton,
QVBoxLayout, QWidget)
import app
import midiinput
class Widget(QWidget):
def __init__(self, dockwidget):
super(Widget, self).__init__(dockwidget)
self._document = None
self._midiin = midiinput.MidiIn(self)
self._dockwidget = weakref.ref(dockwidget)
signals = list()
self._labelmidichannel = QLabel()
self._midichannel = QComboBox()
signals.append(self._midichannel.currentIndexChanged)
self._labelkeysignature = QLabel()
self._keysignature = QComboBox()
signals.append(self._keysignature.currentIndexChanged)
self._labelaccidentals = QLabel()
self._accidentalssharps = QRadioButton()
signals.append(self._accidentalssharps.clicked)
self._accidentalsflats = QRadioButton()
signals.append(self._accidentalsflats.clicked)
self._groupaccidentals = QGroupBox()
self._groupaccidentals.setFlat(True)
hbox = QHBoxLayout()
self._groupaccidentals.setLayout(hbox)
hbox.addWidget(self._accidentalssharps)
hbox.addWidget(self._accidentalsflats)
self._accidentalssharps.setChecked(True)
self._chordmode = QCheckBox()
signals.append(self._chordmode.clicked)
self._relativemode = QCheckBox()
signals.append(self._relativemode.clicked)
self._labeldamper = QLabel()
self._damper = QComboBox()
self._labelsostenuto = QLabel()
self._sostenuto = QComboBox()
self._labelsoft = QLabel()
self._soft = QComboBox()
ac = self.parentWidget().actionCollection
self._capture = QToolButton()
self._capture.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self._capture.setDefaultAction(ac.capture_start)
self.addAction(ac.accidental_switch)
self._notemode = QLabel()
layout = QVBoxLayout()
self.setLayout(layout)
grid = QGridLayout(spacing=0)
layout.addLayout(grid)
layout.addStretch()
grid.addWidget(self._labelmidichannel, 0, 0)
grid.addWidget(self._midichannel, 0, 1)
grid.addWidget(self._labelkeysignature, 1, 0)
grid.addWidget(self._keysignature, 1, 1)
grid.addWidget(self._labelaccidentals, 2, 0)
grid.addWidget(self._groupaccidentals, 2, 1)
grid.addWidget(self._chordmode, 3, 0)
grid.addWidget(self._relativemode, 3, 1)
grid.addWidget(self._labeldamper, 4, 0)
grid.addWidget(self._damper, 4, 1)
grid.addWidget(self._labelsostenuto, 5, 0)
grid.addWidget(self._sostenuto, 5, 1)
grid.addWidget(self._labelsoft, 6, 0)
grid.addWidget(self._soft, 6, 1)
hbox = QHBoxLayout()
layout.addLayout(hbox)
hbox.addWidget(self._capture)
hbox.addStretch()
app.translateUI(self)
self.loadsettings()
for s in signals:
s.connect(self.savesettings)
def mainwindow(self):
return self._dockwidget().mainwindow()
def channel(self):
return self._midichannel.currentIndex()
def keysignature(self):
return self._keysignature.currentIndex()
def accidentals(self):
if self._accidentalsflats.isChecked():
return 'flats'
else:
return 'sharps'
def chordmode(self):
return self._chordmode.isChecked()
def relativemode(self):
return self._relativemode.isChecked()
def startcapturing(self):
self._midiin.capture()
ac = self.parentWidget().actionCollection
while self._capture.actions(): # remove all old actions
self._capture.removeAction(self._capture.actions()[0])
self._capture.setDefaultAction(ac.capture_stop)
def stopcapturing(self):
self._midiin.capturestop()
ac = self.parentWidget().actionCollection
while self._capture.actions(): # remove all old actions
self._capture.removeAction(self._capture.actions()[0])
self._capture.setDefaultAction(ac.capture_start)
def switchaccidental(self):
if self.accidentals() == 'flats':
self._accidentalssharps.setChecked(True)
else:
self._accidentalsflats.setChecked(True)
def savesettings(self):
s = QSettings()
s.beginGroup("midiinputdock")
s.setValue("midichannel", self._midichannel.currentIndex())
s.setValue("keysignature", self._keysignature.currentIndex())
if self._accidentalsflats.isChecked():
s.setValue("accidentals", 'flats')
else:
s.setValue("accidentals", 'sharps')
s.setValue("chordmode", self._chordmode.isChecked())
s.setValue("relativemode", self._relativemode.isChecked())
def loadsettings(self):
s = QSettings()
s.beginGroup("midiinputdock")
self._midichannel.setCurrentIndex(s.value("midichannel", 0, int))
self._keysignature.setCurrentIndex(s.value("keysignature", 7, int))
if s.value("accidentals", 'sharps', str) == 'flats':
self._accidentalsflats.setChecked(True)
else:
self._accidentalssharps.setChecked(True)
self._chordmode.setChecked(s.value("chordmode", False, bool))
self._relativemode.setChecked(s.value("relativemode", False, bool))
def translateUI(self):
self._labelmidichannel.setText(_("MIDI channel"))
self._midichannel.addItems([_("all")]+[str(i) for i in range(1,17)])
self._labelkeysignature.setText(_("Key signature"))
self._keysignature.addItems([
_("C flat major (7 flats)"),
_("G flat major (6 flats)"),
_("D flat major (5 flats)"),
_("A flat major (4 flats)"),
_("E flat major (3 flats)"),
_("B flat major (2 flats)"),
_("F major (1 flat)"),
_("C major"),
_("G major (1 sharp)"),
_("D major (2 sharps)"),
_("A major (3 sharps)"),
_("E major (4 sharps)"),
_("B major (5 sharps)"),
_("F sharp major (6 sharps)"),
_("C sharp major (7 sharps)")
])
self._keysignature.setCurrentIndex(7)
self._labelaccidentals.setText(_("Accidentals"))
self._accidentalssharps.setText(_("sharps"))
self._accidentalsflats.setText(_("flats"))
self._chordmode.setText(_("Chord mode"))
self._chordmode.setToolTip(_(
"Enter simultaneously played notes as chords. "
"See \"What's This\" for more information."))
self._chordmode.setWhatsThis(_(
"Notes which are played simultaneously are written "
"as chords. As a consequence they are not written "
"before the last key is lifted. Of course single "
"can also be entered."))
self._relativemode.setText(_("Relative mode"))
self._relativemode.setToolTip(_(
"Enter octaves of notes relative to the last note. "
"See \"What's This\" for more information."))
self._relativemode.setWhatsThis(_(
"Enter octaves of notes relative to the last note. "
"This refers to the last key pressed on the MIDI keyboard, not the last note in the document."
"Hold Shift with a note to enter an octave check."))
self._labeldamper.setText(_("Damper pedal"))
self._labelsostenuto.setText(_("Sostenuto pedal"))
self._labelsoft.setText(_("Soft pedal"))
| gpl-2.0 | 5,464,379,933,306,593,000 | 35.849057 | 106 | 0.610215 | false | 3.945455 | false | false | false |
chop-dbhi/arrc | learn/wrangle.py | 1 | 1775 | __author__ = 'Aaron J. Masino'
import numpy as np
def extractBy(condition, data, tol = 1e-6):
not_condition = condition[:]==False
return (data[condition], data[not_condition])
def partion(condition, data, ratios=[.6,.2,.2]):
''' returns two lists (l1,l2). l1 is a list of numpy arrays where each array contains indices
into the data where the condition is True and l2 is a list of numpy arrays where each array contains
    indices into the data where the condition is False. The len(l1)=len(l2)=len(ratios) and
the lists in l1 and l2 have lengths determined by the ratio values.'''
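    # Illustrative sketch (hypothetical shapes): with 10 rows where condition is
    # True, 10 where it is False, and ratios=[.6,.2,.2], l1 holds index arrays of
    # roughly 6, 2 and 2 positive-row indices and l2 the same for the negative rows.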
pos = np.where(condition)[0]
neg = np.where(condition[:]==False)[0]
#SHOULD ALSO USE np.where(condition) to split data
#NEED TO MODIFY TO RETURN MASKS ONLY
#MASK SHOULD BE AN 1D NUMPY ARRAY
#if not (np.sum(ratios) == 1 or np.sum(ratios) == 1.0): raise Exception('Ratios must sum to 1, got {0}'.format(np.sum(ratios)))
#(pos, neg) = extractBy(condition, data)
pos_row_count = pos.shape[0]
neg_row_count = neg.shape[0]
s1 = 0
s2 = 0
s3 = 0
s4 = 0
pdata = []
ndata = []
for i in range(len(ratios)):
r = ratios[i]
if i==len(ratios)-1:
s2 = pos_row_count
s4 = neg_row_count
else:
s2 = min(s1 + int(round(r*pos_row_count)), pos_row_count)
s4 = min(s3 + int(round(r*neg_row_count)), neg_row_count)
if s2<=s1: raise Exception('Insufficient positive data for partition, s1={0}, s2={1}'.format(s1,s2))
if s4<=s3: raise Exception('Insufficient negative data for partition, s3={0}, s4={1}'.format(s3,s4))
pdata.append(pos[s1:s2])
ndata.append(neg[s3:s4])
s1 = s2
s3 = s4
return(pdata,ndata) | mit | -3,090,557,369,432,531,500 | 38.466667 | 131 | 0.616338 | false | 3.018707 | false | false | false |
teamworkquality/twq-app | api/companies/migrations/0002_auto_20171119_1938.py | 1 | 2047 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-19 19:38
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
('companies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('admin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='users.Admin')),
('employer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employer', to='companies.Company')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('users.admin',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250, unique=True)),
],
),
migrations.AddField(
model_name='employee',
name='team',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='companies.Team'),
),
migrations.AddField(
model_name='company',
name='editors',
field=models.ManyToManyField(related_name='editors', to='users.Admin'),
),
migrations.AddField(
model_name='company',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Admin'),
),
]
| mit | -7,012,980,200,005,701,000 | 34.293103 | 187 | 0.560332 | false | 4.291405 | false | false | false |
shanot/imp | modules/em/test/test_origin_spacing.py | 2 | 2797 | import os
import IMP
import IMP.em
import IMP.test
import IMP.core
class Tests(IMP.test.TestCase):
"""Class to test EM correlation restraint"""
def test_origin_spacing_data_is_kept_in_mrc_format1(self):
scene = IMP.em.read_map(self.get_input_file_name("in.mrc"), self.mrw)
scene.set_origin(-100, -100, -100)
IMP.em.write_map(scene, "test1.mrc", self.mrw)
scene2 = IMP.em.read_map("test1.mrc", self.mrw)
os.unlink("test1.mrc")
origin2 = scene2.get_origin()
self.assertEqual(-100, origin2[0])
self.assertEqual(-100, origin2[1])
self.assertEqual(-100, origin2[2])
self.assertEqual(1, scene2.get_spacing())
def test_origin_spacing_data_is_kept_in_mrc_format2(self):
mrw = IMP.em.MRCReaderWriter()
scene = IMP.em.read_map(self.get_input_file_name("in.mrc"), self.mrw)
scene.set_origin(-100, -100, -100)
scene.update_voxel_size(10)
self.assertEqual(10, scene.get_spacing())
IMP.em.write_map(scene, "test2.mrc", self.mrw)
scene2 = IMP.em.read_map("test2.mrc", self.mrw)
os.unlink("test2.mrc")
origin2 = scene2.get_origin()
self.assertEqual(-100, origin2[0])
self.assertEqual(-100, origin2[1])
self.assertEqual(-100, origin2[2])
self.assertEqual(10, scene2.get_spacing())
def test_origin_spacing_data_is_kept_in_mrc_format3(self):
mrw = IMP.em.MRCReaderWriter()
scene = IMP.em.read_map(self.get_input_file_name("in.mrc"), self.mrw)
scene.update_voxel_size(10)
scene.set_origin(-100, -100, -100)
self.assertEqual(10, scene.get_spacing())
scene.get_header().show()
IMP.em.write_map(scene, "test3.mrc", self.mrw)
scene2 = IMP.em.read_map("test3.mrc", self.mrw)
os.unlink("test3.mrc")
origin2 = scene2.get_origin()
self.assertEqual(-100, origin2[0])
self.assertEqual(-100, origin2[1])
self.assertEqual(-100, origin2[2])
self.assertEqual(10, scene2.get_spacing())
def test_origin_spacing_data_is_kept_in_mrc_format4(self):
mrw = IMP.em.MRCReaderWriter()
scene = IMP.em.read_map(self.get_input_file_name("in.mrc"), self.mrw)
scene.update_voxel_size(10)
self.assertEqual(10, scene.get_spacing())
scene.get_header().show()
IMP.em.write_map(scene, "test4.mrc", self.mrw)
scene2 = IMP.em.read_map("test4.mrc", self.mrw)
os.unlink("test4.mrc")
origin2 = scene2.get_origin()
self.assertEqual(10, scene2.get_spacing())
def setUp(self):
"""Build test model and optimizer"""
IMP.test.TestCase.setUp(self)
self.mrw = IMP.em.MRCReaderWriter()
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -7,092,655,008,620,478,000 | 37.847222 | 77 | 0.614587 | false | 2.991444 | true | false | false |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/animation/moveblocks.py | 2 | 8146 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import (QAbstractTransition, QEasingCurve, QEvent,
QParallelAnimationGroup, QPropertyAnimation, qrand, QRect,
QSequentialAnimationGroup, qsrand, QState, QStateMachine, Qt, QTime,
QTimer)
from PyQt5.QtWidgets import (QApplication, QGraphicsScene, QGraphicsView,
QGraphicsWidget)
class StateSwitchEvent(QEvent):
StateSwitchType = QEvent.User + 256
def __init__(self, rand=0):
super(StateSwitchEvent, self).__init__(StateSwitchEvent.StateSwitchType)
self.m_rand = rand
def rand(self):
return self.m_rand
class QGraphicsRectWidget(QGraphicsWidget):
def paint(self, painter, option, widget):
painter.fillRect(self.rect(), Qt.blue)
class StateSwitchTransition(QAbstractTransition):
def __init__(self, rand):
super(StateSwitchTransition, self).__init__()
self.m_rand = rand
def eventTest(self, event):
return (event.type() == StateSwitchEvent.StateSwitchType and
event.rand() == self.m_rand)
def onTransition(self, event):
pass
class StateSwitcher(QState):
def __init__(self, machine):
super(StateSwitcher, self).__init__(machine)
self.m_stateCount = 0
self.m_lastIndex = 0
def onEntry(self, event):
n = qrand() % self.m_stateCount + 1
while n == self.m_lastIndex:
n = qrand() % self.m_stateCount + 1
self.m_lastIndex = n
self.machine().postEvent(StateSwitchEvent(n))
def onExit(self, event):
pass
def addState(self, state, animation):
self.m_stateCount += 1
trans = StateSwitchTransition(self.m_stateCount)
trans.setTargetState(state)
self.addTransition(trans)
trans.addAnimation(animation)
def createGeometryState(w1, rect1, w2, rect2, w3, rect3, w4, rect4, parent):
result = QState(parent)
result.assignProperty(w1, 'geometry', rect1)
result.assignProperty(w1, 'geometry', rect1)
result.assignProperty(w2, 'geometry', rect2)
result.assignProperty(w3, 'geometry', rect3)
result.assignProperty(w4, 'geometry', rect4)
return result
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
button1 = QGraphicsRectWidget()
button2 = QGraphicsRectWidget()
button3 = QGraphicsRectWidget()
button4 = QGraphicsRectWidget()
button2.setZValue(1)
button3.setZValue(2)
button4.setZValue(3)
scene = QGraphicsScene(0, 0, 300, 300)
scene.setBackgroundBrush(Qt.black)
scene.addItem(button1)
scene.addItem(button2)
scene.addItem(button3)
scene.addItem(button4)
window = QGraphicsView(scene)
window.setFrameStyle(0)
window.setAlignment(Qt.AlignLeft | Qt.AlignTop)
window.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
window.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
machine = QStateMachine()
group = QState()
timer = QTimer()
timer.setInterval(1250)
timer.setSingleShot(True)
group.entered.connect(timer.start)
state1 = createGeometryState(button1, QRect(100, 0, 50, 50), button2,
QRect(150, 0, 50, 50), button3, QRect(200, 0, 50, 50), button4,
QRect(250, 0, 50, 50), group)
state2 = createGeometryState(button1, QRect(250, 100, 50, 50), button2,
QRect(250, 150, 50, 50), button3, QRect(250, 200, 50, 50), button4,
QRect(250, 250, 50, 50), group)
state3 = createGeometryState(button1, QRect(150, 250, 50, 50), button2,
QRect(100, 250, 50, 50), button3, QRect(50, 250, 50, 50), button4,
QRect(0, 250, 50, 50), group)
state4 = createGeometryState(button1, QRect(0, 150, 50, 50), button2,
QRect(0, 100, 50, 50), button3, QRect(0, 50, 50, 50), button4,
QRect(0, 0, 50, 50), group)
state5 = createGeometryState(button1, QRect(100, 100, 50, 50), button2,
QRect(150, 100, 50, 50), button3, QRect(100, 150, 50, 50), button4,
QRect(150, 150, 50, 50), group)
state6 = createGeometryState(button1, QRect(50, 50, 50, 50), button2,
QRect(200, 50, 50, 50), button3, QRect(50, 200, 50, 50), button4,
QRect(200, 200, 50, 50), group)
state7 = createGeometryState(button1, QRect(0, 0, 50, 50), button2,
QRect(250, 0, 50, 50), button3, QRect(0, 250, 50, 50), button4,
QRect(250, 250, 50, 50), group)
group.setInitialState(state1)
animationGroup = QParallelAnimationGroup()
anim = QPropertyAnimation(button4, 'geometry')
anim.setDuration(1000)
anim.setEasingCurve(QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
subGroup = QSequentialAnimationGroup(animationGroup)
subGroup.addPause(100)
anim = QPropertyAnimation(button3, 'geometry')
anim.setDuration(1000)
anim.setEasingCurve(QEasingCurve.OutElastic)
subGroup.addAnimation(anim)
subGroup = QSequentialAnimationGroup(animationGroup)
subGroup.addPause(150)
anim = QPropertyAnimation(button2, 'geometry')
anim.setDuration(1000)
anim.setEasingCurve(QEasingCurve.OutElastic)
subGroup.addAnimation(anim)
subGroup = QSequentialAnimationGroup(animationGroup)
subGroup.addPause(200)
anim = QPropertyAnimation(button1, 'geometry')
anim.setDuration(1000)
anim.setEasingCurve(QEasingCurve.OutElastic)
subGroup.addAnimation(anim)
stateSwitcher = StateSwitcher(machine)
group.addTransition(timer.timeout, stateSwitcher)
stateSwitcher.addState(state1, animationGroup)
stateSwitcher.addState(state2, animationGroup)
stateSwitcher.addState(state3, animationGroup)
stateSwitcher.addState(state4, animationGroup)
stateSwitcher.addState(state5, animationGroup)
stateSwitcher.addState(state6, animationGroup)
stateSwitcher.addState(state7, animationGroup)
machine.addState(group)
machine.setInitialState(group)
machine.start()
window.resize(300, 300)
window.show()
qsrand(QTime(0, 0, 0).secsTo(QTime.currentTime()))
sys.exit(app.exec_())
| gpl-2.0 | -7,652,856,938,194,048,000 | 33.961373 | 80 | 0.677019 | false | 3.667717 | false | false | false |
djurodrljaca/salamander-alm | client/unittests/ut_user_management.py | 1 | 8634 | """
Salamander ALM
Copyright (c) 2016 Djuro Drljaca
This Python module is free software; you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library. If
not, see <http://www.gnu.org/licenses/>.
"""
import connection
import json
import requests
import requests.packages
import signal
import subprocess
import sys
import time
import unittest
class UserManagement(unittest.TestCase):
@classmethod
def setUpClass(cls):
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
def setUp(self):
self.__admin_user_id = 1
self.__server_instance = subprocess.Popen([sys.executable, "../../server/salamander_alm.py"])
if self.__server_instance is not None:
time.sleep(2.0)
def tearDown(self):
if self.__server_instance is not None:
self.__server_instance.send_signal(signal.SIGINT)
self.__server_instance.wait(1.0)
if self.__server_instance.returncode is None:
self.__server_instance.kill()
# def create_user_test1(self, conn: connection.Connection) -> Optional[dict]:
# """
# Creates a user
# :param conn:
# :return:
# """
# user_id = UserManagementInterface.create_user("test1",
# "Test 1",
# "[email protected]",
# "basic",
# {"password": "test123"})
# return user_id
# Tests ----------------------------------------------------------------------------------------
def test_default_administrator_login(self):
conn = connection.Connection()
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
def test_default_administrator_login_failure(self):
conn = connection.Connection()
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "xyz"})
self.assertFalse(success)
def test_default_administrator_logout(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then try to log out
success = conn.logout()
self.assertTrue(success)
def test_default_administrator_logout_failure(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then try to log out
success = conn.logout()
self.assertTrue(success)
def test_default_administrator_read_current_user(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then read the current user
success = conn.call_get_method("/usermanagement/user")
self.assertTrue(success)
user = json.loads(conn.last_response_message.text)
self.assertIsNotNone(user)
self.assertEqual(user["id"], self.__admin_user_id)
self.assertEqual(user["user_name"], "administrator")
self.assertEqual(user["display_name"], "Administrator")
self.assertEqual(user["email"], "")
self.assertEqual(user["active"], True)
def test_read_user_by_id(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then read the selected user
success = conn.call_get_method("/usermanagement/user", parameters={"user_id": 1})
self.assertTrue(success)
user = json.loads(conn.last_response_message.text)
self.assertIsNotNone(user)
self.assertEqual(user["id"], self.__admin_user_id)
self.assertEqual(user["user_name"], "administrator")
self.assertEqual(user["display_name"], "Administrator")
self.assertEqual(user["email"], "")
self.assertEqual(user["active"], True)
def test_read_user_by_id_failure(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then try to read a non-existing user
success = conn.call_get_method("/usermanagement/user", parameters={"user_id": 2})
self.assertFalse(success)
def test_read_user_by_user_name(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then read the selected user
success = conn.call_get_method("/usermanagement/user",
parameters={"user_name": "administrator"})
self.assertTrue(success)
user = json.loads(conn.last_response_message.text)
self.assertIsNotNone(user)
self.assertEqual(user["id"], self.__admin_user_id)
self.assertEqual(user["user_name"], "administrator")
self.assertEqual(user["display_name"], "Administrator")
self.assertEqual(user["email"], "")
self.assertEqual(user["active"], True)
def test_read_user_by_user_name_failure(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then try to read a non-existing user
success = conn.call_get_method("/usermanagement/user", parameters={"user_name": "xyz"})
self.assertFalse(success)
def test_read_user_by_display_name(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then read the selected user
success = conn.call_get_method("/usermanagement/user",
parameters={"display_name": "Administrator"})
self.assertTrue(success)
user = json.loads(conn.last_response_message.text)
self.assertIsNotNone(user)
self.assertEqual(user["id"], self.__admin_user_id)
self.assertEqual(user["user_name"], "administrator")
self.assertEqual(user["display_name"], "Administrator")
self.assertEqual(user["email"], "")
self.assertEqual(user["active"], True)
def test_read_user_by_display_name_failure(self):
conn = connection.Connection()
# First log in
success = conn.login("http://127.0.0.1:5000/api",
"administrator",
{"password": "administrator"})
self.assertTrue(success)
# And then try to read a non-existing user
success = conn.call_get_method("/usermanagement/user",
parameters={"display_name": "xyz"})
self.assertFalse(success)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,142,175,285,237,491,000 | 35.740426 | 101 | 0.564281 | false | 4.457408 | true | false | false |
163gal/Time-Line | libs_arm/wx/tools/XRCed/plugins/wxlib.py | 6 | 1610 | # Name: wxlib.py
# Purpose: Component plugins for wx.lib classes
# Author: Roman Rolinsky <[email protected]>
# Created: 05.09.2007
# RCS-ID: $Id$
import xh_wxlib
from wx.tools.XRCed import component, params
from wx.tools.XRCed.globals import TRACE
from wx.lib.ticker_xrc import wxTickerXmlHandler
TRACE('*** creating wx.lib components')
# wx.lib.foldpanelbar.FoldPanelBar
c = component.SmartContainer('wx.lib.foldpanelbar.FoldPanelBar', ['book', 'window', 'control'],
['pos', 'size'],
implicit_klass='foldpanel',
implicit_page='FoldPanel',
implicit_attributes=['label', 'collapsed'],
implicit_params={'collapsed': params.ParamBool})
c.addStyles('FPB_SINGLE_FOLD', 'FPB_COLLAPSE_TO_BOTTOM',
'FPB_EXCLUSIVE_FOLD', 'FPB_HORIZONTAL', 'FPB_VERTICAL')
component.Manager.register(c)
component.Manager.addXmlHandler(xh_wxlib.FoldPanelBarXmlHandler)
component.Manager.setMenu(c, 'bar', 'fold panel bar', 'FoldPanelBar', 1000)
# wx.lib.ticker.Ticker
class ParamDirection(params.RadioBox):
choices = {'right to left': 'rtl', 'left to right': 'ltr'}
default = 'rtl'
c = component.Component('wxTicker', ['control'],
['pos', 'size', 'start', 'text', 'ppf', 'fps', 'direction'],
params={'ppf': params.ParamInt, 'fps': params.ParamInt,
'direction': ParamDirection})
component.Manager.register(c)
component.Manager.addXmlHandler(wxTickerXmlHandler)
component.Manager.setMenu(c, 'control', 'ticker', 'Ticker', 1000)
| gpl-3.0 | 7,605,360,152,080,896,000 | 40.282051 | 96 | 0.649068 | false | 3.333333 | false | false | false |
django-oscar/django-oscar-stripe | setup.py | 1 | 1109 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='django-oscar-stripe',
version='0.1',
url='https://github.com/tangentlabs/django-oscar-stripe',
author="David Winterbottom",
author_email="[email protected]",
description="Stripe payment module for django-oscar",
long_description=open('README.rst').read(),
keywords="Payment, Stripe",
license='BSD',
packages=find_packages(exclude=['sandbox*', 'tests*']),
include_package_data=True,
install_requires=[
'django-oscar>=0.4',
'stripe==1.7.9'
],
dependency_links=['https://code.stripe.com/stripe/stripe-1.7.9#egg=stripe'],
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python']
)
| bsd-3-clause | 8,019,311,933,894,074,000 | 37.241379 | 82 | 0.611362 | false | 3.90493 | false | false | false |
cblop/tropic | instal-linux/instal/firstprinciples/incompleteinstitutions/TestIncompleteGrounding.py | 1 | 2576 | from instal.firstprinciples.TestEngine import InstalSingleShotTestRunner, InstalMultiShotTestRunner, InstalTestCase
from instal.instalexceptions import InstalRuntimeError
class IncompleteGrounding(InstalTestCase):
def test_complete_grounding_solve(self):
runner = InstalSingleShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/fullgrounding.idc"], fact_files=[])
runner.run_test(query_file="incompleteinstitutions/blank.iaq")
def test_no_grounding_solve(self):
runner = InstalSingleShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/nogrounding.idc"], fact_files=[])
with self.assertRaises(InstalRuntimeError):
runner.run_test(query_file="incompleteinstitutions/blank.iaq")
def test_partial_grounding_solve(self):
runner = InstalSingleShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/partialgrounding.idc"], fact_files=[])
with self.assertRaises(InstalRuntimeError):
runner.run_test(query_file="incompleteinstitutions/blank.iaq")
def test_complete_grounding_query(self):
runner = InstalMultiShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/fullgrounding.idc"], fact_files=[])
runner.run_test(
query_file="incompleteinstitutions/blank.iaq", expected_answersets=1)
def test_no_grounding_query(self):
runner = InstalMultiShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/nogrounding.idc"], fact_files=[])
with self.assertRaises(InstalRuntimeError):
runner.run_test(query_file="incompleteinstitutions/blank.iaq")
def test_partial_grounding_query(self):
runner = InstalMultiShotTestRunner(input_files=["incompleteinstitutions/incomplete.ial"], bridge_file=None,
domain_files=["incompleteinstitutions/partialgrounding.idc"], fact_files=[])
with self.assertRaises(InstalRuntimeError):
runner.run_test(query_file="incompleteinstitutions/blank.iaq")
| epl-1.0 | 3,374,918,661,383,116,000 | 55 | 120 | 0.680901 | false | 4.236842 | true | false | false |
ArcasProject/WheresMyField | library/serializers.py | 1 | 2848 | from library.models import Article, Author, Year, Label, Strategies, KeyWord
from rest_framework import serializers
class AuthorSerializer(serializers.HyperlinkedModelSerializer):
papers_on_this_db = serializers.SerializerMethodField()
class Meta:
model = Author
fields = "__all__"
def get_papers_on_this_db(self, obj):
return obj.article_set.count()
class YearSerializer(serializers.HyperlinkedModelSerializer):
papers_on_specific_year = serializers.SerializerMethodField()
class Meta:
model = Year
fields = "__all__"
def get_papers_on_specific_year(self, obj):
return obj.article_set.count()
class LabelsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Label
fields = ["label"]
class KeyWordSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = KeyWord
fields = ["key_word"]
class StrategiesSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Strategies
fields = ["strategy_name"]
class ArticleSerializer(serializers.HyperlinkedModelSerializer):
author = AuthorSerializer(many=True, )
date = YearSerializer()
labels = LabelsSerializer(many=True, )
key_word = KeyWordSerializer(many=True, )
list_strategies = StrategiesSerializer(many=True)
class Meta:
model = Article
fields = ('key', 'unique_key', 'title', 'author', 'date', 'abstract',
'pages', 'journal', 'labels', 'read', 'key_word',
'provenance', 'list_strategies')
def create(self, validated_data):
# Create the new article attributes
date = Year.objects.create(year=validated_data['date'].get("year"))
# create the article
article = Article(date=date, title=validated_data['title'],
abstract=validated_data['abstract'],
key=validated_data['key'],
pages=validated_data['pages'],
journal=validated_data['journal'],
provenance=validated_data['provenance'])
article.save()
for author in validated_data['author']:
article.author.add(Author.objects.create(name=author['name']))
for label in validated_data['labels']:
article.labels.add(Label.objects.create(label=label['label']))
for strategy in validated_data['list_strategies']:
article.list_strategies.add(Strategies.objects.create(
strategy_name=strategy['strategy_name']))
for keyword in validated_data['key_word']:
article.key_word.add(KeyWord.objects.create(key_word=keyword[
'key_word']))
return article
| mit | -2,835,192,520,253,170,000 | 33.731707 | 78 | 0.621138 | false | 4.415504 | false | false | false |
Yoctol/seq2vec | seq2vec/util/data_generator.py | 1 | 1759 |
class DataGenterator(object):
def __init__(
self, train_file_path, generate_x, generate_y,
predict_file_path=None, batch_size=128
):
self.train_file_path = train_file_path
self.predict_file_path = train_file_path
if predict_file_path is not None:
self.predict_file_path = predict_file_path
self.generate_x = generate_x
self.generate_y = generate_y
self.batch_size = batch_size
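    # Input format assumed from the parsing below: one sequence per line in the
    # text files, tokens separated by single spaces, e.g. "w1 w2 w3".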
def array_generator(self, file_path, generating_function, batch_size):
with open(file_path, 'r', encoding='utf-8') as array_file:
seqs = []
seqs_len = 0
for line in array_file:
if seqs_len < batch_size:
seqs.append(line.strip().split(' '))
seqs_len += 1
else:
array = generating_function(seqs)
seqs = [line.strip().split(' ')]
seqs_len = 1
yield array
array = generating_function(seqs)
yield array
def __next__(self):
while True:
for x_array, y_array in zip(
self.array_generator(
self.train_file_path,
self.generate_x,
self.batch_size
),
self.array_generator(
self.predict_file_path,
self.generate_y,
self.batch_size
)
):
#assert (len(x_array) == len(y_array)), \
# 'training data has different length with testing data'
return (x_array, y_array)
| gpl-3.0 | -2,943,728,817,002,458,000 | 33.490196 | 75 | 0.464468 | false | 4.386534 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/virtual_network_peering.py | 1 | 4050 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network.
:type remote_virtual_network:
~azure.mgmt.network.v2017_08_01.models.SubResource
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2017_08_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkPeering, self).__init__(**kwargs)
self.allow_virtual_network_access = kwargs.get('allow_virtual_network_access', None)
self.allow_forwarded_traffic = kwargs.get('allow_forwarded_traffic', None)
self.allow_gateway_transit = kwargs.get('allow_gateway_transit', None)
self.use_remote_gateways = kwargs.get('use_remote_gateways', None)
self.remote_virtual_network = kwargs.get('remote_virtual_network', None)
self.peering_state = kwargs.get('peering_state', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
| mit | -8,661,834,945,974,377,000 | 50.923077 | 104 | 0.66642 | false | 4.05 | false | false | false |
shenbai/tradesafe | org/tradesafe/bt/order.py | 1 | 1854 | # coding: utf-8
__author__ = 'tack'
class Order(object):
def __init__(self, code, num,hold_num, cost_price, market_price, commission, date, cmd):
'''
Args:
code: stock code
cost_price: cost price
market_price: market price
commission: commission
num: num
hold_num: hold num
date: date
cmd: buy|sell
'''
self.code = code
self.num = num
self.hold_num = hold_num
self.date = date
self.commission = commission
self.cost_price = cost_price
self.market_price = market_price
self.cost = self.cost_price * num + commission
self.cmd = cmd
if (self.hold_num == 0 and 'sell' == self.cmd):
self.profit = (self.market_price - self.cost_price) * num - self.commission
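            # Illustrative numbers (hypothetical): closing out a position of
            # num=100 bought at cost_price=10.0 and sold at market_price=11.0
            # with commission=5.0 gives profit = (11.0 - 10.0) * 100 - 5.0 = 95.0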
else:
self.profit = 0 - self.commission
def __repr__(self):
return 'cmd=%s, code=%s,cost=%f,cost_price=%f, market_price=%f,num=%d, hold_num=%d, commission=%f, profit=%f, date=%s' % (self.cmd, self.code, self.cost, self.cost_price,self.market_price, self.num,self.hold_num, self.commission,self.profit, self.date)
class OrderHistory(object):
def __init__(self):
self._ohs = {}
def update(self, order):
'''
        when the market closes, update positions
:param order:
:return:
'''
if order.code in self._ohs:
self._ohs[order.code].append(order)
else:
self._ohs[order.code] = [order]
def get_history(self, code):
return self._ohs.get(code, None)
def get_total_profit(self, code):
if code not in self._ohs:
return 0.
profit = 0.
for o in self._ohs[code]:
profit += o.profit
return profit | mit | 8,134,701,983,899,524,000 | 29.916667 | 260 | 0.542611 | false | 3.579151 | false | false | false |
jpalpant/jarvis-lab-rnaseq-flow | rnaseqflow/workflow.py | 2 | 22041 | """
Created on Dec 12, 2015
@author: justinpalpant
Copyright 2015 Justin Palpant
This file is part of the Jarvis Lab RNAseq Workflow program.
RNAseq Workflow is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
RNAseq Workflow is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
RNAseq Workflow. If not, see http://www.gnu.org/licenses/.
"""
import logging
import subprocess
import os
import fnmatch
import re
import shutil
from abc import ABCMeta, abstractmethod, abstractproperty
from cliutils import all_subclasses, firstline
from cliutils import ArgFiller
class Workflow(object):
"""
Execute a simple series of steps used to preprocess RNAseq files
"""
logger = logging.getLogger('rnaseqflow.Workflow')
"""log4j-style class logger"""
def __init__(self):
"""
Initialize an empty workflow with no stages
"""
self.items = []
def append(self, item):
"""Add a WorkflowStage to the workflow
:param item: the WorkflowStage to insert
:type item: WorkflowStage
"""
self.items.append(item)
def insert(self, idx, item):
"""Insert a WorkflowStage into the workflow
:param idx: list index for insertion
:type idx: int
:param item: the WorkflowStage to insert
:type item: WorkflowStage
"""
self.items.insert(idx, item)
def run(self):
"""Allows the user to select a directory and processes all files within
that directory
This function is the primary function of the Workflow class. All other
functions are written as support for this function, at the moment
"""
current_input = None
for item in self.items:
next_input = item.run(current_input)
current_input = next_input
class WorkflowStage(object):
"""Interface for a stage of a Workflow
Subclasses must override the run method, which takes and verifies arbitrary
input, processes it, and returns some output
They must also provide a .spec property which is a short string to be used
to select the specific WorkflowStage from many options. These should not
overlap, but at the moment no checking is done to see if they do.
"""
__metaclass__ = ABCMeta
logger = logging.getLogger('rnaseqflow.WorkflowStage')
"""log4j-style class logger"""
@abstractmethod
def run(self, stage_input):
"""Attempt to process the provided input according to the rules of the
subclass
:param stage_input: an arbitrary input to be processed, usually a list of
file names or file-like objects. The subclass must typecheck
the input as necessary, and define what input it takes
:type stage_input: object
:returns: the results of the subclass's processing
"""
pass
@abstractproperty
def spec(self):
"""Abstract class property, override with @classmethod
Used by the help method to specify available WorkflowItems
"""
pass
@classmethod
def shorthelp(cls):
"""Create a short help text with one line for each subclass of WorkflowStage
Subclasses are found using cliutils.all_subclasses
"""
helpstrings = []
helpstrings.append('The following WorkflowStages are available:\n')
for sub in all_subclasses(cls):
helpstrings.append(
'{0}: {1} - {2}\n'.format(
sub.spec, sub.__name__, firstline(sub.__doc__)))
helpstrings.append('Use "--help stages" for more details\n')
return ''.join(helpstrings)
@classmethod
def longhelp(cls):
"""Create a long help text with full docstrings for each subclass of WorkflowStage
Subclasses are found using cliutils.all_subclasses
"""
helpstrings = []
helpstrings.append('The following WorkflowStages are available:\n')
for sub in all_subclasses(cls):
helpstrings.append(
'{0}: {1}\n {2}\n'.format(
sub.spec, sub.__name__, sub.__doc__))
return ''.join(helpstrings)
class FindFiles(WorkflowStage):
"""Find files recursively in a folder
Input:
No input is required for this WorkflowStage
Output:
A flat set of file path strings
Args used:
* --root: the folder in which to start the search
        * --ext: the file extension to search for
"""
logger = logging.getLogger('rnaseqflow.WorkflowStage.FindFiles')
"""log4j-style class-logger"""
spec = '1'
"""FindFiles uses '1' as its specifier"""
def __init__(self, args):
"""Prepare the recursive file finder
Check that a root directory is provided, or ask for one
Make sure the search extension is valid
:param args: an object with settable and gettable attributes
:type args: Namespace, SimpleNamespace, etc.
"""
argfiller = ArgFiller(args)
argfiller.fill(['root', 'ext'])
self.root = args.root
self.ext = args.ext
def run(self, stage_input):
"""Run the recursive file finding stage
:param stage_input: not used, only for the interface
:type stage_input: object, None
:returns: A flat set of files found with the correct extension
:rtype: set(str)
"""
self.logger.info('Beginning file find operations')
outfiles = set()
for root, _, files in os.walk(self.root):
for basename in files:
if fnmatch.fnmatch(basename, "*" + self.ext):
filename = os.path.join(root, basename)
outfiles.add(filename)
self.logger.info('Found {0} files'.format(len(outfiles)))
return outfiles
class MergeSplitFiles(WorkflowStage):
"""Merge files by the identifying sequence and direction
Input:
An iterable of file names to be grouped and merged
Output:
A flat set of merged filenames
Args used:
* --root: the folder where merged files will be placed
* --ext: the file extention to be used for the output files
* --blocksize: number of kilobytes to use as a copy block size
"""
logger = logging.getLogger('rnaseqflow.WorkflowStage.MergeSplitFiles')
"""log4j-style class-logger"""
spec = '2'
"""MergeSplitFiles uses '2' as its specifier"""
def __init__(self, args):
"""Prepare for the merge file stage
Check for a root directory and a blocksize
:param args: an object with settable and gettable attributes
:type args: Namespace, SimpleNamespace, etc.
"""
argfiller = ArgFiller(args)
argfiller.fill(['root', 'ext', 'blocksize'])
self.root = args.root
self.blocksize = args.blocksize
self.ext = args.ext
self.outdir = os.path.join(self.root, 'merged')
try:
os.makedirs(self.outdir)
except OSError:
if not os.path.isdir(self.outdir):
self.logger.error(
'Cannot make directory {0}, '
'permissions'.format(self.outdir))
raise
def run(self, stage_input):
"""Run the merge files operation
Creates a directory merged under the root directory and fills it with
files concatenated from individual parts of large RNAseq data files
Files are grouped and ordered by searching the file basename for a
sequence identifier like AACTAG, a direction like R1, and a part number
formatted 001
:param stage_input: file names to be organized and merged
:type stage_input: iterable(str)
:returns: a set of organized files
:rtype: set(str)
"""
self.logger.info('Beginning file merge operations')
organized = self._organize_files(stage_input)
merged_files = set()
for i, (fileid, files) in enumerate(organized.iteritems()):
outfile_name = 'merged_' + \
fileid[0] + '_' + fileid[1] + self.ext
outfile_path = os.path.join(self.outdir, outfile_name)
self.logger.info(
'Building file {0:d} of {1:d}: {2}'.format(
i + 1, len(organized), outfile_path))
with open(outfile_path, 'wb') as outfile:
for j, infile in enumerate(files):
if j + 1 != self._get_part_num(infile):
self.logger.error(
'{0} is not file {1} of {2}. Files must be out of'
' order, or there are extra files in the root '
'folder that the merger cannot process. '
'Construction of file {2} is '
'terminated'.format(infile, j+1, outfile_path))
break
self.logger.debug(
                        'Merging file %d of %d: %s', j + 1, len(files),
infile)
shutil.copyfileobj(
open(infile, 'rb'), outfile, 1024 * self.blocksize)
merged_files.add(outfile_path)
self.logger.info('Created {0} merged files'.format(len(merged_files)))
return merged_files
def _organize_files(self, files):
"""Organizes a list of paths by sequence_id, part number, and direction
Uses regular expressions to find the six-character sequence ID, the
three character integer part number, and the direction (R1 or R2)
:param files: filenames to be organized
:type files: iterable(str)
:returns: organized files in a dictionary mapping the sequence ID and
direction to the files that have that ID, sorted in ascending part
number
:rtype: dict(tuple:list)
"""
mapping = {}
for path in files:
sequence_id = self._get_sequence_id(os.path.basename(path))
direction = self._get_direction_id(os.path.basename(path))
if not (sequence_id and direction):
self.logger.warning('Discarding file {0} - could not find '
'sequence ID and direction using '
'regular expressions'.format(
os.path.basename(path)))
continue
try:
mapping[(sequence_id, direction)].append(path)
except KeyError:
mapping[(sequence_id, direction)] = [path]
for key, lst in mapping.iteritems():
mapping[key] = sorted(lst, key=self._get_part_num)
return mapping
@staticmethod
def _get_sequence_id(filename):
"""Gets the six-letter RNA sequence that identifies the RNAseq file
Returns a six character string that is the ID, or an empty string if no
identifying sequence is found.
:param filename: the base filename to be processed
:type filename: str
:returns: the file's sequence ID, six characters of ACTG
:rtype: string
"""
p = re.compile('.*[ACTG]{6}')
m = p.search(filename)
if m is None:
return ''
else:
return m.group()
@staticmethod
def _get_direction_id(filename):
"""Gets the direction identifier from an RNAseq filename
A direction identifier is either R1 or R2, indicating a forward or a
backwards read, respectively.
:param filename: the base filename to be processed
:type filename: str
:returns: the file's direction ID, R1 or R2
:rtype: string
"""
p = re.compile('R\d{1}')
m = p.search(filename)
if m is None:
return ''
else:
return m.group()
@staticmethod
def _get_part_num(filename):
"""Returns an integer indicating the file part number of the selected
RNAseq file
RNAseq files, due to their size, are split into many smaller files,
each of which is given a three digit file part number (e.g. 001, 010).
This method returns that part number as an integer.
This requires that there only be one sequence of three digits in the
filename
:param filename: the base filename to be processed
:type filename: str
:returns: the file's part number
:rtype: int
"""
p = re.compile('_\d{3}')
m = p.search(filename)
if m is None:
return 0
else:
text = m.group()
return int(text[1:])
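# Worked example of the three helpers above, assuming a typical part filename
# such as 'Sample1_AACTAG_R1_001.fastq':
#
#     _get_sequence_id(...)  -> 'Sample1_AACTAG'  (match ends at the 6-mer)
#     _get_direction_id(...) -> 'R1'
#     _get_part_num(...)     -> 1                 (from the '_001' fragment)
#
# so run() would concatenate all parts sharing that ID and direction into
# 'merged_Sample1_AACTAG_R1<ext>' under <root>/merged.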
class FastQMCFTrimSolo(WorkflowStage):
"""Trim adapter sequences from files using fastq-mcf one file at a time
Input:
A flat set of files to be passed into fastq-mcf file-by-file
Output:
A flat set of trimmed file names
Args used:
* --root: the folder where trimmed files will be placed
* --adapters: the filepath of the fasta adapters file
* --fastq: the location of the fastq-mcf executable
* --fastq_args: a string of arguments to pass directly to fastq-mcf
* --quiet: silence fastq-mcf's output if given
"""
logger = logging.getLogger('rnaseqflow.WorkflowStage.FastQMCFTrimSolo')
"""log4j-style class-logger"""
spec = '3.0'
"""FastQMCFTrimSolo uses '3.0' as its specifier"""
def __init__(self, args):
"""Run all checks needed to create a FastQMCFTrimSolo object
Check that fastq-mcf exists in the system
Specify the fasta adapter file and any arguments
Create the output folder
:param args: an object with settable and gettable attributes
:type args: Namespace, SimpleNamespace, etc.
"""
argfiller = ArgFiller(args)
argfiller.fill(['root', 'adapters', 'fastq', 'fastq_args', 'quiet'])
self.root = args.root
self.adapters = args.adapters
self.fastq_args = args.fastq_args
self.executable = args.fastq
self.quiet = args.quiet
self.outdir = os.path.join(self.root, 'trimmed')
try:
os.makedirs(self.outdir)
except OSError:
if not os.path.isdir(self.outdir):
raise
try:
with open(os.devnull, "w") as fnull:
subprocess.call([self.executable], stdout=fnull, stderr=fnull)
except OSError:
self.logger.error(
'fastq-mcf not found, cannot use FastQMCFTrimSolo')
raise
else:
self.logger.info('fastq-mcf found')
def run(self, stage_input):
"""Trim files one at a time using fastq-mcf
:param stage_input: filenames to be processed
:type stage_input: iterable(str)
:returns: a set of filenames holding the processed files
:rtype: set(str)
"""
self.logger.info('Beginning file trim operation')
trimmed_files = set()
for i, fname in enumerate(stage_input):
outfile_name = 'trimmed_' + os.path.basename(fname)
outfile_path = os.path.join(self.outdir, outfile_name)
cmd = [self.executable, self.adapters, fname] + \
self.fastq_args.split() + ['-o', outfile_path]
self.logger.info(
'Building file {0:d} of {1:d}: {2}'.format(
i + 1, len(stage_input), outfile_path))
self.logger.debug('Calling %s', str(cmd))
if self.quiet:
with open(os.devnull, 'w') as nullfile:
subprocess.call(cmd, stdout=nullfile, stderr=nullfile)
else:
subprocess.call(cmd)
trimmed_files.add(outfile_path)
self.logger.info('Trimmed {0} files'.format(len(trimmed_files)))
return trimmed_files
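# For reference, the command list built in run() expands to roughly the
# following shell invocation (paths and extra arguments are illustrative):
#
#     fastq-mcf adapters.fasta merged_Sample1_AACTAG_R1.fastq <fastq_args...> \
#         -o <root>/trimmed/trimmed_merged_Sample1_AACTAG_R1.fastq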
class FastQMCFTrimPairs(WorkflowStage):
"""Trim adapter sequences from files using fastq-mcf in paired-end mode
Input:
A flat set of files to be passed into fastq-mcf in pairs
Output:
A flat set of trimmed file names
Args used:
* --root: the folder where trimmed files will be placed
* --adapters: the filepath of the fasta adapters file
* --fastq: the location of the fastq-mcf executable
* --fastq_args: a string of arguments to pass directly to fastq-mcf
* --quiet: silence fastq-mcf's output if given
"""
logger = logging.getLogger('rnaseqflow.WorkflowStage.FastQMCFTrimPairs')
"""log4j-style class-logger"""
spec = '3.1'
"""FastQMCFTrimPairs uses '3.1' as its specifier"""
def __init__(self, args):
"""Run all checks needed to create a FastQMCFTrimPairs object
Check that fastq-mcf exists in the system
Specify the fasta adapter file and any arguments
Create the output folder
:param args: an object with settable and gettable attributes
:type args: Namespace, SimpleNamespace, etc.
"""
argfiller = ArgFiller(args)
argfiller.fill(['root', 'adapters', 'fastq', 'fastq_args', 'quiet'])
self.root = args.root
self.adapters = args.adapters
self.fastq_args = args.fastq_args
self.executable = args.fastq
self.quiet = args.quiet
self.outdir = os.path.join(self.root, 'trimmed')
try:
os.makedirs(self.outdir)
except OSError:
if not os.path.isdir(self.outdir):
raise
try:
with open(os.devnull, "w") as fnull:
subprocess.call([self.executable], stdout=fnull, stderr=fnull)
except OSError:
self.logger.error(
'fastq-mcf not found, cannot use FastQMCFTrimPairs')
raise
else:
self.logger.info('fastq-mcf found')
def run(self, stage_input):
"""Trim files one at a time using fastq-mcf
:param stage_input: filenames to be processed
:type stage_input: iterable(str)
:returns: a set of filenames holding the processed files
:rtype: set(str)
"""
self.logger.info('Beginning file trim operation')
pairs = self._find_file_pairs(stage_input)
trimmed_files = set()
prog_count = 0
for f1, f2 in pairs:
outfile_name_1 = 'trimmed_' + os.path.basename(f1)
outfile_path_1 = os.path.join(self.outdir, outfile_name_1)
prog_count += 1
if f2:
prog_count += 1
outfile_name_2 = 'trimmed_' + os.path.basename(f2)
outfile_path_2 = os.path.join(self.outdir, outfile_name_2)
cmd = [self.executable, self.adapters, f1, f2] + \
self.fastq_args.split() + \
['-o', outfile_path_1, '-o', outfile_path_2]
self.logger.info(
'Building files {0:d} and {1:d} of {2:d}: {3} and {4}'.format(
prog_count - 1, prog_count, len(stage_input), outfile_path_1, outfile_path_2))
self.logger.debug('Calling %s', str(cmd))
trimmed_files.add(outfile_path_1)
trimmed_files.add(outfile_path_2)
else:
cmd = [self.executable, self.adapters, f1] + \
self.fastq_args.split() + ['-o', outfile_path_1]
self.logger.info(
'Building file {0:d} of {1:d}: {2}'.format(
prog_count, len(stage_input), outfile_path_1))
trimmed_files.add(outfile_path_1)
self.logger.debug('Calling %s', str(cmd))
if self.quiet:
with open(os.devnull, 'w') as nullfile:
subprocess.call(cmd, stdout=nullfile, stderr=nullfile)
else:
subprocess.call(cmd)
self.logger.info('Trimmed {0} files'.format(len(trimmed_files)))
return trimmed_files
def _find_file_pairs(self, files):
"""Finds pairs of forward and backward read files
:param files: filenames to be paired and trimmed
:type files: iterable(str)
:returns: pairs (f1, f2) that are paired files, forward and backward
If a file f1 does not have a mate, f2 will be None, and the file
will be trimmed without a mate
:rtype: set(tuple(str, str))
"""
pairs = set()
for f in files:
try:
pair = next(f2 for f2 in files if (
self._get_sequence_id(f2) == self._get_sequence_id(f) and
f2 != f))
except StopIteration as e:
pairs.add((f, None))
else:
pairs.add(tuple(fn for fn in sorted([f, pair])))
return pairs
@staticmethod
def _get_sequence_id(filename):
"""Gets the six-letter RNA sequence that identifies the RNAseq file
Returns a six character string that is the ID, or an empty string if no
identifying sequence is found.
:param filename: the base filename to be processed
:type filename: str
:returns: the file's sequence ID, six characters of ACTG
:rtype: string
"""
p = re.compile('.*[ACTG]{6}')
m = p.search(filename)
if m is None:
return ''
else:
return m.group()
| gpl-3.0 | 9,199,470,783,824,311,000 | 30.98984 | 102 | 0.586952 | false | 4.270684 | false | false | false |
SteveMcGrath/Concord | site/app/views/admin.py | 1 | 2428 | from flask import render_template, flash, redirect, session, url_for, abort, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, db, login_manager, forms
from app.models import User, Ticket
from common import display_errors
@login_manager.user_loader
def load_user(userid):
return User.query.filter_by(id=int(userid)).first()
def administrative(func):
def wrapped(*args, **kwargs):
if not g.user.admin:
flash('Not Authorized to access that page. Not an Admin', 'danger')
return redirect(url_for('home'))
return func(*args, **kwargs)
return wrapped
def author(func):
def wrapped(*args, **kwargs):
if not g.user.author:
flash('Not Authorized to access that page. Not an Author', 'danger')
return redirect(url_for('home'))
return func(*args, **kwargs)
return wrapped
def reviewer(func):
def wrapped(*args, **kwargs):
if not g.user.reviewer:
flash('Not Authorized to access that page. Not a Reviewer', 'danger')
return redirect(url_for('home'))
return func(*args, **kwargs)
return wrapped
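# Hedged usage sketch (route and view names below are hypothetical): the role
# decorators above are intended to sit beneath the login check, e.g.
#
#     @app.route('/admin')
#     @login_required
#     @administrative
#     def admin_dashboard():
#         return render_template('admin.html')
#
# Wrapping the inner functions with functools.wraps would preserve each view's
# __name__, which Flask uses as the default endpoint name when several views
# share these decorators.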
@app.route('/login', methods=['GET', 'POST'])
def login():
if g.user.is_authenticated():
return redirect(url_for('home'))
form = forms.LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is not None and user.check_password(form.password.data):
login_user(user)
return redirect(url_for('home'))
else:
user = None
        if user is None:
flash('Invalid Username or Password', 'error')
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/user/new', methods=['GET', 'POST'])
@app.route('/user/<username>', methods=['GET', 'POST'])
def user_info(username=None):
if g.user.username == username or g.user.admin:
user = User.query.filter_by(username=username).first_or_404()
tickets = Ticket.query.filter_by(user_id=user.id).all()
return render_template('user_info.html', person=user, tickets=tickets,
title='%s - Information' % user.username)
return redirect(url_for('home')) | gpl-2.0 | -7,936,594,103,518,559,000 | 32.736111 | 82 | 0.630972 | false | 3.729647 | false | false | false |
NifTK/NiftyNet | niftynet/layer/binary_masking.py | 1 | 3700 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
import scipy.ndimage as ndimg
from scipy.ndimage.morphology import binary_fill_holes as fill_holes
from niftynet.layer.base_layer import Layer
from niftynet.utilities.util_common import look_up_operations
from niftynet.utilities.util_common import otsu_threshold
"""
This class defines methods to generate a binary image from an input image.
The binary image can be used as an automatic foreground selector, so that later
processing layers can only operate on the `True` locations within the image.
"""
SUPPORTED_MASK_TYPES = set(['threshold_plus', 'threshold_minus',
'otsu_plus', 'otsu_minus', 'mean_plus'])
SUPPORTED_MULTIMOD_MASK_TYPES = set(['or', 'and', 'multi'])
class BinaryMaskingLayer(Layer):
def __init__(self,
type_str='otsu_plus',
multimod_fusion='or',
threshold=0.0):
super(BinaryMaskingLayer, self).__init__(name='binary_masking')
self.type_str = look_up_operations(
type_str.lower(), SUPPORTED_MASK_TYPES)
self.multimod_fusion = look_up_operations(
multimod_fusion.lower(), SUPPORTED_MULTIMOD_MASK_TYPES)
self.threshold = threshold
def __make_mask_3d(self, image):
assert image.ndim == 3
image_shape = image.shape
image = image.reshape(-1)
mask = np.zeros_like(image, dtype=np.bool)
thr = self.threshold
if self.type_str == 'threshold_plus':
mask[image > thr] = True
elif self.type_str == 'threshold_minus':
mask[image < thr] = True
elif self.type_str == 'otsu_plus':
thr = otsu_threshold(image) if np.any(image) else thr
mask[image > thr] = True
elif self.type_str == 'otsu_minus':
thr = otsu_threshold(image) if np.any(image) else thr
mask[image < thr] = True
elif self.type_str == 'mean_plus':
thr = np.mean(image)
mask[image > thr] = True
mask = mask.reshape(image_shape)
mask = ndimg.binary_dilation(mask, iterations=2)
mask = fill_holes(mask)
# foreground should not be empty
assert np.any(mask == True), \
"no foreground based on the specified combination parameters, " \
"please change choose another `mask_type` or double-check all " \
"input images"
return mask
def layer_op(self, image):
if image.ndim == 3:
return self.__make_mask_3d(image)
if image.ndim == 5:
mod_to_mask = [m for m in range(image.shape[4])
if np.any(image[..., :, m])]
mask = np.zeros_like(image, dtype=bool)
mod_mask = None
for mod in mod_to_mask:
for t in range(image.shape[3]):
mask[..., t, mod] = self.__make_mask_3d(image[..., t, mod])
# combine masks across the modalities dim
if self.multimod_fusion == 'or':
if mod_mask is None:
mod_mask = np.zeros(image.shape[:4], dtype=bool)
mod_mask = np.logical_or(mod_mask, mask[..., mod])
elif self.multimod_fusion == 'and':
if mod_mask is None:
mod_mask = np.ones(image.shape[:4], dtype=bool)
mod_mask = np.logical_and(mod_mask, mask[..., mod])
for mod in mod_to_mask:
mask[..., mod] = mod_mask
return mask
else:
raise ValueError("unknown input format")
| apache-2.0 | -959,226,072,506,172,300 | 39.659341 | 79 | 0.567297 | false | 3.806584 | false | false | false |
SUNET/eduid-webapp | src/eduid_webapp/orcid/views.py | 1 | 7848 | # -*- coding: utf-8 -*-
from flask import Blueprint, redirect, request, url_for
from oic.oic.message import AuthorizationResponse, Claims, ClaimsRequest
from six.moves.urllib_parse import urlencode
from eduid_common.api.decorators import MarshalWith, UnmarshalWith, require_user
from eduid_common.api.messages import CommonMsg, redirect_with_msg
from eduid_common.api.schemas.csrf import CSRFRequest
from eduid_common.api.utils import get_unique_hash, save_and_sync_user
from eduid_userdb.logs import OrcidProofing
from eduid_userdb.orcid import OidcAuthorization, OidcIdToken, Orcid
from eduid_userdb.proofing import OrcidProofingState, ProofingUser
from eduid_webapp.orcid.app import current_orcid_app as current_app
from eduid_webapp.orcid.helpers import OrcidMsg
from eduid_webapp.orcid.schemas import OrcidResponseSchema
__author__ = 'lundberg'
orcid_views = Blueprint('orcid', __name__, url_prefix='', template_folder='templates')
@orcid_views.route('/authorize', methods=['GET'])
@require_user
def authorize(user):
if user.orcid is None:
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
if not proofing_state:
current_app.logger.debug(
'No proofing state found for user {!s}. Initializing new proofing state.'.format(user)
)
proofing_state = OrcidProofingState(
id=None, modified_ts=None, eppn=user.eppn, state=get_unique_hash(), nonce=get_unique_hash()
)
current_app.proofing_statedb.save(proofing_state)
claims_request = ClaimsRequest(userinfo=Claims(id=None))
oidc_args = {
'client_id': current_app.oidc_client.client_id,
'response_type': 'code',
'scope': 'openid',
'claims': claims_request.to_json(),
'redirect_uri': url_for('orcid.authorization_response', _external=True),
'state': proofing_state.state,
'nonce': proofing_state.nonce,
}
authorization_url = '{}?{}'.format(current_app.oidc_client.authorization_endpoint, urlencode(oidc_args))
current_app.logger.debug('Authorization url: {!s}'.format(authorization_url))
current_app.stats.count(name='authn_request')
return redirect(authorization_url)
# Orcid already connected to user
redirect_url = current_app.conf.orcid_verify_redirect_url
return redirect_with_msg(redirect_url, OrcidMsg.already_connected)
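# For illustration only: the redirect built above sends the browser to a URL of
# the form below (the exact endpoint comes from the OIDC client configuration):
#
#     https://orcid.org/oauth/authorize?client_id=...&response_type=code
#         &scope=openid&claims=...&redirect_uri=...&state=...&nonce=...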
@orcid_views.route('/authorization-response', methods=['GET'])
@require_user
def authorization_response(user):
# Redirect url for user feedback
redirect_url = current_app.conf.orcid_verify_redirect_url
current_app.stats.count(name='authn_response')
# parse authentication response
query_string = request.query_string.decode('utf-8')
current_app.logger.debug('query_string: {!s}'.format(query_string))
authn_resp = current_app.oidc_client.parse_response(AuthorizationResponse, info=query_string, sformat='urlencoded')
current_app.logger.debug('Authorization response received: {!s}'.format(authn_resp))
if authn_resp.get('error'):
current_app.logger.error(
'AuthorizationError from {}: {} - {} ({})'.format(
request.host, authn_resp['error'], authn_resp.get('error_message'), authn_resp.get('error_description')
)
)
return redirect_with_msg(redirect_url, OrcidMsg.authz_error)
user_oidc_state = authn_resp['state']
proofing_state = current_app.proofing_statedb.get_state_by_oidc_state(user_oidc_state, raise_on_missing=False)
if not proofing_state:
current_app.logger.error('The \'state\' parameter ({!s}) does not match a user state.'.format(user_oidc_state))
return redirect_with_msg(redirect_url, OrcidMsg.no_state)
# do token request
args = {
'code': authn_resp['code'],
'redirect_uri': url_for('orcid.authorization_response', _external=True),
}
current_app.logger.debug('Trying to do token request: {!s}'.format(args))
token_resp = current_app.oidc_client.do_access_token_request(
scope='openid', state=authn_resp['state'], request_args=args, authn_method='client_secret_basic'
)
current_app.logger.debug('token response received: {!s}'.format(token_resp))
id_token = token_resp['id_token']
if id_token['nonce'] != proofing_state.nonce:
current_app.logger.error('The \'nonce\' parameter does not match for user')
return redirect_with_msg(redirect_url, OrcidMsg.unknown_nonce)
current_app.logger.info('ORCID authorized for user')
# do userinfo request
current_app.logger.debug('Trying to do userinfo request:')
userinfo = current_app.oidc_client.do_user_info_request(
method=current_app.conf.userinfo_endpoint_method, state=authn_resp['state']
)
current_app.logger.debug('userinfo received: {!s}'.format(userinfo))
if userinfo['sub'] != id_token['sub']:
current_app.logger.error(
'The \'sub\' of userinfo does not match \'sub\' of ID Token for user {!s}.'.format(proofing_state.eppn)
)
return redirect_with_msg(redirect_url, OrcidMsg.sub_mismatch)
# Save orcid and oidc data to user
current_app.logger.info('Saving ORCID data for user')
proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
oidc_id_token = OidcIdToken(
iss=id_token['iss'],
sub=id_token['sub'],
aud=id_token['aud'],
exp=id_token['exp'],
iat=id_token['iat'],
nonce=id_token['nonce'],
auth_time=id_token['auth_time'],
created_by='orcid',
)
oidc_authz = OidcAuthorization(
access_token=token_resp['access_token'],
token_type=token_resp['token_type'],
id_token=oidc_id_token,
expires_in=token_resp['expires_in'],
refresh_token=token_resp['refresh_token'],
created_by='orcid',
)
orcid_element = Orcid(
id=userinfo['id'],
name=userinfo['name'],
given_name=userinfo['given_name'],
family_name=userinfo['family_name'],
is_verified=True,
oidc_authz=oidc_authz,
created_by='orcid',
)
orcid_proofing = OrcidProofing(
eppn=proofing_user.eppn,
created_by='orcid',
orcid=orcid_element.id,
issuer=orcid_element.oidc_authz.id_token.iss,
audience=orcid_element.oidc_authz.id_token.aud,
proofing_method='oidc',
proofing_version='2018v1',
)
if current_app.proofing_log.save(orcid_proofing):
current_app.logger.info('ORCID proofing data saved to log')
proofing_user.orcid = orcid_element
save_and_sync_user(proofing_user)
current_app.logger.info('ORCID proofing data saved to user')
message_args = dict(msg=OrcidMsg.authz_success, error=False)
else:
current_app.logger.info('ORCID proofing data NOT saved, failed to save proofing log')
message_args = dict(msg=CommonMsg.temp_problem)
# Clean up
current_app.logger.info('Removing proofing state')
current_app.proofing_statedb.remove_state(proofing_state)
return redirect_with_msg(redirect_url, **message_args)
@orcid_views.route('/', methods=['GET'])
@MarshalWith(OrcidResponseSchema)
@require_user
def get_orcid(user):
return user.to_dict()
@orcid_views.route('/remove', methods=['POST'])
@UnmarshalWith(CSRFRequest)
@MarshalWith(OrcidResponseSchema)
@require_user
def remove_orcid(user):
current_app.logger.info('Removing ORCID data for user')
proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
proofing_user.orcid = None
save_and_sync_user(proofing_user)
current_app.logger.info('ORCID data removed for user')
return proofing_user.to_dict()
| bsd-3-clause | 2,024,243,273,847,922,200 | 40.52381 | 119 | 0.674312 | false | 3.30303 | false | false | false |
Aninstance/invoicing | invoicing/signals.py | 1 | 8748 | from django.dispatch import receiver
from django.db.models.signals import post_save, pre_save
from invoicing.models import *
from haystack.management.commands import update_index
from django.utils import timezone
import invoicing.views as invoicing
from invoicing import schedules
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
"""
Note: if doing a save in post_save ensure signal is disconnected to avoid an infinite loop of saving:
e.g.:
post_save.disconnect(my_method, sender=sender)
instance.save()
post_save.connect(my_method, sender=Invoice)
"""
@receiver(post_save, sender=Invoice)
def invoice_post_save(sender, instance, created, **kwargs):
"""
post save receiver for Invoice model
"""
save_updated = False
post_save.disconnect(invoice_post_save, sender=sender)
if created:
invoice_id = model_functions.generate_invoice_random_id() # generate random id
instance.invoice_number = '{}-{}'.format(invoice_id, instance.id)
save_updated = True
# recurring invoice stuff
if not getattr(instance, 'save_pdf', False): # if save not being called again just to update DB after gen of PDF
# set the invoice as start/stop recurring
recurring(instance=instance)
# dispatch email if client email notifications set to true and the save is being called when saving a new PDF
if instance.client.email_notifications and getattr(instance, 'save_pdf', False):
if instance.invoice_status == Invoice.INVOICE_STATUS[5][0]: # receipt (paid in full)
# send email
if not getattr(instance, 'receipt_emailed', False): # if hasn't already been sent
invoicing.pdf_gen_or_fetch_or_email(invoice_number=instance.invoice_number,
type=invoicing.PDF_TYPES.get('receipt'),
email=True, regenerate=False)
# mark as sent
setattr(instance, 'receipt_emailed', True)
save_updated = True
elif instance.invoice_status == Invoice.INVOICE_STATUS[1][0]: # invoice (unpaid)
# send email
if not getattr(instance, 'invoice_emailed', False): # if hasn't already been sent
invoicing.pdf_gen_or_fetch_or_email(invoice_number=instance.invoice_number,
type=invoicing.PDF_TYPES.get('invoice'),
email=True, regenerate=False)
# mark as sent
setattr(instance, 'invoice_emailed', True)
# change status from issued to sent
setattr(instance, 'invoice_status', Invoice.INVOICE_STATUS[2][0])
save_updated = True
elif instance.invoice_status == Invoice.INVOICE_STATUS[4][0]: # invoice (partially paid)
# send email
invoicing.pdf_gen_or_fetch_or_email(invoice_number=instance.invoice_number,
type=invoicing.PDF_TYPES.get('invoice_update'), email=True)
# save the instance if something's been called ...
if save_updated:
# disable pre_save signal, as not required to be run again for the second save!
pre_save.disconnect(invoice_pre_save, sender=Invoice)
# save the instance
instance.save()
# re-enable pre_save signal
pre_save.connect(invoice_pre_save, sender=Invoice)
# re-enable post_save signal
post_save.connect(invoice_post_save, sender=Invoice)
@receiver(pre_save, sender=Invoice)
def invoice_pre_save(sender, instance, **kwargs):
"""
    Populates the marked_as_paid field with a datetime when the status is
    changed to 'PAID_IN_FULL', and in that case sets the paid_amount field to
    the total owed.
    Also updates the invoice status to partially paid when a partial amount is
    paid.
"""
try: # existing invoice to be modified
inv = Invoice.objects.get(invoice_number=instance.invoice_number)
except Invoice.DoesNotExist: # new invoice
inv = Invoice() # generate an empty reference Invoice instance if no existing (i.e. A NEW INVOICE)
# IF NOT SAVING PDF (Most stuff goes in here!)
if not getattr(instance, 'save_pdf',
False): # avoid running this pre_save if 'save_pdf' param added to instance
# PRE-SAVE AMENDMENT STUFF
instance_dict = invoicing.invoice_instance_to_dict(instance) # get instance as dict + sums
# if invoice issued, save the time
if getattr(instance, 'invoice_status') in dict(Invoice.INVOICE_STATUS[1:6]) and \
not getattr(inv, 'datetime_issued'):
setattr(instance, 'datetime_issued', timezone.now())
# ensure invoice_status is upper case
setattr(instance, 'invoice_status', instance.invoice_status.upper())
# # enter marked_as_paid datetime into database if status changed to marked as paid
if instance.invoice_status == Invoice.INVOICE_STATUS[5][0]:
if not inv.marked_as_paid: # if not originally marked as paid
instance.marked_as_paid = timezone.now() # set as marked as paid
# set paid_amount to total owed if status is set to paid in full
instance.paid_amount = Decimal(instance_dict.get('Total after tax'))
# change status if paid_amount is submitted
if inv.paid_amount or instance.paid_amount:
# if total paid >= total owed, set status to paid in full
if inv.paid_amount >= Decimal(instance_dict.get('Total after tax')) or instance.paid_amount >= \
Decimal(instance_dict.get('Total after tax')):
instance.invoice_status = Invoice.INVOICE_STATUS[5][0]
# enter marked_as_paid datetime into database
instance.marked_as_paid = timezone.now()
else: # else set status to partially paid
instance.invoice_status = Invoice.INVOICE_STATUS[4][0]
# check for overdue status todo: move this to an automated django-q later
date_due = getattr(instance, 'date_due', None) or getattr(inv, 'date_due')
if date_due < timezone.now().date() and \
instance.invoice_status in dict(Invoice.INVOICE_STATUS[:5]):
instance.overdue = True # set overdue to True if date_due < now
else:
instance.overdue = False # ensure set back to False if paid
# todo: new feature - if amount paid exceeds amount owed, store client credit note ...
@receiver(post_save, sender=Account)
def change_uploaded_file_permissions(sender, instance, **kwargs):
"""
Changes the file permissions of uploaded media files to something sensible
"""
if getattr(instance, 'logo', False):
os.chmod('{}'.format(instance.logo.file.name), 0o664)
@receiver(post_save) # don't specify sender, so it is fired on all model saves
def update_search_index(sender, instance, created, **kwargs):
"""
receiver to update the search index whenever a model is saved
"""
watched_models = [Invoice, Account]
# if sender in watched but do NOT trigger when saving the model when saving PDF (no point)
if sender in watched_models and not getattr(instance, 'save_pdf', False):
update_index.Command().handle(interactive=False)
def recurring(instance=None):
"""
Function to handle the creation of child invoices
"""
if instance.invoice_number:
# ensure child recurring invoice does not produce its own child
if int(getattr(instance, 'recurring', False)) not in dict(Invoice.RECURRING[:1]) and getattr(instance,
'parent_invoice',
False):
raise ValidationError(_('A child invoice cannot itself be recurring'))
# ensure invoice is not its own parent
try:
if getattr(instance, 'invoice_number', False) == getattr(instance, 'parent_invoice', False).invoice_number:
raise ValidationError(_('An invoice cannot be it\'s own parent ... stop messing around!'))
except AttributeError:
pass # thrown if no invoice_number for parent, so can discard as if no parent, no worries!
# if status >= ISSUED, call the scheduler to start/stop recurring
if getattr(instance, 'invoice_status', False) in dict(Invoice.INVOICE_STATUS[1:]):
schedules.recurring_invoice_scheduler(instance=instance)
return True
return None
| gpl-3.0 | 2,832,531,829,811,774,500 | 52.668712 | 119 | 0.631573 | false | 4.263158 | false | false | false |
catapult-project/catapult | third_party/gsutil/third_party/oauth2client/tests/http_mock.py | 21 | 4000 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP helpers mock functionality."""
from six.moves import http_client
class ResponseMock(dict):
"""Mock HTTP response"""
def __init__(self, vals=None):
if vals is None:
vals = {}
self.update(vals)
self.status = int(self.get('status', http_client.OK))
class HttpMock(object):
"""Mock of HTTP object."""
def __init__(self, headers=None, data=None):
"""HttpMock constructor.
Args:
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': http_client.OK}
self.data = data
self.response_headers = headers
self.headers = None
self.uri = None
self.method = None
self.body = None
self.headers = None
self.requests = 0
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
self.uri = uri
self.method = method
self.body = body
self.headers = headers
self.redirections = redirections
self.requests += 1
return ResponseMock(self.response_headers), self.data
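# Minimal usage sketch (illustrative only):
#
#     http = HttpMock({'status': '200'}, data=b'{"access_token": "token"}')
#     resp, content = http.request('http://example.org/token', method='POST')
#     assert resp.status == 200 and http.requests == 1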
class HttpMockSequence(object):
"""Mock of HTTP object with multiple return values.
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an HttpMock instance::
http = HttpMockSequence([
({'status': '401'}, b''),
({'status': '200'}, b'{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request('http://examples.com')
There are special values you can pass in for content to trigger
behavours that are helpful in testing.
* 'echo_request_headers' means return the request headers in the response
body
* 'echo_request_body' means return the request body in the response body
"""
def __init__(self, iterable):
"""HttpMockSequence constructor.
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
self.requests = []
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
self.requests.append({
'method': method,
'uri': uri,
'body': body,
'headers': headers,
})
# Read any underlying stream before sending the request.
body_stream_content = (body.read()
if getattr(body, 'read', None) else None)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_body':
content = (body
if body_stream_content is None else body_stream_content)
return ResponseMock(resp), content
class CacheMock(object):
def __init__(self):
self.cache = {}
def get(self, key, namespace=''):
# ignoring namespace for easier testing
return self.cache.get(key, None)
| bsd-3-clause | 5,458,100,558,172,799,000 | 30.25 | 79 | 0.59125 | false | 4.469274 | false | false | false |
ebilionis/py-mcmc | demos/demo1.py | 2 | 1495 | """
This demo demonstrates how to train a GPy model using the pymcmc module.
Author:
Ilias Bilionis
Date:
3/20/2014
"""
import GPy
import pymcmc as pm
import numpy as np
import matplotlib.pyplot as plt
# Construct a GPy Model (anything really..., here we are using a regression
# example)
model = GPy.examples.regression.olympic_marathon_men(optimize=False, plot=False)
# Look at the model before it is trained:
print 'Model before training:'
print str(model)
# Pick a proposal for MCMC (here we pick a Metropolized Langevin Proposal
proposal = pm.MALAProposal(dt=1.)
# Construct a Metropolis Hastings object
mcmc = pm.MetropolisHastings(model, # The model you want to train
proposal=proposal, # The proposal you want to use
db_filename='demo_1_db.h5')# The HDF5 database to write the results
# Look at the model now: We have automatically added uninformative priors
# by looking at the constraints of the parameters
print 'Model after adding priors:'
print str(model)
# Now we can sample it:
mcmc.sample(100000, # Number of MCMC steps
num_thin=100, # Number of steps to skip
num_burn=1000, # Number of steps to burn initially
verbose=True) # Be verbose or not
# Here is the model at the last MCMC step:
print 'Model after training:'
print str(model)
# Let's plot the results:
model.plot(plot_limits=(1850, 2050))
a = raw_input('press enter...')
| lgpl-3.0 | 6,326,718,548,425,373,000 | 32.977273 | 96 | 0.686288 | false | 3.559524 | false | false | false |
cmc333333/regulations-parser | regparser/layer/interpretations.py | 1 | 2541 | from collections import defaultdict
from regparser.citations import Label
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.interpretation import text_to_labels
class Interpretations(Layer):
"""Supplement I (interpretations) provides (sometimes very lengthy) extra
information about particular paragraphs. This layer provides those
interpretations."""
shorthand = 'interpretations'
def __init__(self, *args, **kwargs):
Layer.__init__(self, *args, **kwargs)
self.lookup_table = defaultdict(list)
def pre_process(self):
"""Create a lookup table for each interpretation"""
def per_node(node):
if (node.node_type != struct.Node.INTERP or
node.label[-1] != struct.Node.INTERP_MARK):
return
# Always add a connection based on the interp's label
self.lookup_table[tuple(node.label[:-1])].append(node)
# Also add connections based on the title
for label in text_to_labels(node.title or '',
Label.from_node(node),
warn=False):
label = tuple(label[:-1]) # Remove Interp marker
if node not in self.lookup_table[label]:
self.lookup_table[label].append(node)
struct.walk(self.tree, per_node)
def process(self, node):
"""Is there an interpretation associated with this node? If yes,
return the associated layer information. @TODO: Right now, this only
associates if there is a direct match. It should also associate if any
parents match"""
label = tuple(node.label)
if self.lookup_table[label]: # default dict; will always be present
interp_labels = [n.label_id() for n in self.lookup_table[label]
if not self.empty_interpretation(n)]
return [{'reference': l} for l in interp_labels] or None
def empty_interpretation(self, interp):
"""We don't want to include empty (e.g. \n\n) nodes as
interpretations unless their children are subparagraphs. We
distinguish subparagraphs from structural children by checking the
location of the 'Interp' delimiter."""
if interp.text.strip():
return False
return all(not child.label or
child.label[-1] == struct.Node.INTERP_MARK
for child in interp.children)
| cc0-1.0 | -3,965,981,242,969,317,400 | 42.067797 | 78 | 0.611964 | false | 4.481481 | false | false | false |
cjaymes/pyscap | src/scap/collector/windows/ResolvePathFilenameCollector.py | 1 | 12823 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from scap.Collector import Collector, ArgumentException
logger = logging.getLogger(__name__)
class ResolvePathFilenameCollector(Collector):
def __init__(self, host, args):
super(ResolvePathFilenameCollector, self).__init__(host, args)
if 'path' not in args:
raise ArgumentException('ResolvePathFilenameCollector requires path argument')
if 'filename' not in args:
raise ArgumentException('ResolvePathFilenameCollector requires filename argument')
for i in ('value_datatypes', 'value_masks', 'value_operations'):
if i not in args:
raise ArgumentException('ResolvePathFilenameCollector requires ' + i + ' argument')
if args['value_datatypes']['path'] != 'string':
raise ArgumentException('ResolvePathFilenameCollector requires string path')
if args['value_datatypes']['filename'] != 'string':
raise ArgumentException('ResolvePathFilenameCollector requires string filename')
# NOTE: operation should be already validated by EntityObjectType
# TODO the max_depth behavior MUST not be used when a pattern match is used with a path entity
# TODO the recurse behavior MUST not be used when a pattern match is used with a path entity
# TODO the recurse_direction behavior MUST not be used when a pattern match is used with a path entity
# the recurse_file_system behavior MUST not be set to 'defined' when a pattern match is used with a path entity
if args['value_operations']['path'] == 'pattern match' and args['behavior_recurse_file_system'] == 'defined':
raise ArgumentException('ResolvePathFilenameCollector behavior_recurse_file_system set to defined with path pattern match operation')
def collect(self):
if self.args['value_operations']['path'] in ['equals', 'case insensitive equals']:
# check if path exists
col = self.host.load_collector('DirectoryExistsCollector', {'path': self.args['path']})
if not col.collect():
raise FileNotFoundError(self.args['path'] + ' was not found')
paths = [self.args['path']]
elif self.args['value_operations']['path'] in ['not equal', 'case insensitive not equal']:
raise NotImplementedError(self.args['value_operations']['path'] + ' operation not supported for ResolvePathFilenameCollector')
elif self.args['value_operations']['path'] == 'pattern match':
path = self.args['path']
logger.debug('Matching pattern ' + path)
# strip off leading ^ or trailing $ as they are assumed
if path.startswith('^'):
path = path[1:]
if path.endswith('$'):
path = path[:-1]
paths = []
m = re.match(r'^([a-zA-Z]):\\', path)
if m:
# C:\ local abs path
drive = m.group(1) + ':\\'
logger.debug('Absolute path on drive ' + drive)
cmd = "Get-PSDrive -PSProvider FileSystem | % { $_.Root }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if drive not in out_lines:
# don't have the drive, so path won't match
raise FileNotFoundError(self.args['path'] + ' was not found')
start = m.group(1) + ':'
fp = path.split('\\')
fp = fp[1:]
for p in fp:
logger.debug('Checking if path component ' + p + ' exists')
cmd = "Get-Item -LiteralPath '" + start + '\\' + p + "' -ErrorAction Ignore | % { $_.Name }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code == 0 and len(out_lines) == 1:
logger.debug(p + ' exists')
start = start + '\\' + p
else:
logger.debug(p + ' does not exist; using ' + start + ' as starting point')
break
logger.debug('Recursing from ' + start)
cmd = "Get-ChildItem -LiteralPath '" + start + "' -Recurse -ErrorAction Ignore | % { $_.FullName }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code != 0 or len(out_lines) < 1:
raise FileNotFoundError(self.args['path'] + ' was not found')
for l in out_lines:
m = re.fullmatch(self.args['path'], l)
if m:
logger.debug(l + ' matches ' + self.args['path'])
paths.append(l)
elif path.startswith(r'\\\\\?\\UNC\\'):
# \\?\UNC\ extended UNC length path
raise NotImplementedError('extended UNC paths are not yet supported')
elif path.startswith(r'\\\\\?\\'):
# \\?\ extended length path
raise NotImplementedError('extended paths are not yet supported')
elif path.startswith(r'\\\\\.\\'):
# \\.\ device namespace path
raise NotImplementedError('device paths are not yet supported')
elif path.startswith(r'\\\\'):
# \\server\share UNC path
                m = re.match(r'^\\\\([^\\]+)\\', path)
if not m:
raise ArgumentException('Invalid UNC path: ' + path)
server = m.group(1)
logger.debug('UNC path on server ' + server)
start = '\\\\' + server
fp = path.split('\\')
fp = fp[3:]
for p in fp:
logger.debug('Checking if path component ' + p + ' exists')
cmd = "Get-Item -LiteralPath '" + start + '\\' + p + "' -ErrorAction Ignore | % { $_.Name }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code == 0 and len(out_lines) == 1:
logger.debug(p + ' exists')
start = start + '\\' + p
else:
logger.debug(p + ' does not exist; using ' + start + ' as starting point')
break
logger.debug('Recursing from ' + start)
cmd = "Get-ChildItem -LiteralPath '" + start + "' -Recurse -ErrorAction Ignore | % { $_.FullName }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code != 0 or len(out_lines) < 1:
raise FileNotFoundError(self.args['path'] + ' was not found')
for l in out_lines:
m = re.fullmatch(self.args['path'], l)
if m:
logger.debug(l + ' matches ' + self.args['path'])
paths.append(l)
elif path.startswith(r'\.\.\\'):
# ..\ relative parent path
cmd = "(Get-Item -Path '..\\' -Verbose).FullName"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
start = out_lines[0]
logger.debug('Recursing from ' + start)
cmd = "Get-ChildItem -LiteralPath '" + start + "' -Recurse -ErrorAction Ignore | % { $_.FullName }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code != 0 or len(out_lines) < 1:
raise FileNotFoundError(self.args['path'] + ' was not found')
for l in out_lines:
m = re.fullmatch(self.args['path'], l.replace(start, '..'))
if m:
logger.debug(l + ' matches ' + self.args['path'])
paths.append(l)
elif path.startswith(r'\.\\'):
# .\ relative current path
cmd = "(Get-Item -Path '.\\' -Verbose).FullName"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
start = out_lines[0]
logger.debug('Recursing from ' + start)
cmd = "Get-ChildItem -LiteralPath '" + start + "' -Recurse -ErrorAction Ignore | % { $_.FullName }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code != 0 or len(out_lines) < 1:
raise FileNotFoundError(self.args['path'] + ' was not found')
for l in out_lines:
m = re.fullmatch(self.args['path'], l.replace(start, '.'))
if m:
logger.debug(l + ' matches ' + self.args['path'])
paths.append(l)
else:
raise ArgumentException('Invalid path: ' + path)
# TODO imp behavior_windows_view
filepaths = []
for path in paths:
if self.args['behavior_recurse_file_system'] == 'local' and path.startswith('\\\\'):
continue
if self.args['value_operations']['filename'] in ['equals', 'case insensitive equals']:
filepaths.extend(self.search_path_for(path, self.args['filename'], '-eq', self.args['behavior_max_depth'], self.args['behavior_recurse_direction']))
elif self.args['value_operations']['filename'] in ['not equal', 'case insensitive not equal']:
raise NotImplementedError(self.args['value_operations']['filename'] + ' operation not supported for ResolvePathFilenameCollector')
elif self.args['value_operations']['filename'] == 'pattern match':
filepaths.extend(self.search_path_for(path, self.args['filename'], '-match', self.args['behavior_max_depth'], self.args['behavior_recurse_direction']))
else:
raise NotImplementedError('Unknown operation not supported for ResolvePathFilenameCollector filename')
return filepaths
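    # Illustrative arguments for this collector (the keys mirror the checks in
    # __init__/collect; the concrete values are made up):
    #
    #     args = {
    #         'path': 'C:\\Windows\\System32', 'filename': 'cmd.exe',
    #         'value_datatypes': {'path': 'string', 'filename': 'string'},
    #         'value_operations': {'path': 'equals', 'filename': 'equals'},
    #         'value_masks': {},
    #         'behavior_max_depth': -1, 'behavior_recurse_direction': 'down',
    #         'behavior_recurse_file_system': 'all',
    #     }
    #     collector = host.load_collector('ResolvePathFilenameCollector', args)
    #     filepaths = collector.collect()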
def search_path_for(self, path, filename, operator, remaining_depth, direction):
logger.debug('Looking for ' + filename + ' in ' + path)
if remaining_depth == 0:
return []
if direction == 'up':
raise NotImplementedError('Upward recursion is not yet implemented')
# TODO implement link traversal validation
if remaining_depth == -1:
cmd = "Get-ChildItem -Recurse -LiteralPath '" + path.replace("'", "\\'") + "'"
else:
cmd = "Get-ChildItem -LiteralPath '" + path.replace("'", "\\'") + "'"
cmd = cmd + " | % {"
cmd = cmd + "$_.FullName + ',' + "
cmd = cmd + "($_.Mode[0] -eq 'd') + ',' + "
cmd = cmd + "($_.Name " + operator + " '" + filename.replace("'", "\\'") + "')"
cmd = cmd + " }"
return_code, out_lines, err_lines = self.host.exec_command('powershell -Command "' + cmd.replace('\"', '\\"') + '"')
if return_code != 0:
raise FileNotFoundError('Error finding ' + filename + ' in ' + path)
filepaths = []
for l in out_lines:
logger.debug('Got ' + l + ' in ' + path)
fullname, is_dir, matches = l.rsplit(',', 3)
is_dir = is_dir == 'True'
matches = matches == 'True'
if is_dir and direction == 'down' and remaining_depth >= 1:
filepaths.extend(self.search_path_for(fullname, filename, operator, remaining_depth - 1, direction))
if matches:
filepaths.append(fullname)
return filepaths
| gpl-3.0 | 3,837,583,239,619,794,000 | 47.388679 | 167 | 0.537862 | false | 4.361565 | false | false | false |
demisto/content | Packs/MicrosoftAdvancedThreatAnalytics/Integrations/MicrosoftAdvancedThreatAnalytics/MicrosoftAdvancedThreatAnalytics.py | 1 | 14791 | import traceback
from typing import Any, Dict, List, Tuple, Union
import urllib3
from dateparser import parse
from pytz import utc
from requests_ntlm import HttpNtlmAuth
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
urllib3.disable_warnings()
SEVERITY_TRANSLATION = {
'Low': 1,
'Medium': 2,
'High': 3
}
class Client(BaseClient):
def get_suspicious_activity_request(self, suspicious_activity_id: str = '') -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'/suspiciousActivities/{suspicious_activity_id}'
)
def get_suspicious_activity_details_request(self, suspicious_activity_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'/suspiciousActivities/{suspicious_activity_id}/details'
)
def update_suspicious_activity_status_request(self,
suspicious_activity_id: str,
suspicious_activity_status: str
) -> Dict[str, Any]:
body = {
'Status': suspicious_activity_status
}
return self._http_request(
method='POST',
url_suffix=f'/suspiciousActivities/{suspicious_activity_id}',
json_data=body,
ok_codes=(204,),
resp_type='text'
)
def delete_suspicious_activity_request(self, suspicious_activity_id: str) -> Dict[str, Any]:
body = {
'shouldDeleteSametype': False
}
params = {
'shouldDeleteSameType': 'false'
}
return self._http_request(
method='DELETE',
url_suffix=f'/suspiciousActivities/{suspicious_activity_id}',
json_data=body,
params=params,
ok_codes=(204,),
resp_type='text'
)
def get_monitoring_alert_request(self) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix='/monitoringAlerts'
)
def get_entity_request(self, entity_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'/uniqueEntities/{entity_id}'
)
def get_entity_profile_request(self, entity_id: str) -> Dict[str, Any]:
return self._http_request(
method='GET',
url_suffix=f'/uniqueEntities/{entity_id}/profile'
)
def test_module(client: Client) -> str:
client.get_monitoring_alert_request()
return 'ok'
def get_suspicious_activity(client: Client, args: Dict[str, str]) -> Union[CommandResults, str]:
suspicious_activity_id = args.get('id', '')
suspicious_activity_status = argToList(args.get('status', ''))
suspicious_activity_severity = argToList(args.get('severity', ''))
suspicious_activity_type = argToList(args.get('type', ''))
suspicious_activity_start_time = parse(args.get('start_time', ''))
suspicious_activity_end_time = parse(args.get('end_time', ''))
limit = int(args.get('limit', '50'))
raw_suspicious_activity = client.get_suspicious_activity_request(suspicious_activity_id)
suspicious_activity_output = []
if raw_suspicious_activity:
suspicious_activities = raw_suspicious_activity if isinstance(raw_suspicious_activity, list) \
else [raw_suspicious_activity]
if not suspicious_activity_id and \
any([suspicious_activity_status, suspicious_activity_severity, suspicious_activity_type,
suspicious_activity_start_time, suspicious_activity_end_time]):
for activity in suspicious_activities:
if suspicious_activity_status and activity.get('Status') not in suspicious_activity_status:
continue
if suspicious_activity_severity and activity.get('Severity') not in suspicious_activity_severity:
continue
if suspicious_activity_type and activity.get('Type') not in suspicious_activity_type:
continue
if suspicious_activity_start_time and parse(activity.get('StartTime')).replace(tzinfo=utc) < \
suspicious_activity_start_time.replace(tzinfo=utc):
continue
if suspicious_activity_end_time and parse(activity.get('EndTime')).replace(tzinfo=utc) > \
suspicious_activity_end_time.replace(tzinfo=utc):
continue
suspicious_activity_output.append(activity)
else:
suspicious_activity_output = suspicious_activities
suspicious_activity_output = suspicious_activity_output[:limit]
if suspicious_activity_output:
readable_output = tableToMarkdown(
'Microsoft Advanced Threat Analytics Suspicious Activity',
suspicious_activity_output,
headers=['Id', 'Type', 'Status', 'Severity', 'StartTime', 'EndTime'],
removeNull=True
)
if suspicious_activity_id:
suspicious_activity_details = client.get_suspicious_activity_details_request(suspicious_activity_id)
details_records = suspicious_activity_details.get('DetailsRecords', [])
if details_records:
suspicious_activity_output[0]['DetailsRecords'] = details_records
readable_output += tableToMarkdown(
'Details Records',
details_records,
removeNull=True
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='MicrosoftATA.SuspiciousActivity',
outputs_key_field='Id',
outputs=suspicious_activity_output
)
else:
return 'No results found.'
def update_suspicious_activity_status(client: Client, args: Dict[str, str]) -> str:
suspicious_activity_id = args.get('id', '')
suspicious_activity_status = args.get('status', '')
if suspicious_activity_status == 'Delete':
client.delete_suspicious_activity_request(suspicious_activity_id)
return f'Suspicious activity {suspicious_activity_id} was deleted successfully.'
else:
client.update_suspicious_activity_status_request(suspicious_activity_id, suspicious_activity_status)
return f'Suspicious activity {suspicious_activity_id} status was updated to ' \
f'{suspicious_activity_status} successfully.'
def get_monitoring_alert(client: Client, args: Dict[str, str]) -> Union[CommandResults, str]:
monitoring_alert_status = argToList(args.get('status', ''))
monitoring_alert_severity = argToList(args.get('severity', ''))
monitoring_alert_type = argToList(args.get('type', ''))
monitoring_alert_start_time = parse(args.get('start_time', ''))
monitoring_alert_end_time = parse(args.get('end_time', ''))
limit = int(args.get('limit', '50'))
raw_monitoring_alert = client.get_monitoring_alert_request()
monitoring_alert_output = []
if raw_monitoring_alert:
monitoring_alerts = raw_monitoring_alert if isinstance(raw_monitoring_alert, list) else [raw_monitoring_alert]
if any([monitoring_alert_status, monitoring_alert_severity, monitoring_alert_type,
monitoring_alert_start_time, monitoring_alert_end_time]):
for alert in monitoring_alerts:
if monitoring_alert_status and alert.get('Status') not in monitoring_alert_status:
continue
if monitoring_alert_severity and alert.get('Severity') not in monitoring_alert_severity:
continue
if monitoring_alert_type and alert.get('Type') not in monitoring_alert_type:
continue
if monitoring_alert_start_time and parse(alert.get('StartTime')).replace(tzinfo=utc) < \
monitoring_alert_start_time.replace(tzinfo=utc):
continue
if monitoring_alert_end_time and parse(alert.get('EndTime')).replace(tzinfo=utc) > \
monitoring_alert_end_time.replace(tzinfo=utc):
continue
monitoring_alert_output.append(alert)
else:
monitoring_alert_output = monitoring_alerts
monitoring_alert_output = monitoring_alert_output[:limit]
if monitoring_alert_output:
readable_output = tableToMarkdown(
'Microsoft Advanced Threat Analytics Monitoring Alert',
monitoring_alert_output,
headers=['Id', 'Type', 'Status', 'Severity', 'StartTime', 'EndTime'],
removeNull=True
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='MicrosoftATA.MonitoringAlert',
outputs_key_field='Id',
outputs=monitoring_alert_output
)
else:
return 'No results found.'
def get_entity(client: Client, args: Dict[str, str]) -> Union[CommandResults, str]:
entity_id = args.get('id', '')
entity = client.get_entity_request(entity_id)
if entity:
readable_output = tableToMarkdown(
f'Microsoft Advanced Threat Analytics Entity {entity_id}',
entity,
headers=['Id', 'SystemDisplayName', 'DistinguishedName', 'UpnName', 'Type', 'CreationTime'],
removeNull=True
)
entity_profile = client.get_entity_profile_request(entity_id)
if entity_profile:
entity['Profile'] = entity_profile
readable_output += tableToMarkdown(
'Entity Profile',
entity_profile,
headers=['Type', 'SuspiciousActivitySeverityToCountMapping', 'UpdateTime', 'IsBehaviorChanged'],
removeNull=True
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='MicrosoftATA.Entity',
outputs_key_field='Id',
outputs=entity
)
else:
return 'No results found.'
def fetch_incidents(
client: Client,
last_run: Dict[str, str],
first_fetch_time: str,
max_results: int,
activity_status_to_fetch: List,
min_severity: int,
activity_type_to_fetch: List
) -> Tuple[Dict[str, str], List[Dict[str, str]]]:
last_fetch = last_run.get('last_fetch', '') if last_run.get('last_fetch') else first_fetch_time
last_fetch_dt = parse(last_fetch).replace(tzinfo=utc)
latest_start_time = parse(last_fetch).replace(tzinfo=utc)
incidents_fetched = 0
incidents: List[Dict[str, Any]] = []
suspicious_activities = client.get_suspicious_activity_request()
suspicious_activities_list = suspicious_activities if isinstance(suspicious_activities, list) \
else [suspicious_activities]
    demisto.debug(f'Fetched suspicious activities: {suspicious_activities_list}')
for activity in suspicious_activities_list:
if incidents_fetched == max_results:
break
activity_id = activity.get('Id', '')
activity_status = activity.get('Status', '')
activity_type = activity.get('Type', '')
activity_severity = activity.get('Severity', '')
if activity_status_to_fetch and activity_status not in activity_status_to_fetch:
demisto.debug(f'Skipping suspicious activity {activity_id} with status {activity_status}')
continue
if activity_type_to_fetch and activity_type not in activity_type_to_fetch:
demisto.debug(f'Skipping suspicious activity {activity_id} with type {activity_type}')
continue
if SEVERITY_TRANSLATION[activity_severity] < min_severity:
demisto.debug(f'Skipping suspicious activity {activity_id} with severity {activity_severity}')
continue
activity_start_time = activity.get('StartTime', '')
activity_start_time_dt = parse(activity_start_time).replace(tzinfo=utc)
if activity_start_time_dt > latest_start_time:
incidents.append({
'name': f'{activity_type} - {activity_id}',
'occurred': activity_start_time,
'rawJSON': json.dumps(activity)
})
if activity_start_time_dt > last_fetch_dt:
last_fetch_dt = activity_start_time_dt
last_fetch = activity_start_time
incidents_fetched += 1
next_run = {'last_fetch': last_fetch}
return next_run, incidents
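# Editor's note (illustrative; the activity values below are hypothetical): on a
# successful run this function returns something shaped like
#     ({'last_fetch': '2020-01-01T10:30:00'},
#      [{'name': 'Honeytoken activity - 123',
#        'occurred': '2020-01-01T10:30:00',
#        'rawJSON': '{...}'}])
# where 'name' is built from the activity Type and Id, and 'occurred' is the
# activity StartTime, as assembled in the loop above.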
def main() -> None:
params = demisto.params()
base_url = urljoin(params['url'], '/api/management')
username = params['credentials']['identifier']
password = params['credentials']['password']
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
auth=HttpNtlmAuth(username, password),
verify=verify_certificate,
proxy=proxy
)
if demisto.command() == 'test-module':
result = test_module(client)
return_results(result)
elif demisto.command() == 'fetch-incidents':
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=params.get('first_fetch', '3 days'),
max_results=int(params.get('max_fetch', '50')),
activity_status_to_fetch=params.get('activity_status', ['Open']),
min_severity=SEVERITY_TRANSLATION[params.get('min_severity', 'Low')],
activity_type_to_fetch=argToList(params.get('activity_type', ''))
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'ms-ata-suspicious-activities-list':
return_results(get_suspicious_activity(client, demisto.args()))
elif demisto.command() == 'ms-ata-suspicious-activity-status-set':
return_results(update_suspicious_activity_status(client, demisto.args()))
elif demisto.command() == 'ms-ata-monitoring-alerts-list':
return_results(get_monitoring_alert(client, demisto.args()))
elif demisto.command() == 'ms-ata-entity-get':
return_results(get_entity(client, demisto.args()))
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | -3,743,627,044,597,745,700 | 41.502874 | 118 | 0.610844 | false | 4.139659 | false | false | false |
derak/directory.py | directory.py | 1 | 5070 | #!/usr/bin/python
"""LDAP Directory Management, wrapper for python-ldap (http://www.python-ldap.org).
This module provides high level control over an LDAP Directory.
Some code was originally built on examples available here:
http://www.grotan.com/ldap/python-ldap-samples.html
Copyright (c) 2014 Derak Berreyesa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = "Derak Berreyesa (github.com/derak)"
__version__ = "1.0"
import sys, json
import ldap
import ldap.modlist as modlist
class Directory(object):
def __init__(self):
self.result = {}
self.l = None
def connect(self, url, username, password):
try:
            # Don't chase referrals (recommended when binding to Active Directory)
            ldap.set_option(ldap.OPT_REFERRALS, 0)
            # Don't require a valid TLS certificate, so self-signed certs are accepted
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, 0)
# Open a connection
self.l = ldap.initialize(url)
# Bind/authenticate with a user with apropriate rights to add objects
self.l.simple_bind_s(username, password)
except ldap.LDAPError, e:
sys.stderr.write('Error connecting to LDAP server: ' + str(e) + '\n')
self.result['status'] = 'Error connecting to LDAP server: ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def add_user(self, dn, attrs):
try:
# Convert our dict to nice syntax for the add-function using modlist-module
ldif = modlist.addModlist(attrs)
# Add user
self.l.add_s(dn,ldif)
except ldap.LDAPError, e:
sys.stderr.write('Error with LDAP add_user: ' + str(e) + '\n')
self.result['status'] = 'Error with LDAP add_user: ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def add_user_to_groups(self, dn, group_dn_list):
try:
# Add user to groups as member
mod_attrs = [( ldap.MOD_ADD, 'member', dn )]
for g in group_dn_list:
self.l.modify_s(g, mod_attrs)
except ldap.LDAPError, e:
sys.stderr.write('Error: adding user to group(s): ' + str(e) + '\n')
self.result['status'] = 'Error: adding user to group(s): ' + str(e) + '\n'
print json.dumps(self.result)
sys.exit(1)
def set_password(self, dn, password):
        # Active Directory expects unicodePwd to be the quoted password encoded as UTF-16LE
unicode_pass = unicode('\"' + password + '\"', 'iso-8859-1')
password_value = unicode_pass.encode('utf-16-le')
        # Replace the unicodePwd attribute on the given entry
add_pass = [(ldap.MOD_REPLACE, 'unicodePwd', [password_value])]
try:
self.l.modify_s(dn, add_pass)
except ldap.LDAPError, error_message:
self.result['status'] = 'Error: could not change password: ' + str(error_message) + '\n'
print json.dumps(self.result)
sys.exit(1)
else:
self.result['status'] = 'Successfully changed password \n'
def modify_user(self, dn, flag):
"""Modify user, flag is userAccountControl property"""
# 512 will set user account to enabled
# change the user to enabled
mod_acct = [(ldap.MOD_REPLACE, 'userAccountControl', str(flag))]
try:
self.l.modify_s(dn, mod_acct)
except ldap.LDAPError, error_message:
self.result['status'] = 'Error: could not modify user: ' + str(error_message) + '\n'
print json.dumps(self.result)
sys.exit(1)
else:
self.result['status'] = 'Successfully modified user \n'
def print_users(self, base_dn, attrs):
filter = '(objectclass=person)'
users = self.l.search_s(base_dn, ldap.SCOPE_SUBTREE, filter, attrs)
for row in users:
print row
def disconnect(self):
self.l.unbind_s()
def get_result(self):
return self.result
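# Minimal usage sketch (editor's illustration; the server URL, credentials, DNs
# and attribute values below are hypothetical):
#
#     d = Directory()
#     d.connect('ldaps://ad.example.com:636', 'EXAMPLE\\admin', 'secret')
#     dn = 'CN=Jane Doe,OU=Users,DC=example,DC=com'
#     attrs = {'objectclass': ['top', 'person', 'organizationalPerson', 'user'],
#              'sAMAccountName': 'jdoe', 'displayName': 'Jane Doe'}
#     d.add_user(dn, attrs)
#     d.set_password(dn, 'N3wP@ssw0rd')
#     d.modify_user(dn, 512)  # 512 = normal, enabled account
#     d.disconnect()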
if __name__ == '__main__':
print 'This is directory.py'
| mit | -1,881,307,883,944,796,000 | 34.957447 | 100 | 0.622485 | false | 3.846737 | false | false | false |
stefanseifert/redash | redash/query_runner/google_spreadsheets.py | 8 | 5437 | from base64 import b64decode
import json
import logging
from dateutil import parser
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
enabled = True
except ImportError:
enabled = False
def _load_key(filename):
with open(filename, "rb") as f:
return json.loads(f.read())
def _guess_type(value):
if value == '':
return TYPE_STRING
try:
val = int(value)
return TYPE_INTEGER
except ValueError:
pass
try:
val = float(value)
return TYPE_FLOAT
except ValueError:
pass
if unicode(value).lower() in ('true', 'false'):
return TYPE_BOOLEAN
try:
val = parser.parse(value)
return TYPE_DATETIME
except ValueError:
pass
return TYPE_STRING
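# For illustration (editor's note), _guess_type maps values like these:
#     '42'   -> TYPE_INTEGER       '3.14'       -> TYPE_FLOAT
#     'true' -> TYPE_BOOLEAN       '2015-06-01' -> TYPE_DATETIME
#     ''     -> TYPE_STRING (empty strings short-circuit to string)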
def _value_eval_list(value):
value_list = []
for member in value:
if member == '' or member is None:
val = None
value_list.append(val)
continue
try:
val = int(member)
value_list.append(val)
continue
except ValueError:
pass
try:
val = float(member)
value_list.append(val)
continue
except ValueError:
pass
if unicode(member).lower() in ('true', 'false'):
if unicode(member).lower() == 'true':
value_list.append(True)
else:
value_list.append(False)
continue
try:
val = parser.parse(member)
value_list.append(val)
continue
except ValueError:
pass
value_list.append(member)
return value_list
HEADER_INDEX = 0
class WorksheetNotFoundError(Exception):
def __init__(self, worksheet_num, worksheet_count):
message = "Worksheet number {} not found. Spreadsheet has {} worksheets. Note that the worksheet count is zero based.".format(worksheet_num, worksheet_count)
super(WorksheetNotFoundError, self).__init__(message)
def parse_worksheet(worksheet):
if not worksheet:
return {'columns': [], 'rows': []}
column_names = []
columns = []
duplicate_counter = 1
for j, column_name in enumerate(worksheet[HEADER_INDEX]):
if column_name in column_names:
column_name = u"{}{}".format(column_name, duplicate_counter)
duplicate_counter += 1
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': TYPE_STRING
})
if len(worksheet) > 1:
for j, value in enumerate(worksheet[HEADER_INDEX+1]):
columns[j]['type'] = _guess_type(value)
rows = [dict(zip(column_names, _value_eval_list(row))) for row in worksheet[HEADER_INDEX + 1:]]
data = {'columns': columns, 'rows': rows}
return data
def parse_spreadsheet(spreadsheet, worksheet_num):
worksheets = spreadsheet.worksheets()
worksheet_count = len(worksheets)
if worksheet_num >= worksheet_count:
raise WorksheetNotFoundError(worksheet_num, worksheet_count)
worksheet = worksheets[worksheet_num].get_all_values()
return parse_worksheet(worksheet)
class GoogleSpreadsheet(BaseQueryRunner):
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "google_spreadsheets"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'jsonKeyFile': {
"type": "string",
'title': 'JSON Key File'
}
},
'required': ['jsonKeyFile'],
'secret': ['jsonKeyFile']
}
def __init__(self, configuration):
super(GoogleSpreadsheet, self).__init__(configuration)
def _get_spreadsheet_service(self):
scope = [
'https://spreadsheets.google.com/feeds',
]
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
credentials = SignedJwtAssertionCredentials(key['client_email'], key["private_key"], scope=scope)
spreadsheetservice = gspread.authorize(credentials)
return spreadsheetservice
def test_connection(self):
self._get_spreadsheet_service()
def run_query(self, query, user):
logger.debug("Spreadsheet is about to execute query: %s", query)
values = query.split("|")
        key = values[0]  # the spreadsheet key (document id)
        worksheet_num = 0 if len(values) != 2 else int(values[1])  # zero-based worksheet index, first worksheet by default
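        # e.g. (editor's note, the key below is hypothetical): the query
        # "1AbCdEfGhIjKlMnOpQrStUvWxYz|1" reads the second (zero-based index 1)
        # worksheet of that spreadsheet; without the "|n" suffix the first
        # worksheet is used.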
try:
spreadsheet_service = self._get_spreadsheet_service()
spreadsheet = spreadsheet_service.open_by_key(key)
data = parse_spreadsheet(spreadsheet, worksheet_num)
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except gspread.SpreadsheetNotFound:
error = "Spreadsheet ({}) not found. Make sure you used correct id.".format(key)
json_data = None
return json_data, error
register(GoogleSpreadsheet)
| bsd-2-clause | 3,098,874,932,534,900,000 | 27.170984 | 165 | 0.594445 | false | 4.339186 | false | false | false |
imayhaveborkedit/discord.py | discord/__init__.py | 1 | 3031 | # -*- coding: utf-8 -*-
"""
Discord API Wrapper
~~~~~~~~~~~~~~~~~~~
A basic wrapper for the Discord API.
:copyright: (c) 2015-2019 Rapptz
:license: MIT, see LICENSE for more details.
"""
__title__ = 'discord'
__author__ = 'Rapptz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015-2019 Rapptz'
__version__ = '1.3.0a'
from collections import namedtuple
import logging
from .client import Client
from .appinfo import AppInfo
from .user import User, ClientUser, Profile
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .activity import *
from .channel import *
from .guild import Guild, SystemChannelFlags
from .relationship import Relationship
from .member import Member, VoiceState
from .message import Message, Attachment
from .asset import Asset
from .errors import *
from .calls import CallMessage, GroupCall
from .permissions import Permissions, PermissionOverwrite
from .role import Role
from .file import File
from .colour import Color, Colour
from .invite import Invite, PartialInviteChannel, PartialInviteGuild
from .widget import Widget, WidgetMember, WidgetChannel
from .object import Object
from .reaction import Reaction
from . import utils, opus, abc, rtp
from .enums import *
from .embeds import Embed
from .shard import AutoShardedClient
from .player import *
from .reader import *
from .webhook import *
from .voice_client import VoiceClient
from .audit_logs import AuditLogChanges, AuditLogEntry, AuditLogDiff
from .raw_models import *
from .team import *
from .speakingstate import SpeakingState
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major=1, minor=3, micro=0, releaselevel='alpha', serial=0)
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
import warnings
warnings.simplefilter('once', category=RuntimeWarning)
warnings.warn("""This is a development branch.
DO NOT:
- Expect anything to work.
- Expect anything broken to be fixed in a timely manner.
- Expect docs.
- Expect it to be done anytime soon.
- Expect this code to be up to date with the main repo.
- Expect help with this fork from randos in the discord.py help channels.
- Bother people in the help server for assistance anyways.
- Mention the words "machine learning" or "AI" without being able to
produce a university email or degree.
- Try to use this fork without some degree of python competence.
If I see you struggling with basic stuff I will ignore your problem
and tell you to learn python.
If you have questions ping Imayhaveborkedit somewhere in the help server and
ask directly. For other matters such as comments and concerns relating more
to the api design post it here instead:
https://github.com/Rapptz/discord.py/issues/1094
""", RuntimeWarning, stacklevel=1000)
warnings.simplefilter('default', category=RuntimeWarning)
del warnings
| mit | 5,965,994,541,591,187,000 | 29.928571 | 85 | 0.758825 | false | 3.774595 | false | false | false |
csomerlot/RPiSmartThermostat | src/wallUnit.py | 1 | 7692 | #!/usr/bin/python
import sys, os, time, urllib, datetime, socket, sched, threading
basepath = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
sys.path.append(basepath + '/libs/Adafruit_Python_CharLCD')
import Adafruit_CharLCD as LCD
sys.path.append(basepath + '/libs/requests')
sys.path.append(basepath + '/libs/python-forecast.io')
sys.path.append(basepath + '/libs/io-client-python')
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
import forecastio
import Adafruit_IO
import tempControl
UI = (
("time",),
("indoor temp", ('offset up', 'offset down')),
("outdoor temp",),
("diagnostics", ('IP', 'reboot')),
('auxiliaries', (
("fan", ('on', 'off')),
("patio melter", ('on', 'off')),
("outside light", ('on', 'off')),
("garden hose", ('on', 'off'))
))
)
threads = []
topUIidx = 0
def log(message):
with file(sys.argv[0]+".log", 'a') as logFile:
logFile.write("%s: %s\n" % (datetime.datetime.now(), message))
def getOutdoor(lcd):
global topUIidx
temp = tempControl.getOutdoor()
if topUIidx == 3:
lcd.clear()
lcd.message("Outside temp\n%i deg F" % temp)
update = threading.Timer(300, getOutdoor, (lcd,))
update.start()
def getIndoor(lcd):
global topUIidx
temp, humidity = tempControl.getIndoor()
target = tempControl.getTarget()
if topUIidx == 2:
lcd.clear()
lcd.message("Inside temp: %iF\nSet to: %iF" % (int(round(temp,0)), target))
update = threading.Timer(300, getIndoor, (lcd,))
update.start()
def getIp():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
return s.getsockname()[0]
except:
return "unknown"
def getTime(lcd):
d = datetime.datetime.now()
if topUIidx == 1:
lcd.clear()
lcd.message(d.strftime("%m/%d %I:%M %p\n%A"))
update = threading.Timer(60, getTime, (lcd,))
update.start()
def setTopMessage(lcd, on=True):
global topUIidx
global threads
lcd.clear()
if topUIidx == 1:
t = threading.Thread(name="time", target=getTime, args=(lcd,))
t.start()
elif topUIidx == 2:
lcd.message("Inside temp:\nSet to:")
t = threading.Thread(name="insideTemp", target=getIndoor, args=(lcd,))
threads.append(t)
t.start()
elif topUIidx == 3:
lcd.message("Outside temp\n...")
t = threading.Thread(name="outsideTemp", target=getOutdoor, args=(lcd,))
threads.append(t)
t.start()
elif topUIidx == 4: lcd.message('Press up/down to\nuse Aux. systems')
elif topUIidx == 5: lcd.message('Welcome to\nRPi Thermostat')
def setAuxMessage(idx, lcd, on):
lcd.clear()
if idx == 0:
if not on: lcd.message('Press select to\nturn fan on')
else: lcd.message('Press select to\nturn fan off')
elif idx == 1:
if not on: lcd.message('Press select to\nturn melter on')
else: lcd.message('Press select to\nturn melter off')
elif idx == 2:
if not on: lcd.message('Press select to\nturn light on')
else: lcd.message('Press select to\nturn light off')
elif idx == 3:
if not on: lcd.message('Press select to\nturn hose on')
else: lcd.message('Press select to\nturn hose off')
else: lcd.message('Aux menu level\nerror: choice=%i' % idx)
def setDiagMessage(idx, lcd):
lcd.clear()
if idx == 0:
lcd.message("IP address\n%s" % (getIp()))
elif idx == 1:
lcd.message("Press select\nto reboot")
else:
lcd.message('Menu Error\ndiag level choice=%i' % idx)
def setFurnace():
temp, humidity = tempControl.getIndoor()
target = tempControl.getTarget()
if target > temp:
On = True
else:
On = False
callRelay(3, On)
def callRelay(idx, On):
url = r'http://192.168.42.44/relay%i%s' % (idx, {True:'On', False:"Off"}[On])
log(url)
try:
result = urllib.urlopen(url).read()
except IOError:
result = "Remote control unit not responding"
log(result)
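# For example (editor's note), callRelay(3, True) requests
# http://192.168.42.44/relay3On on the remote control unit and
# callRelay(3, False) requests http://192.168.42.44/relay3Off.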
def restart():
command = "/usr/bin/sudo /sbin/shutdown -r now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
log( output )
def main():
global topUIidx
global threads
scheduler = sched.scheduler(time.time, time.sleep)
nextEventTime = time.mktime(tempControl.getNextEventTime())
nextEvent = scheduler.enterabs(nextEventTime, 1, setFurnace, None)
# Start a thread to run the events
t = threading.Thread(target=scheduler.run)
t.start()
lcd = LCD.Adafruit_CharLCDPlate()
lcd.set_color(1,1,1)
secUIidx = 0
auxDevices = [False, False, False, False]
setTopMessage(lcd)
while True:
if lcd.is_pressed(LCD.LEFT):
secUIidx = 0
topUIidx -= 1
if topUIidx < 1:
topUIidx = len(UI)
setTopMessage(lcd)
if lcd.is_pressed(LCD.RIGHT):
secUIidx = 0
topUIidx += 1
if topUIidx > len(UI):
topUIidx = 1
setTopMessage(lcd)
if lcd.is_pressed(LCD.UP):
if UI[topUIidx][0] == 'indoor temp':
tempControl.offset += 1
setTopMessage(lcd)
t = threading.Thread(name="furnaceUp", target=setFurnace)
threads.append(t)
t.start()
if UI[topUIidx][0] == 'auxiliaries':
secUIidx += 1
if secUIidx > len(UI[topUIidx][0][1]):
secUIidx = 0
setAuxMessage(secUIidx, lcd, auxDevices[secUIidx])
if UI[topUIidx][0] == "diagnostics":
secUIidx += 1
if secUIidx > len(UI[topUIidx][0][1]):
secUIidx = 0
setDiagMessage(secUIidx, lcd)
if lcd.is_pressed(LCD.DOWN):
if UI[topUIidx][0] == 'indoor temp':
tempControl.offset -= 1
setTopMessage(lcd)
t = threading.Thread(name="furnaceDown", target=setFurnace)
threads.append(t)
t.start()
if UI[topUIidx][0] == 'auxiliaries':
secUIidx -= 1
if secUIidx < 0:
secUIidx = len(UI[topUIidx][0][1])
setAuxMessage(secUIidx, lcd, auxDevices[secUIidx])
if UI[topUIidx][0] == "diagnostics":
secUIidx -= 1
if secUIidx < 0:
secUIidx = len(UI[topUIidx][0][1])
setDiagMessage(secUIidx, lcd)
if lcd.is_pressed(LCD.SELECT):
if UI[topUIidx][0] == "diagnostics" and UI[topUIidx][secUIidx] == 'reboot':
lcd.clear()
lcd.message("\nrebooting...")
restart()
if UI[topUIidx][0] == 'auxiliaries':
t = threading.Thread(name="callRelayAux", target=callRelay, args =(secUIidx+1, auxDevices[secUIidx]))
threads.append(t)
t.start()
auxDevices[secUIidx] = not auxDevices[secUIidx]
setAuxMessage(secUIidx, lcd, auxDevices[secUIidx])
if __name__ == '__main__':
    # TODO: check internet connection
    # TODO: check for relay module
main()
| mit | 6,654,319,928,651,757,000 | 30.016129 | 117 | 0.546802 | false | 3.441611 | false | false | false |
apache/incubator-allura | Allura/allura/model/artifact.py | 1 | 32531 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from collections import defaultdict
from datetime import datetime
import pymongo
from pylons import tmpl_context as c, app_globals as g
from pylons import request
from ming import schema as S
from ming.orm import state, session
from ming.orm import FieldProperty, ForeignIdProperty, RelationProperty
from ming.orm.declarative import MappedClass
from ming.utils import LazyProperty
from webhelpers import feedgenerator as FG
from allura.lib import helpers as h
from allura.lib import security
from .session import main_orm_session
from .session import project_orm_session
from .session import artifact_orm_session
from .index import ArtifactReference
from .types import ACL, MarkdownCache
from .project import AppConfig
from .notification import MailFooter
from filesystem import File
log = logging.getLogger(__name__)
class Artifact(MappedClass):
"""
Base class for anything you want to keep track of.
- Automatically indexed into Solr (see index() method)
- Has a discussion thread that can have files attached to it
:var mod_date: last-modified :class:`datetime`
:var acl: dict of permission name => [roles]
:var labels: list of plain old strings
"""
class __mongometa__:
session = artifact_orm_session
name = 'artifact'
indexes = [
('app_config_id', 'labels'),
]
def before_save(data):
_session = artifact_orm_session._get()
skip_mod_date = getattr(_session, 'skip_mod_date', False)
skip_last_updated = getattr(_session, 'skip_last_updated', False)
if not skip_mod_date:
data['mod_date'] = datetime.utcnow()
else:
log.debug('Not updating mod_date')
if c.project and not skip_last_updated:
c.project.last_updated = datetime.utcnow()
type_s = 'Generic Artifact'
# Artifact base schema
_id = FieldProperty(S.ObjectId)
mod_date = FieldProperty(datetime, if_missing=datetime.utcnow)
app_config_id = ForeignIdProperty(
'AppConfig', if_missing=lambda: c.app.config._id)
plugin_verson = FieldProperty(S.Deprecated)
tool_version = FieldProperty(S.Deprecated)
acl = FieldProperty(ACL)
tags = FieldProperty(S.Deprecated)
labels = FieldProperty([str])
references = FieldProperty(S.Deprecated)
backreferences = FieldProperty(S.Deprecated)
app_config = RelationProperty('AppConfig')
# Not null if artifact originated from external import. The import ID is
# implementation specific, but should probably be an object indicating
# the source, original ID, and any other info needed to identify where
# the artifact came from. But if you only have one source, a str might do.
import_id = FieldProperty(None, if_missing=None)
deleted = FieldProperty(bool, if_missing=False)
def __json__(self):
"""Return a JSON-encodable :class:`dict` representation of this
Artifact.
"""
return dict(
_id=str(self._id),
mod_date=self.mod_date,
labels=list(self.labels),
related_artifacts=[a.url() for a in self.related_artifacts()],
discussion_thread=self.discussion_thread.__json__(),
discussion_thread_url=h.absurl('/rest%s' %
self.discussion_thread.url()),
)
def parent_security_context(self):
"""Return the :class:`allura.model.project.AppConfig` instance for
this Artifact.
ACL processing for this Artifact continues at the AppConfig object.
This lets AppConfigs provide a 'default' ACL for all artifacts in the
tool.
"""
return self.app_config
@classmethod
def attachment_class(cls):
raise NotImplementedError, 'attachment_class'
@classmethod
def translate_query(cls, q, fields):
"""Return a translated Solr query (``q``), where generic field
identifiers are replaced by the 'strongly typed' versions defined in
``fields``.
"""
for f in fields:
if '_' in f:
base, typ = f.rsplit('_', 1)
q = q.replace(base + ':', f + ':')
return q
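    # For illustration (editor's note): with fields=('title_s', 'status_s'),
    # the query 'title:foo AND status:open' becomes
    # 'title_s:foo AND status_s:open'.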
@LazyProperty
def ref(self):
"""Return :class:`allura.model.index.ArtifactReference` for this
Artifact.
"""
return ArtifactReference.from_artifact(self)
@LazyProperty
def refs(self):
"""Artifacts referenced by this one.
:return: list of :class:`allura.model.index.ArtifactReference`
"""
return self.ref.references
@LazyProperty
def backrefs(self):
"""Artifacts that reference this one.
:return: list of :attr:`allura.model.index.ArtifactReference._id`'s
"""
q = ArtifactReference.query.find(dict(references=self.index_id()))
return [aref._id for aref in q]
def related_artifacts(self):
"""Return all Artifacts that are related to this one.
"""
related_artifacts = []
for ref_id in self.refs + self.backrefs:
ref = ArtifactReference.query.get(_id=ref_id)
if ref is None:
continue
artifact = ref.artifact
if artifact is None:
continue
artifact = artifact.primary()
if artifact is None:
continue
# don't link to artifacts in deleted tools
if hasattr(artifact, 'app_config') and artifact.app_config is None:
continue
# TODO: This should be refactored. We shouldn't be checking
# artifact type strings in platform code.
if artifact.type_s == 'Commit' and not artifact.repo:
ac = AppConfig.query.get(
_id=ref.artifact_reference['app_config_id'])
app = ac.project.app_instance(ac) if ac else None
if app:
artifact.set_context(app.repo)
if artifact not in related_artifacts and (getattr(artifact, 'deleted', False) == False):
related_artifacts.append(artifact)
return sorted(related_artifacts, key=lambda a: a.url())
def subscribe(self, user=None, topic=None, type='direct', n=1, unit='day'):
"""Subscribe ``user`` to the :class:`allura.model.notification.Mailbox`
for this Artifact.
:param user: :class:`allura.model.auth.User`
If ``user`` is None, ``c.user`` will be subscribed.
"""
from allura.model import Mailbox
if user is None:
user = c.user
Mailbox.subscribe(
user_id=user._id,
project_id=self.app_config.project_id,
app_config_id=self.app_config._id,
artifact=self, topic=topic,
type=type, n=n, unit=unit)
def unsubscribe(self, user=None):
"""Unsubscribe ``user`` from the
:class:`allura.model.notification.Mailbox` for this Artifact.
:param user: :class:`allura.model.auth.User`
If ``user`` is None, ``c.user`` will be unsubscribed.
"""
from allura.model import Mailbox
if user is None:
user = c.user
Mailbox.unsubscribe(
user_id=user._id,
project_id=self.app_config.project_id,
app_config_id=self.app_config._id,
artifact_index_id=self.index_id())
def primary(self):
"""If an artifact is a "secondary" artifact (discussion of a ticket, for
instance), return the artifact that is the "primary".
"""
return self
@classmethod
def artifacts_labeled_with(cls, label, app_config):
"""Return all artifacts of type ``cls`` that have the label ``label`` and
are in the tool denoted by ``app_config``.
:param label: str
:param app_config: :class:`allura.model.project.AppConfig` instance
"""
return cls.query.find({'labels': label, 'app_config_id': app_config._id})
def email_link(self, subject='artifact'):
"""Return a 'mailto' URL for this Artifact, with optional subject.
"""
if subject:
return 'mailto:%s?subject=[%s:%s:%s] Re: %s' % (
self.email_address,
self.app_config.project.shortname,
self.app_config.options.mount_point,
self.shorthand_id(),
subject)
else:
return 'mailto:%s' % self.email_address
@property
def project(self):
"""Return the :class:`allura.model.project.Project` instance to which
this Artifact belongs.
"""
return getattr(self.app_config, 'project', None)
@property
def project_id(self):
"""Return the ``_id`` of the :class:`allura.model.project.Project`
instance to which this Artifact belongs.
"""
return self.app_config.project_id
@LazyProperty
def app(self):
"""Return the :class:`allura.model.app.Application` instance to which
this Artifact belongs.
"""
if not self.app_config:
return None
if getattr(c, 'app', None) and c.app.config._id == self.app_config._id:
return c.app
else:
return self.app_config.load()(self.project, self.app_config)
def index_id(self):
"""Return a globally unique artifact identifier.
Used for SOLR ID, shortlinks, and possibly elsewhere.
"""
id = '%s.%s#%s' % (
self.__class__.__module__,
self.__class__.__name__,
self._id)
return id.replace('.', '/')
def index(self):
"""Return a :class:`dict` representation of this Artifact suitable for
search indexing.
Subclasses should override this, providing a dictionary of solr_field => value.
These fields & values will be stored by Solr. Subclasses should call the
super() index() and then extend it with more fields.
You probably want to override at least title and text to have
meaningful search results and email senders.
You can take advantage of Solr's dynamic field typing by adding a type
suffix to your field names, e.g.:
_s (string) (not analyzed)
_t (text) (analyzed)
_b (bool)
_i (int)
"""
project = self.project
return dict(
id=self.index_id(),
mod_date_dt=self.mod_date,
title='Artifact %s' % self._id,
project_id_s=str(project._id),
project_name_t=project.name,
project_shortname_t=project.shortname,
tool_name_s=self.app_config.tool_name,
mount_point_s=self.app_config.options.mount_point,
is_history_b=False,
url_s=self.url(),
type_s=self.type_s,
labels_t=' '.join(l for l in self.labels),
snippet_s='',
deleted_b=self.deleted)
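    # A subclass override would typically look like this sketch (editor's
    # illustration; MyArtifact and its fields are hypothetical):
    #
    #     def index(self):
    #         result = super(MyArtifact, self).index()
    #         result.update(
    #             title=self.title,        # shown in search results
    #             text=self.text,          # analyzed full-text field
    #             status_s=self.status,    # Solr dynamic string field
    #             votes_i=self.votes)      # Solr dynamic int field
    #         return result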
def url(self):
"""Return the URL for this Artifact.
Subclasses must implement this.
"""
raise NotImplementedError, 'url' # pragma no cover
def shorthand_id(self):
"""How to refer to this artifact within the app instance context.
For a wiki page, it might be the title. For a ticket, it might be the
ticket number. For a discussion, it might be the message ID. Generally
this should have a strong correlation to the URL.
"""
return str(self._id) # pragma no cover
def link_text(self):
"""Return the link text to use when a shortlink to this artifact
is expanded into an <a></a> tag.
By default this method returns :attr:`type_s` + :meth:`shorthand_id`. Subclasses should
override this method to provide more descriptive link text.
"""
return self.shorthand_id()
def get_discussion_thread(self, data=None):
"""Return the discussion thread and parent_id for this artifact.
:return: (:class:`allura.model.discuss.Thread`, parent_thread_id (int))
"""
from .discuss import Thread
t = Thread.query.get(ref_id=self.index_id())
if t is None:
idx = self.index()
t = Thread.new(
app_config_id=self.app_config_id,
discussion_id=self.app_config.discussion_id,
ref_id=idx['id'],
subject='%s discussion' % h.get_first(idx, 'title'))
parent_id = None
if data:
in_reply_to = data.get('in_reply_to', [])
if in_reply_to:
parent_id = in_reply_to[0]
return t, parent_id
@LazyProperty
def discussion_thread(self):
"""Return the :class:`discussion thread <allura.model.discuss.Thread>`
for this Artifact.
"""
return self.get_discussion_thread()[0]
def add_multiple_attachments(self, file_info):
if not isinstance(file_info, list):
file_info = [file_info]
for attach in file_info:
if hasattr(attach, 'file'):
self.attach(attach.filename, attach.file,
content_type=attach.type)
def attach(self, filename, fp, **kw):
"""Attach a file to this Artifact.
:param filename: file name
:param fp: a file-like object (implements ``read()``)
:param \*\*kw: passed through to Attachment class constructor
"""
att = self.attachment_class().save_attachment(
filename=filename,
fp=fp, artifact_id=self._id, **kw)
return att
@LazyProperty
def attachments(self):
return self.attachment_class().query.find(dict(
app_config_id=self.app_config_id, artifact_id=self._id, type='attachment')).all()
def delete(self):
"""Delete this Artifact.
"""
ArtifactReference.query.remove(dict(_id=self.index_id()))
super(Artifact, self).delete()
def get_mail_footer(self, notification, toaddr):
return MailFooter.standard(notification)
def message_id(self):
'''Persistent, email-friendly (Message-ID header) id of this artifact'''
return h.gen_message_id(self._id)
class Snapshot(Artifact):
"""A snapshot of an :class:`Artifact <allura.model.artifact.Artifact>`, used in :class:`VersionedArtifact <allura.model.artifact.VersionedArtifact>`"""
class __mongometa__:
session = artifact_orm_session
name = 'artifact_snapshot'
unique_indexes = [('artifact_class', 'artifact_id', 'version')]
indexes = [('artifact_id', 'version')]
_id = FieldProperty(S.ObjectId)
artifact_id = FieldProperty(S.ObjectId)
artifact_class = FieldProperty(str)
version = FieldProperty(S.Int, if_missing=0)
author = FieldProperty(dict(
id=S.ObjectId,
username=str,
display_name=str,
logged_ip=str))
timestamp = FieldProperty(datetime)
data = FieldProperty(None)
def index(self):
result = Artifact.index(self)
original = self.original()
if original:
original_index = original.index()
result.update(original_index)
result['title'] = '%s (version %d)' % (
h.get_first(original_index, 'title'), self.version)
result.update(
id=self.index_id(),
version_i=self.version,
author_username_t=self.author.username,
author_display_name_t=self.author.display_name,
timestamp_dt=self.timestamp,
is_history_b=True)
return result
def original(self):
        raise NotImplementedError, 'original'  # pragma no cover
def shorthand_id(self):
return '%s#%s' % (self.original().shorthand_id(), self.version)
@property
def attachments(self):
orig = self.original()
if not orig:
return None
return orig.attachments
def __getattr__(self, name):
return getattr(self.data, name)
class VersionedArtifact(Artifact):
"""
An :class:`Artifact <allura.model.artifact.Artifact>` that has versions.
Associated data like attachments and discussion thread are not versioned.
"""
class __mongometa__:
session = artifact_orm_session
name = 'versioned_artifact'
history_class = Snapshot
version = FieldProperty(S.Int, if_missing=0)
def commit(self, update_stats=True):
'''Save off a snapshot of the artifact and increment the version #'''
self.version += 1
try:
ip_address = request.headers.get(
'X_FORWARDED_FOR', request.remote_addr)
ip_address = ip_address.split(',')[0].strip()
except:
ip_address = '0.0.0.0'
data = dict(
artifact_id=self._id,
artifact_class='%s.%s' % (
self.__class__.__module__,
self.__class__.__name__),
version=self.version,
author=dict(
id=c.user._id,
username=c.user.username,
display_name=c.user.get_pref('display_name'),
logged_ip=ip_address),
timestamp=datetime.utcnow(),
data=state(self).clone())
ss = self.__mongometa__.history_class(**data)
session(ss).insert_now(ss, state(ss))
log.info('Snapshot version %s of %s',
self.version, self.__class__)
if update_stats:
if self.version > 1:
g.statsUpdater.modifiedArtifact(
self.type_s, self.mod_date, self.project, c.user)
else:
g.statsUpdater.newArtifact(
self.type_s, self.mod_date, self.project, c.user)
return ss
def get_version(self, n):
if n < 0:
n = self.version + n + 1
ss = self.__mongometa__.history_class.query.get(
artifact_id=self._id,
artifact_class='%s.%s' % (
self.__class__.__module__,
self.__class__.__name__),
version=n)
if ss is None:
raise IndexError, n
return ss
def revert(self, version):
ss = self.get_version(version)
old_version = self.version
for k, v in ss.data.iteritems():
setattr(self, k, v)
self.version = old_version
def history(self):
HC = self.__mongometa__.history_class
q = HC.query.find(dict(artifact_id=self._id)).sort(
'version', pymongo.DESCENDING)
return q
@property
def last_updated(self):
history = self.history()
if history.count():
return self.history().first().timestamp
else:
return self.mod_date
def delete(self):
# remove history so that the snapshots aren't left orphaned
super(VersionedArtifact, self).delete()
HC = self.__mongometa__.history_class
HC.query.remove(dict(artifact_id=self._id))
class Message(Artifact):
"""
A message
:var _id: an email friendly (e.g. message-id) string id
    :var slug: slash-delimited random identifier. Slashes useful for threaded searching and ordering
    :var full_slug: string of slash-delimited "timestamp:slug" components. Useful for sorting by timestamp
"""
class __mongometa__:
session = artifact_orm_session
name = 'message'
type_s = 'Generic Message'
_id = FieldProperty(str, if_missing=h.gen_message_id)
slug = FieldProperty(str, if_missing=h.nonce)
full_slug = FieldProperty(str, if_missing=None)
parent_id = FieldProperty(str)
app_id = FieldProperty(S.ObjectId, if_missing=lambda: c.app.config._id)
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
author_id = FieldProperty(S.ObjectId, if_missing=lambda: c.user._id)
text = FieldProperty(str, if_missing='')
@classmethod
def make_slugs(cls, parent=None, timestamp=None):
part = h.nonce()
if timestamp is None:
timestamp = datetime.utcnow()
dt = timestamp.strftime('%Y%m%d%H%M%S')
slug = part
full_slug = dt + ':' + part
if parent:
return (parent.slug + '/' + slug,
parent.full_slug + '/' + full_slug)
else:
return slug, full_slug
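    # e.g. (editor's note; nonce values are random): make_slugs() might return
    # ('af13c', '20140101120000:af13c'), while a reply to a parent with slug
    # 'af13c' gets ('af13c/9b2d1', '20140101120000:af13c/20140101120500:9b2d1').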
def author(self):
from .auth import User
return User.query.get(_id=self.author_id) or User.anonymous()
def index(self):
result = Artifact.index(self)
author = self.author()
result.update(
author_user_name_t=author.username,
author_display_name_t=author.get_pref('display_name'),
timestamp_dt=self.timestamp,
text=self.text)
return result
def shorthand_id(self):
return self.slug
class AwardFile(File):
class __mongometa__:
session = main_orm_session
name = 'award_file'
award_id = FieldProperty(S.ObjectId)
class Award(Artifact):
class __mongometa__:
session = main_orm_session
name = 'award'
indexes = ['short']
type_s = 'Generic Award'
from .project import Neighborhood
_id = FieldProperty(S.ObjectId)
created_by_neighborhood_id = ForeignIdProperty(
Neighborhood, if_missing=None)
created_by_neighborhood = RelationProperty(
Neighborhood, via='created_by_neighborhood_id')
short = FieldProperty(str, if_missing=h.nonce)
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
full = FieldProperty(str, if_missing='')
def index(self):
result = Artifact.index(self)
result.update(
_id_s=self._id,
short_s=self.short,
timestamp_dt=self.timestamp,
full_s=self.full)
if self.created_by:
result['created_by_s'] = self.created_by.name
return result
@property
def icon(self):
return AwardFile.query.get(award_id=self._id)
def url(self):
return str(self._id)
def longurl(self):
return self.created_by_neighborhood.url_prefix + "_admin/awards/" + self.url()
def shorthand_id(self):
return self.short
class AwardGrant(Artifact):
"An :class:`Award <allura.model.artifact.Award>` can be bestowed upon a project by a neighborhood"
class __mongometa__:
session = main_orm_session
name = 'grant'
indexes = ['short']
type_s = 'Generic Award Grant'
_id = FieldProperty(S.ObjectId)
award_id = ForeignIdProperty(Award, if_missing=None)
award = RelationProperty(Award, via='award_id')
granted_by_neighborhood_id = ForeignIdProperty(
'Neighborhood', if_missing=None)
granted_by_neighborhood = RelationProperty(
'Neighborhood', via='granted_by_neighborhood_id')
granted_to_project_id = ForeignIdProperty('Project', if_missing=None)
granted_to_project = RelationProperty(
'Project', via='granted_to_project_id')
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
def index(self):
result = Artifact.index(self)
result.update(
_id_s=self._id,
short_s=self.short,
timestamp_dt=self.timestamp,
full_s=self.full)
if self.award:
result['award_s'] = self.award.short
return result
@property
def icon(self):
return AwardFile.query.get(award_id=self.award_id)
def url(self):
slug = str(self.granted_to_project.shortname).replace('/', '_')
return h.urlquote(slug)
def longurl(self):
slug = str(self.granted_to_project.shortname).replace('/', '_')
slug = self.award.longurl() + '/' + slug
return h.urlquote(slug)
def shorthand_id(self):
if self.award:
return self.award.short
else:
return None
class Feed(MappedClass):
"""
Used to generate rss/atom feeds. This does not need to be extended;
all feed items go into the same collection
"""
class __mongometa__:
session = project_orm_session
name = 'artifact_feed'
indexes = [
'pubdate',
('artifact_ref.project_id', 'artifact_ref.mount_point'),
(('ref_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
(('project_id', pymongo.ASCENDING),
('app_config_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
# used in ext/user_profile/user_main.py for user feeds
'author_link',
# used in project feed
(('project_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
]
_id = FieldProperty(S.ObjectId)
ref_id = ForeignIdProperty('ArtifactReference')
neighborhood_id = ForeignIdProperty('Neighborhood')
project_id = ForeignIdProperty('Project')
app_config_id = ForeignIdProperty('AppConfig')
tool_name = FieldProperty(str)
title = FieldProperty(str)
link = FieldProperty(str)
pubdate = FieldProperty(datetime, if_missing=datetime.utcnow)
description = FieldProperty(str)
description_cache = FieldProperty(MarkdownCache)
unique_id = FieldProperty(str, if_missing=lambda: h.nonce(40))
author_name = FieldProperty(str, if_missing=lambda: c.user.get_pref(
'display_name') if hasattr(c, 'user') else None)
author_link = FieldProperty(
str, if_missing=lambda: c.user.url() if hasattr(c, 'user') else None)
artifact_reference = FieldProperty(S.Deprecated)
@classmethod
def post(cls, artifact, title=None, description=None, author=None, author_link=None, author_name=None, pubdate=None, link=None, **kw):
"""
Create a Feed item. Returns the item.
But if anon doesn't have read access, create does not happen and None is returned
"""
# TODO: fix security system so we can do this correctly and fast
from allura import model as M
anon = M.User.anonymous()
if not security.has_access(artifact, 'read', user=anon):
return
if not security.has_access(c.project, 'read', user=anon):
return
idx = artifact.index()
if author is None:
author = c.user
if author_name is None:
author_name = author.get_pref('display_name')
if title is None:
title = '%s modified by %s' % (
h.get_first(idx, 'title'), author_name)
if description is None:
description = title
if pubdate is None:
pubdate = datetime.utcnow()
if link is None:
link = artifact.url()
item = cls(
ref_id=artifact.index_id(),
neighborhood_id=artifact.app_config.project.neighborhood_id,
project_id=artifact.app_config.project_id,
app_config_id=artifact.app_config_id,
tool_name=artifact.app_config.tool_name,
title=title,
description=g.markdown.convert(description),
link=link,
pubdate=pubdate,
author_name=author_name,
author_link=author_link or author.url())
unique_id = kw.pop('unique_id', None)
if unique_id:
item.unique_id = unique_id
return item
@classmethod
def feed(cls, q, feed_type, title, link, description,
since=None, until=None, offset=None, limit=None):
"Produces webhelper.feedgenerator Feed"
d = dict(title=title, link=h.absurl(link),
description=description, language=u'en')
if feed_type == 'atom':
feed = FG.Atom1Feed(**d)
elif feed_type == 'rss':
feed = FG.Rss201rev2Feed(**d)
query = defaultdict(dict)
query.update(q)
if since is not None:
query['pubdate']['$gte'] = since
if until is not None:
query['pubdate']['$lte'] = until
cur = cls.query.find(query)
cur = cur.sort('pubdate', pymongo.DESCENDING)
if limit is None:
limit = 10
query = cur.limit(limit)
if offset is not None:
query = cur.offset(offset)
for r in cur:
feed.add_item(title=r.title,
link=h.absurl(r.link.encode('utf-8')),
pubdate=r.pubdate,
description=r.description,
unique_id=h.absurl(r.unique_id),
author_name=r.author_name,
author_link=h.absurl(r.author_link))
return feed
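    # Usage sketch (editor's illustration; the query and titles are hypothetical):
    #
    #     feed = Feed.feed({'project_id': project._id}, 'rss',
    #                      title='Recent changes', link=project.url(),
    #                      description='Recent changes', limit=25)
    #     rss_xml = feed.writeString('utf-8')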
class VotableArtifact(MappedClass):
"""Voting support for the Artifact. Use as a mixin."""
class __mongometa__:
session = main_orm_session
name = 'vote'
votes = FieldProperty(int, if_missing=0)
votes_up = FieldProperty(int, if_missing=0)
votes_down = FieldProperty(int, if_missing=0)
votes_up_users = FieldProperty([str], if_missing=list())
votes_down_users = FieldProperty([str], if_missing=list())
def vote_up(self, user):
voted = self.user_voted(user)
if voted == 1:
# Already voted up - unvote
self.votes_up_users.remove(user.username)
self.votes_up -= 1
elif voted == -1:
# Change vote to negative
self.votes_down_users.remove(user.username)
self.votes_down -= 1
self.votes_up_users.append(user.username)
self.votes_up += 1
else:
self.votes_up_users.append(user.username)
self.votes_up += 1
self.votes = self.votes_up - self.votes_down
def vote_down(self, user):
voted = self.user_voted(user)
if voted == -1:
# Already voted down - unvote
self.votes_down_users.remove(user.username)
self.votes_down -= 1
elif voted == 1:
# Change vote to positive
self.votes_up_users.remove(user.username)
self.votes_up -= 1
self.votes_down_users.append(user.username)
self.votes_down += 1
else:
self.votes_down_users.append(user.username)
self.votes_down += 1
self.votes = self.votes_up - self.votes_down
def user_voted(self, user):
"""Check that user voted for this artifact.
Return:
1 if user voted up
-1 if user voted down
0 if user doesn't vote
"""
if user.username in self.votes_up_users:
return 1
if user.username in self.votes_down_users:
return -1
return 0
@property
def votes_up_percent(self):
votes_count = self.votes_up + self.votes_down
if votes_count == 0:
return 0
return int(float(self.votes_up) / votes_count * 100)
def __json__(self):
return {
'votes_up': self.votes_up,
'votes_down': self.votes_down,
}
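# Mixin usage sketch (editor's illustration; the Ticket class here is hypothetical):
#
#     class Ticket(VersionedArtifact, VotableArtifact):
#         ...
#
#     ticket.vote_up(user)      # records or flips this user's vote
#     ticket.user_voted(user)   # -> 1, -1 or 0
#     ticket.votes_up_percent   # -> percentage of positive votes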
class MovedArtifact(Artifact):
class __mongometa__:
session = artifact_orm_session
name = 'moved_artifact'
_id = FieldProperty(S.ObjectId)
app_config_id = ForeignIdProperty(
'AppConfig', if_missing=lambda: c.app.config._id)
app_config = RelationProperty('AppConfig')
moved_to_url = FieldProperty(str, required=True, allow_none=False)
| apache-2.0 | 2,514,140,458,359,754,000 | 33.135362 | 155 | 0.590514 | false | 3.968163 | true | false | false |
mganeva/mantid | Framework/PythonInterface/plugins/algorithms/BASISDiffraction.py | 1 | 25038 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=too-many-branches
from __future__ import (absolute_import, division, print_function)
import os
import tempfile
import itertools
from collections import namedtuple
from contextlib import contextmanager
import numpy as np
from mantid import config as mantid_config
from mantid.api import (DataProcessorAlgorithm, AlgorithmFactory, FileProperty,
WorkspaceProperty, FileAction, PropertyMode, mtd,
AnalysisDataService, Progress)
from mantid.simpleapi import (DeleteWorkspace, LoadEventNexus, SetGoniometer,
SetUB, ModeratorTzeroLinear, SaveNexus,
ConvertToMD, LoadMask, MaskDetectors, LoadNexus,
MDNormSCDPreprocessIncoherent, MDNormSCD,
MultiplyMD, CreateSingleValuedWorkspace,
ConvertUnits, CropWorkspace, DivideMD, MinusMD,
RenameWorkspace, ConvertToMDMinMaxGlobal,
ClearMaskFlag)
from mantid.kernel import (Direction, EnabledWhenProperty, PropertyCriterion,
IntArrayProperty, FloatArrayProperty,
FloatArrayLengthValidator)
DEPRECATION_NOTICE = """BASISDiffraction is deprecated (on 2018-08-27).
Instead, use BASISCrystalDiffraction or BASISPowderReduction."""
_SOLID_ANGLE_WS_ = '/tmp/solid_angle_diff.nxs'
_FLUX_WS_ = '/tmp/int_flux.nxs'
@contextmanager
def pyexec_setup(new_options):
"""
Backup keys of mantid.config and clean up temporary files and workspaces
upon algorithm completion or exception raised.
:param new_options: dictionary of mantid configuration options
to be modified.
"""
# Hold in this tuple all temporary objects to be removed after completion
temp_objects = namedtuple('temp_objects', 'files workspaces')
temps = temp_objects(list(), list())
previous_config = dict()
for key, value in new_options.items():
previous_config[key] = mantid_config[key]
mantid_config[key] = value
try:
yield temps
finally:
# reinstate the mantid options
for key, value in previous_config.items():
mantid_config[key] = value
# delete temporary files
for file_name in temps.files:
os.remove(file_name)
# remove any workspace added to temps.workspaces or whose name begins
# with "_t_"
to_be_removed = set()
for name in AnalysisDataService.getObjectNames():
if '_t_' == name[0:3]:
to_be_removed.add(name)
for workspace in temps.workspaces:
if isinstance(workspace, str):
to_be_removed.add(workspace)
else:
to_be_removed.add(workspace.name())
for name in to_be_removed:
DeleteWorkspace(name)
class BASISDiffraction(DataProcessorAlgorithm):
_mask_file = '/SNS/BSS/shared/autoreduce/new_masks_08_12_2015/'\
'BASIS_Mask_default_diff.xml'
_solid_angle_ws_ = '/SNS/BSS/shared/autoreduce/solid_angle_diff.nxs'
_flux_ws_ = '/SNS/BSS/shared/autoreduce/int_flux.nxs'
def __init__(self):
DataProcessorAlgorithm.__init__(self)
self._lambda_range = [5.86, 6.75] # units of inverse Angstroms
self._short_inst = "BSS"
self._long_inst = "BASIS"
self._run_list = None
self._temps = None
self._bkg = None
self._bkg_scale = None
self._vanadium_files = None
self._momentum_range = None
self._t_mask = None
self._n_bins = None
@classmethod
def category(self):
return "Diffraction\\Reduction"
@classmethod
def version(self):
return 1
@classmethod
def summary(self):
return DEPRECATION_NOTICE
def seeAlso(self):
return [ "AlignDetectors","DiffractionFocussing","SNSPowderReduction" ]
def PyInit(self):
# Input validators
array_length_three = FloatArrayLengthValidator(3)
# Properties
self.declareProperty('RunNumbers', '', 'Sample run numbers')
self.declareProperty(FileProperty(name='MaskFile',
defaultValue=self._mask_file,
action=FileAction.OptionalLoad,
extensions=['.xml']),
doc='See documentation for latest mask files.')
self.declareProperty(FloatArrayProperty('LambdaRange',
self._lambda_range,
direction=Direction.Input),
doc='Incoming neutron wavelength range')
self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
optional=PropertyMode.Mandatory,
direction=Direction.Output),
doc='Output Workspace. If background is '+
'subtracted, _data and _background '+
'workspaces will also be generated')
#
# Background for the sample runs
#
background_title = 'Background runs'
self.declareProperty('BackgroundRuns', '', 'Background run numbers')
self.setPropertyGroup('BackgroundRuns', background_title)
self.declareProperty("BackgroundScale", 1.0,
doc='The background will be scaled by this '+
'number before being subtracted.')
self.setPropertyGroup('BackgroundScale', background_title)
#
# Vanadium
#
vanadium_title = 'Vanadium runs'
self.declareProperty('VanadiumRuns', '', 'Vanadium run numbers')
self.setPropertyGroup('VanadiumRuns', vanadium_title)
#
# Single Crystal Diffraction
#
crystal_diffraction_title = 'Single Crystal Diffraction'
self.declareProperty('SingleCrystalDiffraction',
False, direction=Direction.Input,
doc='Calculate diffraction pattern?')
crystal_diffraction_enabled =\
EnabledWhenProperty('SingleCrystalDiffraction',
PropertyCriterion.IsNotDefault)
self.declareProperty('PsiAngleLog', 'SE50Rot',
direction=Direction.Input,
doc='log entry storing rotation of the sample'
'around the vertical axis')
self.declareProperty('PsiOffset', 0.0,
direction=Direction.Input,
doc='Add this quantity to PsiAngleLog')
self.declareProperty(FloatArrayProperty('LatticeSizes', [0,0,0],
array_length_three,
direction=Direction.Input),
doc='three item comma-separated list "a, b, c"')
self.declareProperty(FloatArrayProperty('LatticeAngles',
[90.0, 90.0, 90.0],
array_length_three,
direction=Direction.Input),
doc='three item comma-separated ' +
'list "alpha, beta, gamma"')
# Reciprocal vector to be aligned with incoming beam
self.declareProperty(FloatArrayProperty('VectorU', [1, 0, 0],
array_length_three,
direction=Direction.Input),
doc='three item, comma-separated, HKL indexes'
'of the diffracting plane')
# Reciprocal vector orthogonal to VectorU and in-plane with
# incoming beam
self.declareProperty(FloatArrayProperty('VectorV', [0, 1, 0],
array_length_three,
direction=Direction.Input),
doc='three item, comma-separated, HKL indexes'
'of the direction perpendicular to VectorV'
'and the vertical axis')
# Abscissa view
self.declareProperty(FloatArrayProperty('Uproj', [1, 0, 0],
array_length_three,
direction=Direction.Input),
doc='three item comma-separated Abscissa view'
'of the diffraction pattern')
# Ordinate view
self.declareProperty(FloatArrayProperty('Vproj', [0, 1, 0],
array_length_three,
direction=Direction.Input),
doc='three item comma-separated Ordinate view'
'of the diffraction pattern')
# Hidden axis
self.declareProperty(FloatArrayProperty('Wproj', [0, 0, 1],
array_length_three,
direction=Direction.Input),
doc='Hidden axis view')
        # Binning in reciprocal slice
self.declareProperty('NBins', 400, direction=Direction.Input,
doc='number of bins in the HKL slice')
self.setPropertyGroup('SingleCrystalDiffraction',
crystal_diffraction_title)
for a_property in ('PsiAngleLog', 'PsiOffset',
'LatticeSizes', 'LatticeAngles', 'VectorU',
'VectorV', 'Uproj', 'Vproj', 'Wproj', 'NBins'):
self.setPropertyGroup(a_property, crystal_diffraction_title)
self.setPropertySettings(a_property, crystal_diffraction_enabled)
def PyExec(self):
# Exit with deprecation notice
self.log().error(DEPRECATION_NOTICE)
# Facility and database configuration
config_new_options = {'default.facility': 'SNS',
'default.instrument': 'BASIS',
'datasearch.searcharchive': 'On'}
# Find valid incoming momentum range
self._lambda_range = np.array(self.getProperty('LambdaRange').value)
self._momentum_range = np.sort(2 * np.pi / self._lambda_range)
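        # e.g. for the default LambdaRange [5.86, 6.75] Angstroms this gives a
        # momentum window of roughly [0.93, 1.07] inverse Angstroms (k = 2*pi/lambda)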
        # (implement with ContextDecorator after python2 is deprecated)
with pyexec_setup(config_new_options) as self._temps:
# Load the mask to a workspace
self._t_mask = LoadMask(Instrument='BASIS',
InputFile=self.getProperty('MaskFile').
value,
OutputWorkspace='_t_mask')
# Pre-process the background runs
if self.getProperty('BackgroundRuns').value:
bkg_run_numbers = self._getRuns(
self.getProperty('BackgroundRuns').value,
doIndiv=True)
bkg_run_numbers = \
list(itertools.chain.from_iterable(bkg_run_numbers))
background_reporter = Progress(self, start=0.0, end=1.0,
nreports=len(bkg_run_numbers))
for i, run in enumerate(bkg_run_numbers):
if self._bkg is None:
self._bkg = self._mask_t0_crop(run, '_bkg')
self._temps.workspaces.append('_bkg')
else:
_ws = self._mask_t0_crop(run, '_ws')
self._bkg += _ws
if '_ws' not in self._temps.workspaces:
self._temps.workspaces.append('_ws')
message = 'Pre-processing background: {} of {}'.\
format(i+1, len(bkg_run_numbers))
background_reporter.report(message)
SetGoniometer(self._bkg, Axis0='0,0,1,0,1')
self._bkg_scale = self.getProperty('BackgroundScale').value
background_reporter.report(len(bkg_run_numbers), 'Done')
# Pre-process the vanadium run(s)
if self.getProperty('VanadiumRuns').value:
run_numbers = self._getRuns(
self.getProperty('VanadiumRuns').value,
doIndiv=True)
run_numbers = list(itertools.chain.from_iterable(run_numbers))
vanadium_reporter = Progress(self, start=0.0, end=1.0,
nreports=len(run_numbers))
self._vanadium_files = list()
for i, run in enumerate(run_numbers):
self._vanadium_files.append(self._save_t0(run))
message = 'Pre-processing vanadium: {} of {}'. \
format(i+1, len(run_numbers))
vanadium_reporter.report(message)
vanadium_reporter.report(len(run_numbers), 'Done')
# Determination of single crystal diffraction
if self.getProperty('SingleCrystalDiffraction').value:
self._determine_single_crystal_diffraction()
def _determine_single_crystal_diffraction(self):
"""
All work related to the determination of the diffraction pattern
"""
a, b, c = self.getProperty('LatticeSizes').value
alpha, beta, gamma = self.getProperty('LatticeAngles').value
u = self.getProperty('VectorU').value
v = self.getProperty('VectorV').value
uproj = self.getProperty('Uproj').value
vproj = self.getProperty('Vproj').value
wproj = self.getProperty('Wproj').value
n_bins = self.getProperty('NBins').value
self._n_bins = (n_bins, n_bins, 1)
axis0 = '{},0,1,0,1'.format(self.getProperty('PsiAngleLog').value)
axis1 = '{},0,1,0,1'.format(self.getProperty('PsiOffset').value)
# Options for SetUB independent of run
ub_args = dict(a=a, b=b, c=c,
alpha=alpha, beta=beta, gamma=gamma,
u=u, v=v)
min_values = None
# Options for algorithm ConvertToMD independent of run
cmd_args = dict(QDimensions='Q3D', dEAnalysisMode='Elastic',
Q3DFrames='HKL', QConversionScales='HKL',
Uproj=uproj, Vproj=vproj, Wproj=wproj)
mdn_args = None # Options for algorithm MDNormSCD
# Find solid angle and flux
if self._vanadium_files:
kwargs = dict(Filename='+'.join(self._vanadium_files),
MaskFile=self.getProperty("MaskFile").value,
MomentumMin=self._momentum_range[0],
MomentumMax=self._momentum_range[1])
_t_solid_angle, _t_int_flux = \
MDNormSCDPreprocessIncoherent(**kwargs)
else:
_t_solid_angle = self.nominal_solid_angle('_t_solid_angle')
_t_int_flux = self.nominal_integrated_flux('_t_int_flux')
# Process a sample at a time
run_numbers = self._getRuns(self.getProperty("RunNumbers").value,
doIndiv=True)
run_numbers = list(itertools.chain.from_iterable(run_numbers))
diffraction_reporter = Progress(self, start=0.0, end=1.0,
nreports=len(run_numbers))
for i_run, run in enumerate(run_numbers):
_t_sample = self._mask_t0_crop(run, '_t_sample')
# Set Goniometer and UB matrix
SetGoniometer(_t_sample, Axis0=axis0, Axis1=axis1)
SetUB(_t_sample, **ub_args)
if self._bkg:
self._bkg.run().getGoniometer().\
setR(_t_sample.run().getGoniometer().getR())
SetUB(self._bkg, **ub_args)
# Determine limits for momentum transfer in HKL space. Needs to be
# done only once. We use the first run.
if min_values is None:
kwargs = dict(QDimensions='Q3D',
dEAnalysisMode='Elastic',
Q3DFrames='HKL')
min_values, max_values = ConvertToMDMinMaxGlobal(_t_sample, **kwargs)
cmd_args.update({'MinValues': min_values,
'MaxValues': max_values})
# Convert to MD
_t_md = ConvertToMD(_t_sample, OutputWorkspace='_t_md',
**cmd_args)
if self._bkg:
_t_bkg_md = ConvertToMD(self._bkg, OutputWorkspace='_t_bkg_md',
**cmd_args)
# Determine aligned dimensions. Need to be done only once
if mdn_args is None:
aligned = list()
for i_dim in range(3):
kwargs = {'name': _t_md.getDimension(i_dim).name,
'min': min_values[i_dim],
'max': max_values[i_dim],
'n_bins': self._n_bins[i_dim]}
aligned.append(
'{name},{min},{max},{n_bins}'.format(**kwargs))
mdn_args = dict(AlignedDim0=aligned[0],
AlignedDim1=aligned[1],
AlignedDim2=aligned[2],
FluxWorkspace=_t_int_flux,
SolidAngleWorkspace=_t_solid_angle,
SkipSafetyCheck=True)
# Normalize sample by solid angle and integrated flux;
# Accumulate runs into the temporary workspaces
MDNormSCD(_t_md,
OutputWorkspace='_t_data',
OutputNormalizationWorkspace='_t_norm',
TemporaryDataWorkspace='_t_data' if
mtd.doesExist('_t_data') else None,
TemporaryNormalizationWorkspace='_t_norm' if
mtd.doesExist('_t_norm') else None,
**mdn_args)
if self._bkg:
MDNormSCD(_t_bkg_md,
OutputWorkspace='_t_bkg_data',
OutputNormalizationWorkspace='_t_bkg_norm',
TemporaryDataWorkspace='_t_bkg_data' if
mtd.doesExist('_t_bkg_data') else None,
TemporaryNormalizationWorkspace='_t_bkg_norm'
if mtd.doesExist('_t_bkg_norm') else None,
**mdn_args)
message = 'Processing sample {} of {}'.\
format(i_run+1, len(run_numbers))
diffraction_reporter.report(message)
self._temps.workspaces.append('PreprocessedDetectorsWS') # to remove
# Iteration over the sample runs is done.
# Division by vanadium, subtract background, and rename workspaces
name = self.getPropertyValue("OutputWorkspace")
_t_data = DivideMD(LHSWorkspace='_t_data', RHSWorkspace='_t_norm')
if self._bkg:
_t_bkg_data = DivideMD(LHSWorkspace='_t_bkg_data',
RHSWorkspace='_t_bkg_norm')
_t_scale = CreateSingleValuedWorkspace(DataValue=self._bkg_scale)
_t_bkg_data = MultiplyMD(_t_bkg_data, _t_scale)
ws = MinusMD(_t_data, _t_bkg_data)
RenameWorkspace(_t_data, OutputWorkspace=name + '_dat')
RenameWorkspace(_t_bkg_data, OutputWorkspace=name + '_bkg')
else:
ws = _t_data
RenameWorkspace(ws, OutputWorkspace=name)
self.setProperty("OutputWorkspace", ws)
diffraction_reporter.report(len(run_numbers), 'Done')
def _save_t0(self, run_number, name='_t_ws'):
"""
Create temporary events file with delayed emission time from
moderator removed
        :param run_number: run number
:param name: name for the output workspace
:return: file name of event file with events treated with algorithm
ModeratorTzeroLinear.
"""
ws = LoadEventNexus(Filename=self._makeRunFile(run_number),
NXentryName='entry-diff',
OutputWorkspace=name)
ws = ModeratorTzeroLinear(InputWorkspace=ws.name(),
OutputWorkspace=ws.name())
file_name = self._spawn_tempnexus()
SaveNexus(ws, file_name)
return file_name
def _mask_t0_crop(self, run_number, name):
"""
Load a run into a workspace with:
1. Masked detectors
2. Delayed emission time from moderator removed
3. Conversion of units to momentum
4. Remove events outside the valid momentum range
:param run_number: BASIS run number
:param name: name for the output workspace
:return: workspace object
"""
ws = LoadEventNexus(Filename=self._makeRunFile(run_number),
NXentryName='entry-diff',
SingleBankPixelsOnly=False,
OutputWorkspace=name)
MaskDetectors(ws, MaskedWorkspace=self._t_mask)
ws = ModeratorTzeroLinear(InputWorkspace=ws.name(),
OutputWorkspace=ws.name())
ws = ConvertUnits(ws, Target='Momentum', OutputWorkspace=ws.name())
ws = CropWorkspace(ws,
OutputWorkspace=ws.name(),
XMin=self._momentum_range[0],
XMax=self._momentum_range[1])
return ws
def _getRuns(self, rlist, doIndiv=True):
"""
Create sets of run numbers for analysis. A semicolon indicates a
separate group of runs to be processed together.
:param rlist: string containing all the run numbers to be reduced.
        :return: if doIndiv is False, return a list of pseudolists
        (IntArrayProperty values), each containing a set of run numbers
        to be reduced together.
        If doIndiv is True, return a list of single-item lists, each
        containing one run number.
"""
run_list = []
# ";" separates the runs into substrings. Each substring represents a set of runs
rlvals = rlist.split(';')
for rlval in rlvals:
iap = IntArrayProperty('', rlval) # split the substring
if doIndiv:
run_list.extend([[x] for x in iap.value])
else:
run_list.append(iap.value)
return run_list
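    # Illustrative sketch (added; run numbers are hypothetical): a property
    # value such as '5555-5557;5670' would be parsed as
    #   doIndiv=True  -> [[5555], [5556], [5557], [5670]]
    #   doIndiv=False -> [[5555, 5556, 5557], [5670]]  (pseudolists)
    # i.e. ';' separates reduction groups and IntArrayProperty expands ranges.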
def _makeRunFile(self, run):
"""
Make name like BSS_24234_event.nxs
"""
return "{0}_{1}_event.nxs".format(self._short_inst, str(run))
def _spawn_tempnexus(self):
"""
Create a temporary file and flag for removal upon algorithm completion.
:return: (str) absolute path to the temporary file.
"""
f = tempfile.NamedTemporaryFile(prefix='BASISDiffraction_',
suffix='.nxs',
dir=mantid_config['defaultsave.directory'],
delete=False)
file_name = f.name
f.close()
self._temps.files.append(file_name) # flag for removal
return file_name
def nominal_solid_angle(self, name):
"""
Generate an isotropic solid angle
:param name: Name of the output workspace
:return: reference to solid angle workspace
"""
ws = LoadNexus(Filename=self._solid_angle_ws_, OutputWorkspace=name)
ClearMaskFlag(ws)
MaskDetectors(ws, MaskedWorkspace=self._t_mask)
for i in range(ws.getNumberHistograms()):
ws.dataY(i)[0] = 0.0 if ws.getDetector(i).isMasked() else 1.0
ws.setX(i, self._momentum_range)
return ws
def nominal_integrated_flux(self, name):
"""
Generate a flux independent of momentum
:param name: Name of the output workspace
:return: reference to flux workspace
"""
ws = LoadNexus(Filename=self._flux_ws_, OutputWorkspace=name)
ClearMaskFlag(ws)
MaskDetectors(ws, MaskedWorkspace=self._t_mask)
return ws
# Register algorithm with Mantid.
AlgorithmFactory.subscribe(BASISDiffraction)
| gpl-3.0 | -5,616,242,651,998,437,000 | 45.366667 | 89 | 0.536465 | false | 4.428369 | true | false | false |
fxia22/ASM_xf | PythonD/site_python/Scientific/IO/PDB.py | 2 | 42526 | # This module handles input and output of PDB files.
#
# Written by Konrad Hinsen <[email protected]>
# Last revision: 2001-6-13
#
"""This module provides classes that represent PDB (Protein Data Bank)
files and configurations contained in PDB files. It provides access to
PDB files on two levels: low-level (line by line) and high-level
(chains, residues, and atoms).
Caution: The PDB file format has been heavily abused, and it is
probably impossible to write code that can deal with all variants
correctly. This module tries to read the widest possible range of PDB
files, but gives priority to a correct interpretation of the PDB
format as defined by the Brookhaven National Laboratory.
A special problem is atom names. The PDB file format specifies that
the first two letters contain the right-justified chemical element
name. A later modification allowed the initial space in hydrogen names
to be replaced by a digit. Many programs ignore all this and treat the
name as an arbitrary left-justified four-character name. This makes it
difficult to extract the chemical element accurately; most programs
write the '"CA"' for C_alpha in such a way that it actually stands for
a calcium atom! For this reason a special element field has been added
later, but only few files use it.
The low-level routines in this module do not try to deal with the atom
name problem; they return and expect four-character atom names
including spaces in the correct positions. The high-level routines use
atom names without leading or trailing spaces, but provide and use the
element field whenever possible. For output, they use the element
field to place the atom name correctly, and for input, they construct
the element field content from the atom name if no explicit element
field is found in the file.
Except where indicated, numerical values use the same units and
conventions as specified in the PDB format description.
Example:
>>>conf = Structure('example.pdb')
>>>print conf
>>>for residue in conf.residues:
>>> for atom in residue:
>>> print atom
"""
from Scientific.IO.TextFile import TextFile
from Scientific.IO.FortranFormat import FortranFormat, FortranLine
from Scientific.Geometry import Vector, Tensor
from PDBExportFilters import export_filters
import copy, string
#
# Fortran formats for PDB entries
#
atom_format = FortranFormat('A6,I5,1X,A4,A1,A4,A1,I4,A1,3X,3F8.3,2F6.2,' +
'6X,A4,2A2')
anisou_format = FortranFormat('A6,I5,1X,A4,A1,A4,A1,I4,A1,1X,6I7,2X,A4,2A2')
conect_format = FortranFormat('A6,11I5')
ter_format = FortranFormat('A6,I5,6X,A4,A1,I4,A1')
model_format = FortranFormat('A6,4X,I4')
header_format = FortranFormat('A6,4X,A40,A9,3X,A4')
generic_format = FortranFormat('A6,A74')
#
# Amino acid and nucleic acid residues
#
amino_acids = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'CYX', 'GLN', 'GLU', 'GLY',
'HIS', 'HID', 'HIE', 'HIP', 'HSD', 'HSE', 'HSP', 'ILE', 'LEU',
'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL',
'ACE', 'NME']
nucleic_acids = [ 'A', 'C', 'G', 'I', 'T', 'U',
'+A', '+C', '+G', '+I', '+T', '+U',
'RA', 'RC', 'RG', 'RU',
'DA', 'DC', 'DG', 'DT',
'RA5', 'RC5', 'RG5', 'RU5',
'DA5', 'DC5', 'DG5', 'DT5',
'RA3', 'RC3', 'RG3', 'RU3',
'DA3', 'DC3', 'DG3', 'DT3',
'RAN', 'RCN', 'RGN', 'RUN',
'DAN', 'DCN', 'DGN', 'DTN',
]
def defineAminoAcidResidue(symbol):
amino_acids.append(string.upper(symbol))
def defineNucleicAcidResidue(symbol):
nucleic_acids.append(string.upper(symbol))
#
# Low-level file object. It represents line contents as Python dictionaries.
# For output, there are additional methods that generate sequence numbers
# for everything.
#
class PDBFile:
"""PDB file with access at the record level
Constructor: PDBFile(|filename|, |mode|='"r"'), where |filename|
    is the file name and |mode| is '"r"' for reading and '"w"' for writing.
The low-level file access is handled by the module
Scientific.IO.TextFile, therefore compressed files and URLs
(for reading) can be used as well.
"""
def __init__(self, filename, mode = 'r', subformat = None):
self.file = TextFile(filename, mode)
self.output = string.lower(mode[0]) == 'w'
self.export_filter = None
if subformat is not None:
export = export_filters.get(subformat, None)
if export is not None:
self.export_filter = export()
self.open = 1
if self.output:
self.data = {'serial_number': 0,
'residue_number': 0,
'chain_id': '',
'segment_id': ''}
self.het_flag = 0
self.chain_number = -1
def readLine(self):
"""Returns the contents of the next non-blank line (= record).
The return value is a tuple whose first element (a string)
contains the record type. For supported record types (HEADER,
        ATOM, HETATM, ANISOU, TER, MODEL, CONECT), the items from the
remaining fields are put into a dictionary which is returned
as the second tuple element. Most dictionary elements are
strings or numbers; atom positions are returned as a vector,
and anisotropic temperature factors are returned as a rank-2
tensor, already multiplied by 1.e-4. White space is stripped
from all strings except for atom names, whose correct
interpretation can depend on an initial space. For unsupported
record types, the second tuple element is a string containing
the remaining part of the record.
"""
while 1:
line = self.file.readline()
if not line: return ('END','')
if line[-1] == '\n': line = line[:-1]
line = string.strip(line)
if line: break
line = string.ljust(line, 80)
type = string.strip(line[:6])
if type == 'ATOM' or type == 'HETATM':
line = FortranLine(line, atom_format)
data = {'serial_number': line[1],
'name': line[2],
'alternate': string.strip(line[3]),
'residue_name': string.strip(line[4]),
'chain_id': string.strip(line[5]),
'residue_number': line[6],
'insertion_code': string.strip(line[7]),
'position': Vector(line[8:11]),
'occupancy': line[11],
'temperature_factor': line[12],
'segment_id': string.strip(line[13]),
'element': string.strip(line[14]),
'charge': string.strip(line[15])}
return type, data
elif type == 'ANISOU':
line = FortranLine(line, anisou_format)
data = {'serial_number': line[1],
'name': line[2],
'alternate': string.strip(line[3]),
'residue_name': string.strip(line[4]),
'chain_id': string.strip(line[5]),
'residue_number': line[6],
'insertion_code': string.strip(line[7]),
'u': 1.e-4*Tensor([[line[8], line[11], line[12]],
[line[11], line[9] , line[13]],
[line[12], line[13], line[10]]]),
'segment_id': string.strip(line[14]),
'element': string.strip(line[15]),
'charge': string.strip(line[16])}
return type, data
elif type == 'TER':
line = FortranLine(line, ter_format)
data = {'serial_number': line[1],
'residue_name': string.strip(line[2]),
'chain_id': string.strip(line[3]),
'residue_number': line[4],
'insertion_code': string.strip(line[5])}
return type, data
elif type == 'CONECT':
line = FortranLine(line, conect_format)
data = {'serial_number': line[1],
'bonded': filter(lambda i: i > 0, line[2:6]),
'hydrogen_bonded': filter(lambda i: i > 0, line[6:10]),
'salt_bridged': filter(lambda i: i > 0, line[10:12])}
return type, data
elif type == 'MODEL':
line = FortranLine(line, model_format)
data = {'serial_number': line[1]}
return type, data
elif type == 'HEADER':
line = FortranLine(line, header_format)
data = {'compound': line[1],
'date': line[2],
'pdb_code': line[3]}
return type, data
else:
return type, line[6:]
def writeLine(self, type, data):
"""Writes a line using record type and data dictionary in the
same format as returned by readLine(). Default values are
provided for non-essential information, so the data dictionary
need not contain all entries.
"""
if self.export_filter is not None:
type, data = self.export_filter.processLine(type, data)
if type is None:
return
line = [type]
if type == 'ATOM' or type == 'HETATM':
format = atom_format
position = data['position']
line = line + [data.get('serial_number', 1),
data.get('name'),
data.get('alternate', ''),
string.rjust(data.get('residue_name', ''), 3),
data.get('chain_id', ''),
data.get('residue_number', 1),
data.get('insertion_code', ''),
position[0], position[1], position[2],
data.get('occupancy', 0.),
data.get('temperature_factor', 0.),
data.get('segment_id', ''),
string.rjust(data.get('element', ''), 2),
data.get('charge', '')]
elif type == 'ANISOU':
format = anisou_format
u = 1.e4*data['u']
u = [int(u[0,0]), int(u[1,1]), int(u[2,2]),
int(u[0,1]), int(u[0,2]), int(u[1,2])]
line = line + [data.get('serial_number', 1),
data.get('name'),
data.get('alternate', ''),
string.rjust(data.get('residue_name'), 3),
data.get('chain_id', ''),
data.get('residue_number', 1),
data.get('insertion_code', '')] \
+ u \
+ [data.get('segment_id', ''),
string.rjust(data.get('element', ''), 2),
data.get('charge', '')]
elif type == 'TER':
format = ter_format
line = line + [data.get('serial_number', 1),
string.rjust(data.get('residue_name'), 3),
data.get('chain_id', ''),
data.get('residue_number', 1),
data.get('insertion_code', '')]
elif type == 'CONECT':
format = conect_format
line = line + [data.get('serial_number')]
line = line + (data.get('bonded', [])+4*[None])[:4]
line = line + (data.get('hydrogen_bonded', [])+4*[None])[:4]
line = line + (data.get('salt_bridged', [])+2*[None])[:2]
elif type == 'MODEL':
format = model_format
line = line + [data.get('serial_number')]
elif type == 'HEADER':
format = header_format
line = line + [data.get('compound', ''), data.get('date', ''),
data.get('pdb_code')]
else:
format = generic_format
line = line + [data]
self.file.write(str(FortranLine(line, format)) + '\n')
def writeComment(self, text):
"""Writes |text| into one or several comment lines.
Each line of the text is prefixed with 'REMARK' and written
to the file.
"""
while text:
eol = string.find(text,'\n')
if eol == -1:
eol = len(text)
self.file.write('REMARK %s \n' % text[:eol])
text = text[eol+1:]
def writeAtom(self, name, position, occupancy=0.0, temperature_factor=0.0,
element=''):
"""Writes an ATOM or HETATM record using the |name|, |occupancy|,
|temperature| and |element| information supplied. The residue and
chain information is taken from the last calls to the methods
nextResidue() and nextChain().
"""
if self.het_flag:
type = 'HETATM'
else:
type = 'ATOM'
name = string.upper(name)
if element != '' and len(element) == 1 and name and name[0] == element:
name = ' ' + name
self.data['name'] = name
self.data['position'] = position
self.data['serial_number'] = (self.data['serial_number'] + 1) % 100000
self.data['occupancy'] = occupancy
self.data['temperature_factor'] = temperature_factor
self.data['element'] = element
self.writeLine(type, self.data)
def nextResidue(self, name, number = None, terminus = None):
"""Signals the beginning of a new residue, starting with the
next call to writeAtom(). The residue name is |name|, and a
|number| can be supplied optionally; by default residues in a
chain will be numbered sequentially starting from 1. The
value of |terminus| can be 'None', '"C"', or '"N"'; it is passed
to export filters that can use this information in order to
use different atom or residue names in terminal residues.
"""
name = string.upper(name)
if self.export_filter is not None:
name, number = self.export_filter.processResidue(name, number,
terminus)
self.het_flag = not (name in amino_acids or name in nucleic_acids)
self.data['residue_name'] = name
self.data['residue_number'] = (self.data['residue_number'] + 1) % 10000
self.data['insertion_code'] = ''
if number is not None:
if type(number) is type(0):
self.data['residue_number'] = number % 10000
else:
self.data['residue_number'] = number.number % 10000
self.data['insertion_code'] = number.insertion_code
def nextChain(self, chain_id = None, segment_id = ''):
"""Signals the beginning of a new chain. A chain identifier
(string of length one) can be supplied as |chain_id|, by
default consecutive letters from the alphabet are used.
The equally optional |segment_id| defaults to an empty string.
"""
if chain_id is None:
self.chain_number = (self.chain_number + 1) % len(self._chain_ids)
chain_id = self._chain_ids[self.chain_number]
if self.export_filter is not None:
chain_id, segment_id = \
self.export_filter.processChain(chain_id, segment_id)
self.data['chain_id'] = (chain_id+' ')[:1]
self.data['segment_id'] = (segment_id+' ')[:4]
self.data['residue_number'] = 0
_chain_ids = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def terminateChain(self):
"Signals the end of a chain."
if self.export_filter is not None:
self.export_filter.terminateChain()
self.data['serial_number'] = (self.data['serial_number'] + 1) % 100000
self.writeLine('TER', self.data)
self.data['chain_id'] = ''
self.data['segment_id'] = ''
def close(self):
"""Closes the file. This method *must* be called for write mode
because otherwise the file will be incomplete.
"""
if self.open:
if self.output:
self.file.write('END\n')
self.file.close()
self.open = 0
def __del__(self):
self.close()
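# Minimal usage sketch for writing a file (added for illustration; the file
# name and coordinates are invented):
#
#   pdb = PDBFile('example_out.pdb', 'w')
#   pdb.nextChain()
#   pdb.nextResidue('ALA')
#   pdb.writeAtom('N', Vector(0., 0., 0.), occupancy=1., element='N')
#   pdb.terminateChain()
#   pdb.close()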
#
# High-level object representation of PDB file contents.
#
#
# Representation of objects.
#
class Atom:
"""Atom in a PDB structure
Constructor: Atom(|name|, |position|, |**properties|),
where |name| is the PDB atom name (a string),
    |position| is an atom position (a vector), and
|properties| can include any of the other items that
can be stored in an atom record.
The properties can be obtained or modified using
indexing, as for Python dictionaries.
"""
def __init__(self, name, position, **properties):
self.position = position
self.properties = properties
if self.properties.get('element', '') == '':
if name[0] == ' ' or name[0] in string.digits:
self.properties['element'] = name[1]
elif name[1] in string.digits:
self.properties['element'] = name[0]
self.name = string.strip(name)
def __getitem__(self, item):
try:
return self.properties[item]
except KeyError:
if item == 'name':
return self.name
elif item == 'position':
return self.position
else:
raise KeyError, "Undefined atom property: " + repr(item)
def __setitem__(self, item, value):
self.properties[item] = value
def __str__(self):
return self.__class__.__name__ + ' ' + self.name + \
' at ' + str(self.position)
__repr__ = __str__
def type(self):
"Returns the six-letter record type, ATOM or HETATM."
return 'ATOM '
def writeToFile(self, file):
"""Writes an atom record to |file| (a PDBFile object or a
string containing a file name)."""
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
file.writeAtom(self.name, self.position,
self.properties.get('occupancy', 0.),
self.properties.get('temperature_factor', 0.),
self.properties.get('element', ''))
if close:
file.close()
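# Example (added for illustration; the values are invented): with the element
# guess in __init__, the PDB name ' CA ' (leading space) yields element 'C'
# for a C_alpha carbon, and the stored .name becomes the stripped 'CA':
#
#   Atom(' CA ', Vector(1.0, 2.0, 3.0), occupancy=1.0, temperature_factor=10.)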
class HetAtom(Atom):
"""HetAtom in a PDB structure
A subclass of Atom, which differs only in the return value
of the method type().
Constructor: HetAtom(|name|, |position|, |**properties|).
"""
def type(self):
return 'HETATM'
class Group:
"""Atom group (residue or molecule) in a PDB file
This is an abstract base class. Instances can be created using
one of the subclasses (Molecule, AminoAcidResidue, NucleotideResidue).
Group objects permit iteration over atoms with for-loops,
as well as extraction of atoms by indexing with the
atom name.
"""
def __init__(self, name, atoms = None, number = None):
self.name = name
self.number = number
self.atom_list = []
self.atoms = {}
if atoms:
self.atom_list = atoms
for a in atoms:
self.atoms[a.name] = a
def __len__(self):
return len(self.atom_list)
def __getitem__(self, item):
if type(item) == type(0):
return self.atom_list[item]
else:
return self.atoms[item]
def __str__(self):
s = self.__class__.__name__ + ' ' + self.name + ':\n'
for atom in self.atom_list:
s = s + ' ' + `atom` + '\n'
return s
__repr__ = __str__
def isCompatible(self, residue_data):
return residue_data['residue_name'] == self.name \
and residue_data['residue_number'] == self.number
def addAtom(self, atom):
"Adds |atom| (an Atom object) to the group."
self.atom_list.append(atom)
self.atoms[atom.name] = atom
def deleteAtom(self, atom):
"""Removes |atom| (an Atom object) from the group. An exception
will be raised if |atom| is not part of the group.
"""
self.atom_list.remove(atom)
del self.atoms[atom.name]
def deleteHydrogens(self):
"Removes all hydrogen atoms."
delete = []
for a in self.atom_list:
if a.name[0] == 'H' or (a.name[0] in string.digits
and a.name[1] == 'H'):
delete.append(a)
for a in delete:
self.deleteAtom(a)
def changeName(self, name):
"Sets the PDB residue name to |name|."
self.name = name
def writeToFile(self, file):
"""Writes the group to |file| (a PDBFile object or a
string containing a file name).
"""
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
file.nextResidue(self.name, self.number, None)
for a in self.atom_list:
a.writeToFile(file)
if close:
file.close()
class Molecule(Group):
"""Molecule in a PDB file
A subclass of Group.
Constructor: Molecule(|name|, |atoms|='None', |number|=None),
where |name| is the PDB residue name. An optional list
of |atoms| can be specified, otherwise the molecule is initially
empty. The optional |number| is the PDB residue number.
Note: In PDB files, non-chain molecules are treated as residues,
    there is no separate molecule definition. This module treats
    every residue that is not an amino acid residue or a nucleotide
    residue as a molecule.
"""
pass
class Residue(Group):
pass
class AminoAcidResidue(Residue):
"""Amino acid residue in a PDB file
A subclass of Group.
Constructor: AminoAcidResidue(|name|, |atoms|='None', |number|=None),
where |name| is the PDB residue name. An optional list
of |atoms| can be specified, otherwise the residue is initially
empty. The optional |number| is the PDB residue number.
"""
is_amino_acid = 1
def isCTerminus(self):
"""Returns 1 if the residue is in C-terminal configuration,
i.e. if it has a second oxygen bound to the carbon atom of
the peptide group.
"""
return self.atoms.has_key('OXT') or self.atoms.has_key('OT2')
def isNTerminus(self):
"""Returns 1 if the residue is in N-terminal configuration,
        i.e. if it contains more than one hydrogen bound to the
        nitrogen atom of the peptide group.
"""
return self.atoms.has_key('1HT') or self.atoms.has_key('2HT') \
or self.atoms.has_key('3HT')
def writeToFile(self, file):
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
terminus = None
if self.isCTerminus(): terminus = 'C'
if self.isNTerminus(): terminus = 'N'
file.nextResidue(self.name, self.number, terminus)
for a in self.atom_list:
a.writeToFile(file)
if close:
file.close()
class NucleotideResidue(Residue):
"""Nucleotide residue in a PDB file
A subclass of Group.
Constructor: NucleotideResidue(|name|, |atoms|='None', |number|=None),
where |name| is the PDB residue name. An optional list
of |atoms| can be specified, otherwise the residue is initially
empty. The optional |number| is the PDB residue number.
"""
is_nucleotide = 1
def __init__(self, name, atoms = None, number = None):
self.pdbname = name
name = string.strip(name)
if name[0] != 'D' and name[0] != 'R':
name = 'D' + name
Residue.__init__(self, name, atoms, number)
for a in atoms:
if a.name == 'O2*' or a.name == "O2'": # Ribose
self.name = 'R' + self.name[1:]
def isCompatible(self, residue_data):
return (residue_data['residue_name'] == self.name or
residue_data['residue_name'] == self.pdbname) \
and residue_data['residue_number'] == self.number
def addAtom(self, atom):
Residue.addAtom(self, atom)
if atom.name == 'O2*' or atom.name == "O2'": # Ribose
self.name = 'R' + self.name[1:]
def hasRibose(self):
"Returns 1 if the residue has an atom named O2*."
return self.atoms.has_key('O2*') or self.atoms.has_key("O2'")
def hasDesoxyribose(self):
"Returns 1 if the residue has no atom named O2*."
return not self.hasRibose()
def hasPhosphate(self):
"Returns 1 if the residue has a phosphate group."
return self.atoms.has_key('P')
def hasTerminalH(self):
"Returns 1 if the residue has a 3-terminal H atom."
return self.atoms.has_key('H3T')
def writeToFile(self, file):
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
terminus = None
if not self.hasPhosphate(): terminus = '5'
file.nextResidue(self.name[1:], self.number, terminus)
for a in self.atom_list:
a.writeToFile(file)
if close:
file.close()
class Chain:
"""Chain of PDB residues
This is an abstract base class. Instances can be created using
one of the subclasses (PeptideChain, NucleotideChain).
Chain objects respond to len() and return their residues
by indexing with integers.
"""
def __init__(self, residues = None, chain_id = None, segment_id = None):
if residues is None:
self.residues = []
else:
self.residues = residues
self.chain_id = chain_id
self.segment_id = segment_id
def __len__(self):
return len(self.residues)
def sequence(self):
"Returns the list of residue names."
return map(lambda r: r.name, self.residues)
def __getitem__(self, index):
return self.residues[index]
def addResidue(self, residue):
"Add |residue| at the end of the chain."
self.residues.append(residue)
def removeResidues(self, first, last):
"""Remove residues starting from |first| up to (but not
including) |last|. If |last| is 'None', remove everything
starting from |first|.
"""
if last is None:
del self.residues[first:]
else:
del self.residues[first:last]
def deleteHydrogens(self):
"Removes all hydrogen atoms."
for r in self.residues:
r.deleteHydrogens()
def writeToFile(self, file):
"""Writes the chain to |file| (a PDBFile object or a
string containing a file name).
"""
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
file.nextChain(self.chain_id, self.segment_id)
for r in self.residues:
r.writeToFile(file)
file.terminateChain()
if close:
file.close()
class PeptideChain(Chain):
"""Peptide chain in a PDB file
A subclass of Chain.
Constructor: PeptideChain(|residues|='None', |chain_id|='None',
|segment_id|='None'), where |chain_id|
is a one-letter chain identifier and |segment_id| is
a multi-character chain identifier, both are optional. A list
of AminoAcidResidue objects can be passed as |residues|; by
default a peptide chain is initially empty.
"""
def __getslice__(self, i1, i2):
return self.__class__(self.residues[i1:i2])
def isTerminated(self):
"Returns 1 if the last residue is in C-terminal configuration."
return self.residues and self.residues[-1].isCTerminus()
def isCompatible(self, chain_data, residue_data):
return chain_data['chain_id'] == self.chain_id and \
chain_data['segment_id'] == self.segment_id and \
residue_data['residue_name'] in amino_acids
class NucleotideChain(Chain):
"""Nucleotide chain in a PDB file
A subclass of Chain.
Constructor: NucleotideChain(|residues|='None', |chain_id|='None',
|segment_id|='None'), where |chain_id|
is a one-letter chain identifier and |segment_id| is
a multi-character chain identifier, both are optional. A list
of NucleotideResidue objects can be passed as |residues|; by
default a nucleotide chain is initially empty.
"""
def __getslice__(self, i1, i2):
return self.__class__(self.residues[i1:i2])
def isTerminated(self):
# impossible to detect for standard PDB files, but we can still
# do something useful for certain non-standard files
return self.residues and \
(self.residues[-1].name[-1] == '3'
or self.residues[-1].hasTerminalH())
def isCompatible(self, chain_data, residue_data):
return chain_data['chain_id'] == self.chain_id and \
chain_data['segment_id'] == self.segment_id and \
residue_data['residue_name'] in nucleic_acids
class DummyChain(Chain):
def __init__(self, structure, chain_id, segment_id):
self.structure = structure
self.chain_id = chain_id
self.segment_id = segment_id
def isTerminated(self):
return 0
def addResidue(self, residue):
self.structure.addMolecule(residue)
def isCompatible(self, chain_data, residue_data):
return chain_data['chain_id'] == self.chain_id and \
chain_data['segment_id'] == self.segment_id and \
residue_data['residue_name'] not in amino_acids and \
residue_data['residue_name'] not in nucleic_acids
#
# Residue number class for dealing with insertion codes
#
class ResidueNumber:
"""PDB residue number
Most PDB residue numbers are simple integers, but when insertion
codes are used a number can consist of an integer plus a letter.
Such compound residue numbers are represented by this class.
Constructor: ResidueNumber(|number|, |insertion_code|)
"""
def __init__(self, number, insertion_code):
self.number = number
self.insertion_code = insertion_code
def __cmp__(self, other):
if type(other) == type(0):
if self.number == other:
return 1
else:
return cmp(self.number, other)
if self.number == other.number:
return cmp(self.insertion_code, other.insertion_code)
else:
return cmp(self.number, other.number)
def __str__(self):
return str(self.number) + self.insertion_code
__repr__ = __str__
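# For instance (added for illustration), residue "52A" of a PDB entry would
# be ResidueNumber(52, 'A'): str() gives '52A', it compares equal to another
# ResidueNumber(52, 'A'), and __cmp__ above sorts it after the plain
# integer 52.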
#
# The configuration class.
#
class Structure:
"""A high-level representation of the contents of a PDB file
Constructor: Structure(|filename|, |model|='0', |alternate_code|='"A"'),
where |filename| is the name of the PDB file. Compressed files
and URLs are accepted, as for class PDBFile. The two optional
arguments specify which data should be read in case of a
multiple-model file or in case of a file that contains alternative
positions for some atoms.
The components of a system can be accessed in several ways
('s' is an instance of this class):
- 's.residues' is a list of all PDB residues, in the order in
which they occurred in the file.
- 's.peptide_chains' is a list of PeptideChain objects, containing
all peptide chains in the file in their original order.
- 's.nucleotide_chains' is a list of NucleotideChain objects, containing
all nucleotide chains in the file in their original order.
- 's.molecules' is a list of all PDB residues that are neither
amino acid residues nor nucleotide residues, in their original
order.
- 's.objects' is a list of all high-level objects (peptide chains,
nucleotide chains, and molecules) in their original order.
An iteration over a Structure instance by a for-loop is equivalent
to an iteration over the residue list.
"""
def __init__(self, filename, model = 0, alternate_code = 'A'):
self.filename = filename
self.model = model
self.alternate = alternate_code
self.pdb_code = ''
self.residues = []
self.objects = []
self.peptide_chains = []
self.nucleotide_chains = []
self.molecules = {}
self.parseFile(PDBFile(filename))
peptide_chain_constructor = PeptideChain
nucleotide_chain_constructor = NucleotideChain
molecule_constructor = Molecule
def __len__(self):
return len(self.residues)
def __getitem__(self, item):
return self.residues[item]
def deleteHydrogens(self):
"Removes all hydrogen atoms."
for r in self.residues:
r.deleteHydrogens()
def splitPeptideChain(self, number, position):
"""Splits the peptide chain indicated by |number| (0 being
the first peptide chain in the PDB file) after the residue indicated
by |position| (0 being the first residue of the chain).
The two chain fragments remain adjacent in the peptide chain
        list, i.e. the numbers of all following peptide chains increase
by one.
"""
self._splitChain(self.peptide_chain_constructor,
self.peptide_chains, number, position)
def splitNucleotideChain(self, number, position):
"""Splits the nucleotide chain indicated by |number| (0 being
the first nucleotide chain in the PDB file) after the residue indicated
by |position| (0 being the first residue of the chain).
The two chain fragments remain adjacent in the nucleotide chain
list, i.e. the numbers of all following nucleotide chains increase
by one.
"""
self._splitChain(self.nucleotide_chain_constructor,
self.nucleotide_chains, number, position)
def _splitChain(self, constructor, chain_list, number, position):
chain = chain_list[number]
part1 = constructor(chain.residues[:position],
chain.chain_id, chain.segment_id)
part2 = constructor(chain.residues[position:])
chain_list[number:number+1] = [part1, part2]
index = self.objects.index(chain)
self.objects[index:index+1] = [part1, part2]
def joinPeptideChains(self, first, second):
"""Join the two peptide chains indicated by |first| and |second|
into one peptide chain. The new chain occupies the position
|first|; the chain at |second| is removed from the peptide
chain list.
"""
self._joinChains(self.peptide_chain_constructor,
self.peptide_chains, first, second)
def joinNucleotideChains(self, first, second):
"""Join the two nucleotide chains indicated by |first| and |second|
into one nucleotide chain. The new chain occupies the position
|first|; the chain at |second| is removed from the nucleotide
chain list.
"""
self._joinChains(self.nucleotide_chain_constructor,
self.nucleotide_chains, first, second)
def _joinChains(self, constructor, chain_list, first, second):
chain1 = chain_list[first]
chain2 = chain_list[second]
total = constructor(chain1.residues+chain2.residues,
chain1.chain_id, chain1.segment_id)
chain_list[first] = total
del chain_list[second]
index = self.objects.index(chain1)
self.objects[index] = total
index = self.objects.index(chain2)
del self.objects[index]
def addMolecule(self, molecule):
try:
molecule_list = self.molecules[molecule.name]
except KeyError:
molecule_list = []
self.molecules[molecule.name] = molecule_list
molecule_list.append(molecule)
self.objects.append(molecule)
def extractData(self, data):
atom_data = {}
for name in ['serial_number', 'name', 'position',
'occupancy', 'temperature_factor']:
atom_data[name] = data[name]
for name in ['alternate', 'charge']:
value = data[name]
if value:
atom_data[name] = value
element = data['element']
if element != '':
try:
string.atoi(element)
except ValueError:
atom_data['element'] = element
residue_data = {'residue_name': data['residue_name']}
number = data['residue_number']
insertion = data['insertion_code']
if insertion == '':
residue_data['residue_number'] = number
else:
residue_data['residue_number'] = ResidueNumber(number, insertion)
chain_data = {}
for name in ['chain_id', 'segment_id']:
chain_data[name] = data[name]
if chain_data['segment_id'] == self.pdb_code:
chain_data['segment_id'] = ''
return atom_data, residue_data, chain_data
def newResidue(self, residue_data):
name = residue_data['residue_name']
residue_number = residue_data['residue_number']
if name in amino_acids:
residue = AminoAcidResidue(name, [], residue_number)
elif name in nucleic_acids:
residue = NucleotideResidue(name, [], residue_number)
else:
residue = self.molecule_constructor(name, [], residue_number)
self.residues.append(residue)
return residue
def newChain(self, residue, chain_data):
if hasattr(residue, 'is_amino_acid'):
chain = self.peptide_chain_constructor([], chain_data['chain_id'],
chain_data['segment_id'])
self.peptide_chains.append(chain)
self.objects.append(chain)
elif hasattr(residue, 'is_nucleotide'):
chain = self.nucleotide_chain_constructor([],
chain_data['chain_id'],
chain_data['segment_id'])
self.nucleotide_chains.append(chain)
self.objects.append(chain)
else:
chain = DummyChain(self, chain_data['chain_id'],
chain_data['segment_id'])
return chain
def parseFile(self, file):
atom = None
residue = None
chain = None
read = self.model == 0
while 1:
type, data = file.readLine()
if type == 'END': break
elif type == 'HEADER':
self.pdb_code = data['pdb_code']
elif type == 'MODEL':
read = data['serial_number'] == self.model
if self.model == 0 and len(self.residues) == 0:
read = 1
elif type == 'ENDMDL':
read = 0
elif read:
if type == 'ATOM' or type == 'HETATM':
alt = data['alternate']
if alt == '' or alt == self.alternate:
atom_data, residue_data, chain_data = \
self.extractData(data)
if type == 'ATOM':
atom = apply(Atom, (), atom_data)
else:
atom = apply(HetAtom, (), atom_data)
new_chain = chain is None or \
not chain.isCompatible(chain_data,
residue_data)
new_residue = new_chain or residue is None \
or not residue.isCompatible(residue_data)
if new_residue and chain is not None and \
chain.isTerminated():
new_chain = 1
if new_residue:
residue = self.newResidue(residue_data)
if new_chain:
chain = self.newChain(residue, chain_data)
chain.addResidue(residue)
residue.addAtom(atom)
elif type == 'ANISOU':
alt = data['alternate']
if alt == '' or alt == self.alternate:
if atom is None:
raise ValueError, "ANISOU record before " + \
"ATOM record"
atom['u'] = data['u']
            elif type == 'TER':
                if chain is None:
                    raise ValueError, "TER record before chain"
chain = None
def renumberAtoms(self):
"Renumber all atoms sequentially starting with 1."
n = 0
for residue in self.residues:
for atom in residue:
atom['serial_number'] = n
n = n + 1
def __repr__(self):
s = self.__class__.__name__ + "(" + repr(self.filename)
if self.model != 0:
s = s + ", model=" + repr(self.model)
if self.alternate != 'A':
            s = s + ", alternate_code = " + repr(self.alternate)
s = s + "):\n"
for name, list in [("Peptide", self.peptide_chains),
("Nucleotide", self.nucleotide_chains)]:
for c in list:
s = s + " " + name + " chain "
if c.segment_id:
s = s + c.segment_id + " "
elif c.chain_id:
s = s + c.chain_id + " "
s = s + "of length " + repr(len(c)) + "\n"
for name, list in self.molecules.items():
s = s + " " + repr(len(list)) + " " + name + " molecule"
if len(list) == 1:
s = s + "\n"
else:
s = s + "s\n"
return s
def writeToFile(self, file):
"""Writes all objects to |file| (a PDBFile object or a
string containing a file name).
"""
close = 0
if type(file) == type(''):
file = PDBFile(file, 'w')
close = 1
for o in self.objects:
o.writeToFile(file)
if close:
file.close()
if __name__ == '__main__':
if 0:
file = PDBFile('~/3lzt.pdb')
copy = PDBFile('test.pdb', 'w', 'xplor')
while 1:
type, data = file.readLine()
if type == 'END':
break
copy.writeLine(type, data)
copy.close()
if 1:
s = Structure('~/detached/okb.pdb')
#s = Structure('./3lzt.pdb')
#s = Structure('~/1tka.pdb')
print s
| gpl-2.0 | 953,494,229,587,949,400 | 35.97913 | 79 | 0.563185 | false | 3.792224 | false | false | false |
epiphany27/NewsBlur | apps/reader/migrations/0006_folders_unique.py | 18 | 13116 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'UserSubscriptionFolders', fields ['user']
db.create_unique('reader_usersubscriptionfolders', ['user_id'])
# Changing field 'UserSubscription.needs_unread_recalc'
db.alter_column('reader_usersubscription', 'needs_unread_recalc', self.gf('django.db.models.fields.BooleanField')(blank=True))
# Changing field 'UserSubscription.is_trained'
db.alter_column('reader_usersubscription', 'is_trained', self.gf('django.db.models.fields.BooleanField')(blank=True))
# Changing field 'UserSubscription.active'
db.alter_column('reader_usersubscription', 'active', self.gf('django.db.models.fields.BooleanField')(blank=True))
def backwards(self, orm):
# Removing unique constraint on 'UserSubscriptionFolders', fields ['user']
db.delete_unique('reader_usersubscriptionfolders', ['user_id'])
# Changing field 'UserSubscription.needs_unread_recalc'
db.alter_column('reader_usersubscription', 'needs_unread_recalc', self.gf('django.db.models.fields.BooleanField')())
# Changing field 'UserSubscription.is_trained'
db.alter_column('reader_usersubscription', 'is_trained', self.gf('django.db.models.fields.BooleanField')())
# Changing field 'UserSubscription.active'
db.alter_column('reader_usersubscription', 'active', self.gf('django.db.models.fields.BooleanField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reader.feature': {
'Meta': {'object_name': 'Feature'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'reader.userstory': {
'Meta': {'unique_together': "(('user', 'feed', 'story'),)", 'object_name': 'UserStory'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'read_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'reader.usersubscription': {
'Meta': {'unique_together': "(('user', 'feed'),)", 'object_name': 'UserSubscription'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribers'", 'to': "orm['rss_feeds.Feed']"}),
'feed_opens': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trained': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 10, 22, 2, 45, 34, 980447)'}),
'mark_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 10, 22, 2, 45, 34, 980447)'}),
'needs_unread_recalc': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'unread_count_negative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_neutral': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_positive': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': "orm['auth.User']"})
},
'reader.usersubscriptionfolders': {
'Meta': {'object_name': 'UserSubscriptionFolders'},
'folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'rss_feeds.feed': {
'Meta': {'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.story': {
'Meta': {'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['reader']
| mit | 5,856,136,366,699,130,000 | 77.071429 | 163 | 0.56717 | false | 3.687377 | false | false | false |
BirkbeckCTP/janeway | src/core/homepage_elements/issue/plugin_settings.py | 1 | 1919 | from django.db.utils import OperationalError
from django.contrib.contenttypes.models import ContentType
PLUGIN_NAME = 'Current Issue'
DESCRIPTION = 'This is a homepage element that renders featured current issues.'
AUTHOR = 'Martin Paul Eve'
def install():
import core.models as core_models
import journal.models as journal_models
import press.models as press_models
# check whether this homepage element has already been installed for all journals
journals = journal_models.Journal.objects.all()
for journal in journals:
content_type = ContentType.objects.get_for_model(journal)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='current_issue_setup',
template_path='journal/homepage_elements/issue_block.html',
content_type=content_type,
object_id=journal.pk,
has_config=True)
element.save()
presses = press_models.Press.objects.all()
for press in presses:
content_type = ContentType.objects.get_for_model(press)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='current_issue_setup',
template_path='journal/homepage_elements/issue_block.html',
content_type=content_type,
object_id=press.pk,
has_config=True)
element.save()
def hook_registry():
try:
install()
return {
'yield_homepage_element_context': {
'module': 'core.homepage_elements.issue.hooks',
'function': 'yield_homepage_element_context',
'name': PLUGIN_NAME,
}
}
except OperationalError:
# if we get here the database hasn't yet been created
return {}
except BaseException:
return {}
| agpl-3.0 | -8,131,779,798,871,920,000 | 32.086207 | 85 | 0.638874 | false | 4.245575 | false | false | false |
xuxiao19910803/edx | common/lib/xmodule/xmodule/static_content.py | 70 | 6702 | # /usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import logging
import hashlib
import os
import errno
import sys
from collections import defaultdict
from docopt import docopt
from path import path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return [
desc for desc in [
desc for (_, desc) in XModuleDescriptor.load_classes()
]
]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
for (idx, filetype, fragment), classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
for class_ in classes:
css_imports[class_].add(fragment_name)
module_styles_lines = []
module_styles_lines.append("@import 'bourbon/bourbon';")
module_styles_lines.append("@import 'bourbon/addons/button';")
module_styles_lines.append("@import 'assets/anims';")
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
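# Rough shape of the generated '_module-styles.scss' (illustrative class and
# fragment names only): each XModule class gets a scoped block importing its
# css fragments, e.g.
#   .xmodule_display.xmodule_HtmlModule {
#     @import "000-<md5-of-fragment>.scss";
#   }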
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
for class_ in classes:
module_js = class_.get_javascript()
# It will enforce 000 prefix for xmodule.js.
js_fragments.add((0, 'js', module_js.get('xmodule_js')))
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx + 1, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
Any files not listed in contents that exists in output_root will be deleted,
unless it matches one of the patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest() # pylint: disable=too-many-function-args
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
    Generate XModule and XModuleDescriptor static assets (JS and CSS).
Usage: static_content.py <output_root>
"""
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | 2,948,270,375,082,353,000 | 32.678392 | 135 | 0.644434 | false | 3.984542 | false | false | false |
chubbymaggie/miasm | miasm2/ir/symbexec_top.py | 1 | 7620 | from miasm2.ir.symbexec import SymbolicExecutionEngine, StateEngine
from miasm2.expression.simplifications import expr_simp
from miasm2.expression.expression import ExprId, ExprInt, ExprSlice,\
ExprMem, ExprCond, ExprCompose, ExprOp
from miasm2.core import asmblock
TOPSTR = "TOP"
def exprid_top(expr):
    """Return a TOP expression (an ExprId("TOP")) of size @expr.size
@expr: expression to replace with TOP
"""
return ExprId(TOPSTR, expr.size)
class SymbolicStateTop(StateEngine):
def __init__(self, dct, regstop):
self._symbols = frozenset(dct.items())
self._regstop = frozenset(regstop)
def __hash__(self):
return hash((self.__class__, self._symbols, self._regstop))
def __str__(self):
out = []
for dst, src in sorted(self._symbols):
out.append("%s = %s" % (dst, src))
for dst in self._regstop:
out.append('TOP %s' %dst)
return "\n".join(out)
def __eq__(self, other):
if self is other:
return True
if self.__class__ != other.__class__:
return False
return (self.symbols == other.symbols and
self.regstop == other.regstop)
def __iter__(self):
for dst, src in self._symbols:
yield dst, src
def merge(self, other):
"""Merge two symbolic states
        Only expressions that are equal in both states are kept
@other: second symbolic state
"""
symb_a = self.symbols
symb_b = other.symbols
intersection = set(symb_a.keys()).intersection(symb_b.keys())
diff = set(symb_a.keys()).union(symb_b.keys()).difference(intersection)
symbols = {}
regstop = set()
for dst in diff:
if dst.is_id():
regstop.add(dst)
for dst in intersection:
if symb_a[dst] == symb_b[dst]:
symbols[dst] = symb_a[dst]
else:
regstop.add(dst)
return self.__class__(symbols, regstop)
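    # Illustrative merge behaviour (hypothetical register ids, comments only):
    # if state A holds {EAX: 1, EBX: 2} and state B holds {EAX: 1, EBX: 3},
    # the merged state keeps EAX = 1 (equal in both) and marks EBX as TOP;
    # register ids present in only one state are marked TOP as well, while
    # memory keys present in only one state are simply dropped.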
@property
    def symbols(self):
        """Return the dictionary of known symbols"""
return dict(self._symbols)
@property
def regstop(self):
"""Return the set of expression with TOP values"""
return self._regstop
class SymbExecTopNoMem(SymbolicExecutionEngine):
"""
Symbolic execution, include TOP value.
ExprMem are not propagated.
Any computation involving a TOP will generate TOP.
"""
StateEngine = SymbolicStateTop
def __init__(self, ir_arch, state, regstop,
func_read=None,
func_write=None,
sb_expr_simp=expr_simp):
known_symbols = dict(state)
super(SymbExecTopNoMem, self).__init__(ir_arch, known_symbols,
func_read,
func_write,
sb_expr_simp)
self.regstop = set(regstop)
def get_state(self):
"""Return the current state of the SymbolicEngine"""
return self.StateEngine(self.symbols, self.regstop)
def eval_expr(self, expr, eval_cache=None):
if expr in self.regstop:
return exprid_top(expr)
ret = self.apply_expr_on_state(expr, eval_cache)
return ret
def manage_mem(self, expr, state, cache, level):
ptr = self.apply_expr_on_state_visit_cache(expr.arg, state, cache, level+1)
ret = ExprMem(ptr, expr.size)
ret = self.get_mem_state(ret)
if ret.is_mem() and not ret.arg.is_int() and ret.arg == ptr:
ret = exprid_top(expr)
assert expr.size == ret.size
return ret
def apply_expr_on_state_visit_cache(self, expr, state, cache, level=0):
"""
        Depth-first evaluation of nodes:
            1. evaluate the node's children
2. simplify
"""
if expr in cache:
ret = cache[expr]
elif expr in state:
return state[expr]
elif expr.is_int():
ret = expr
elif expr.is_id():
if isinstance(expr.name, asmblock.asm_label) and expr.name.offset is not None:
ret = ExprInt(expr.name.offset, expr.size)
elif expr in self.regstop:
ret = exprid_top(expr)
else:
ret = state.get(expr, expr)
elif expr.is_mem():
ret = self.manage_mem(expr, state, cache, level)
elif expr.is_cond():
cond = self.apply_expr_on_state_visit_cache(expr.cond, state, cache, level+1)
src1 = self.apply_expr_on_state_visit_cache(expr.src1, state, cache, level+1)
src2 = self.apply_expr_on_state_visit_cache(expr.src2, state, cache, level+1)
if cond.is_id(TOPSTR) or src1.is_id(TOPSTR) or src2.is_id(TOPSTR):
ret = exprid_top(expr)
else:
ret = ExprCond(cond, src1, src2)
elif expr.is_slice():
arg = self.apply_expr_on_state_visit_cache(expr.arg, state, cache, level+1)
if arg.is_id(TOPSTR):
ret = exprid_top(expr)
else:
ret = ExprSlice(arg, expr.start, expr.stop)
elif expr.is_op():
args = []
for oarg in expr.args:
arg = self.apply_expr_on_state_visit_cache(oarg, state, cache, level+1)
assert oarg.size == arg.size
if arg.is_id(TOPSTR):
return exprid_top(expr)
args.append(arg)
ret = ExprOp(expr.op, *args)
elif expr.is_compose():
args = []
for arg in expr.args:
arg = self.apply_expr_on_state_visit_cache(arg, state, cache, level+1)
if arg.is_id(TOPSTR):
return exprid_top(expr)
args.append(arg)
ret = ExprCompose(*args)
else:
raise TypeError("Unknown expr type")
ret = self.expr_simp(ret)
assert expr.size == ret.size
cache[expr] = ret
return ret
def apply_change(self, dst, src):
eval_cache = {}
if dst.is_mem():
# If Write to TOP, forget all memory information
ret = self.eval_expr(dst.arg, eval_cache)
if ret.is_id(TOPSTR):
to_del = set()
for dst_tmp in self.symbols:
if dst_tmp.is_mem():
to_del.add(dst_tmp)
for dst_to_del in to_del:
del self.symbols[dst_to_del]
return
src_o = self.expr_simp(src)
# Force update. Ex:
# EBX += 1 (state: EBX = EBX+1)
# EBX -= 1 (state: EBX = EBX, must be updated)
if dst in self.regstop:
self.regstop.discard(dst)
self.symbols[dst] = src_o
if dst == src_o:
# Avoid useless X = X information
del self.symbols[dst]
if src_o.is_id(TOPSTR):
if dst in self.symbols:
del self.symbols[dst]
self.regstop.add(dst)
class SymbExecTop(SymbExecTopNoMem):
"""
    Symbolic execution that includes the TOP value.
    ExprMem values are propagated.
Any computation involving a TOP will generate TOP.
WARNING: avoid memory aliases here!
"""
def manage_mem(self, expr, state, cache, level):
ptr = self.apply_expr_on_state_visit_cache(expr.arg, state, cache, level+1)
ret = ExprMem(ptr, expr.size)
ret = self.get_mem_state(ret)
assert expr.size == ret.size
return ret
| gpl-2.0 | -4,532,542,140,521,507,000 | 33.479638 | 90 | 0.543045 | false | 3.746313 | false | false | false |
redcurrant/redcurrant | svc-monitor/contrib/rainx-monitor.py | 1 | 1285 | #!/usr/bin/python
import sys
import urllib2
RAINX_STAT_KEYS = [
("rainx.reqpersec", "total_reqpersec"),
("rainx.reqputpersec", "put_reqpersec"),
("rainx.reqgetpersec", "get_reqpersec"),
("rainx.avreqtime", "total_avreqtime"),
("rainx.avputreqtime", "put_avreqtime"),
("rainx.avgetreqtime", "get_avreqtime"),
]
def parse_info(stream):
data = {}
for line in stream.readlines():
parts = line.split()
if len(parts) > 1:
# try to cast value to int or float
try:
value = int(parts[1])
except ValueError:
try:
value = float(parts[1])
except ValueError:
value = parts[1]
data[parts[0]] = value
else:
data[parts[0]] = None
return data
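# Sketch of the expected '/stat' payload (hypothetical values): each line is
# "<key> <value>", so "rainx.reqpersec 12" parses to {'rainx.reqpersec': 12}
# (int, then float, then string is tried), and a line holding only a key maps
# that key to None.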
def get_stat_lines(url, stat_keys):
stream = urllib2.urlopen(url)
data = parse_info(stream)
stream.close()
stats = [("stat.%s = %s" % (k[1], str(data[k[0]])))
for k in stat_keys if k[0] in data]
return stats
def main(args):
ip_port = args[1].split("|")[2]
stats_url = "http://%s/stat" % ip_port
for stat in get_stat_lines(stats_url, RAINX_STAT_KEYS):
print stat
if __name__ == "__main__":
main(sys.argv)
| lgpl-3.0 | 2,449,744,074,121,119,000 | 24.7 | 59 | 0.542412 | false | 3.165025 | false | false | false |
gautam1858/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py | 25 | 7408 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.rnn.python.ops.fused_rnn_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FusedRnnCellTest(test.TestCase):
def testBasicRNNFusedWrapper(self):
"""This test checks that using a wrapper for BasicRNN works as expected."""
with self.cached_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
cell = rnn_cell.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, state = rnn.static_rnn(
cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([packed_outputs, state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope(
"fused_static", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10))
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_static_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_static/")
]
sess.run([variables.global_variables_initializer()])
fused_static_outputs, fused_static_state = sess.run([outputs, state])
fused_static_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_static_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_static_vars))
self.assertAllClose(basic_outputs, fused_static_outputs)
self.assertAllClose(basic_state, fused_static_state)
self.assertAllClose(basic_grads, fused_static_grads)
for basic, fused in zip(basic_wgrads, fused_static_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
with variable_scope.variable_scope(
"fused_dynamic", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10), use_dynamic_rnn=True)
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_dynamic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_dynamic/")
]
sess.run([variables.global_variables_initializer()])
fused_dynamic_outputs, fused_dynamic_state = sess.run([outputs, state])
fused_dynamic_grads = sess.run(
gradients_impl.gradients(outputs, inputs))
fused_dynamic_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_dynamic_vars))
self.assertAllClose(basic_outputs, fused_dynamic_outputs)
self.assertAllClose(basic_state, fused_dynamic_state)
self.assertAllClose(basic_grads, fused_dynamic_grads)
for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
def testTimeReversedFusedRNN(self):
with self.cached_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
fw_cell = rnn_cell.BasicRNNCell(10)
bw_cell = rnn_cell.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
# test bi-directional rnn
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, fw_state, bw_state = rnn.static_bidirectional_rnn(
fw_cell, bw_cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_fw_state, basic_bw_state = sess.run(
[packed_outputs, fw_state, bw_state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope("fused", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
rnn_cell.BasicRNNCell(10))
fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN(
fused_rnn_cell.FusedRNNCellAdaptor(rnn_cell.BasicRNNCell(10)))
fw_outputs, fw_state = fused_cell(
inputs, dtype=dtypes.float64, scope="fw")
bw_outputs, bw_state = fused_bw_cell(
inputs, dtype=dtypes.float64, scope="bw")
outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
fused_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused/")
]
sess.run([variables.global_variables_initializer()])
fused_outputs, fused_fw_state, fused_bw_state = sess.run(
[outputs, fw_state, bw_state])
fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_fw_state, fused_fw_state)
self.assertAllClose(basic_bw_state, fused_bw_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
test.main()
| apache-2.0 | 7,840,838,567,654,228,000 | 43.626506 | 80 | 0.668467 | false | 3.878534 | true | false | false |
unreal666/outwiker | plugins/pagetypecolor/pagetypecolor/preferencepanel.py | 3 | 2537 | # -*- coding: UTF-8 -*-
import wx
from outwiker.core.application import Application
from outwiker.core.events import PageDialogPageFactoriesNeededParams
from outwiker.gui.preferences.baseprefpanel import BasePrefPanel
from pagetypecolor.i18n import get_
from pagetypecolor.colorslist import ColorsList
class PreferencePanel(BasePrefPanel):
"""
    Panel with the plugin settings
"""
def __init__(self, parent, config):
"""
        parent - parent of the panel (must be a wx.Treebook)
        config - settings from plugin._application.config
"""
super(PreferencePanel, self).__init__(parent)
global _
_ = get_()
self._application = Application
# Key - page type string, value - ColorPicker instance
self._colorPickers = {}
self._colorsList = ColorsList(self._application)
self.__createGui()
self.SetupScrolling()
def __createGui(self):
"""
        Create the controls
"""
mainSizer = wx.FlexGridSizer(cols=2)
mainSizer.AddGrowableCol(0)
mainSizer.AddGrowableCol(1)
descriptionLabel = wx.StaticText(
self,
-1,
_(u'The colors for the various page types')
)
mainSizer.Add(descriptionLabel,
flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL,
border=2)
mainSizer.AddSpacer(0)
eventParams = PageDialogPageFactoriesNeededParams(None, None)
self._application.onPageDialogPageFactoriesNeeded(None, eventParams)
for factory in eventParams.pageFactories:
label, colorPicker = self._createLabelAndColorPicker(
factory.title,
mainSizer
)
self._colorPickers[factory.getTypeString()] = colorPicker
self.SetSizer(mainSizer)
def LoadState(self):
self._colorsList.load()
for pageType in self._colorsList.getPageTypes():
if pageType in self._colorPickers:
color = self._colorsList.getColor(pageType)
self._colorPickers[pageType].SetColour(color)
def Save(self):
self._colorsList.load()
for pageType in self._colorsList.getPageTypes():
if pageType in self._colorPickers:
color = self._colorPickers[pageType].GetColour().GetAsString(wx.C2S_HTML_SYNTAX)
self._colorsList.setColor(pageType, color)
| gpl-3.0 | 191,321,534,753,112,540 | 30.126582 | 96 | 0.620984 | false | 3.824261 | false | false | false |
txt/evil | stats.py | 1 | 1665 | from __future__ import print_function, division
import sys
from cols import *
def cliffsDelta(lst1,lst2,
dull = [0.147, # small
0.33, # medium
0.474 # large
                        ][0] ):
  "Returns True if the Cliff's delta between the two samples exceeds the 'dull' threshold"
m, n = len(lst1), len(lst2)
lst2 = sorted(lst2)
j = more = less = 0
for repeats,x in runs(sorted(lst1)):
while j <= (n - 1) and lst2[j] < x:
j += 1
more += j*repeats
while j <= (n - 1) and lst2[j] == x:
j += 1
less += (n - j)*repeats
  d = (more - less) / (m * n)
return abs(d) > dull
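# Minimal usage sketch (made-up samples):
#   cliffsDelta([1, 2, 3], [10, 11, 12])  # -> True, the samples barely overlap
#   cliffsDelta([1, 2, 3], [1, 2, 3])     # -> False, identical samples (d = 0)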
def runs(lst):
"Iterator, chunks repeated values"
for j,two in enumerate(lst):
if j == 0:
one,i = two,0
if one!=two:
yield j - i,one
i = j
one=two
yield j - i + 1,two
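# Sketch: list(runs([1, 1, 2, 2, 2, 3])) yields (2, 1), (3, 2), (1, 3);
# each tuple is (repeat count, value) for one run of equal, adjacent items.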
def fromFile(f=None):
"utility for reading sample data from disk"
source=open(f) if f else sys.stdin
def labels(str):
if str:
words = re.split(dash,str)
for n in range(len(words)):
m = n + 1
yield ','.join(words[:m])
import re
cache = {}
  num, space, dash = r'^\+?-?[0-9]', r'[ \t\r\n]', r'[ \t]*-[ \t]*'
now=None
for line in source:
line = line.strip()
if line:
for word in re.split(space,line):
if re.match(num,word[0]):
if now:
for label in labels(now):
cache[label] += float(word)
else:
for label in labels(word):
if not label in cache:
cache[label] = Nums()
now = word
print(cache.keys())
for k,v in cache.items():
print(k,v.n)
return cache
fromFile()
| unlicense | -5,390,725,896,659,030,000 | 23.850746 | 65 | 0.506306 | false | 3.147448 | false | false | false |
leilihh/nova | nova/network/nova_ipam_lib.py | 15 | 4277 | # Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from nova import ipv6
from nova.objects import fixed_ip as fixed_ip_obj
from nova.objects import floating_ip as floating_ip_obj
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
def get_ipam_lib(net_man):
return NeutronNovaIPAMLib(net_man)
class NeutronNovaIPAMLib(object):
"""Implements Neutron IP Address Management (IPAM) interface
using the local Nova database. This implementation is inline
with how IPAM is used by other NetworkManagers.
"""
def __init__(self, net_manager):
"""Holds a reference to the "parent" network manager, used
to take advantage of various FlatManager methods to avoid
code duplication.
"""
self.net_manager = net_manager
def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Neutron Network UUID.
"""
n = network_obj.Network.get_by_uuid(context.elevated(), net_id)
subnet_v4 = {
'network_id': n.uuid,
'cidr': n.cidr,
'gateway': n.gateway,
'broadcast': n.broadcast,
'netmask': n.netmask,
'version': 4,
'dns1': n.dns1,
'dns2': n.dns2}
#TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
# this is probably bad as there is no way to add v6
# dns to nova
subnet_v6 = {
'network_id': n.uuid,
'cidr': n.cidr_v6,
'gateway': n.gateway_v6,
'broadcast': None,
'netmask': n.netmask_v6,
'version': 6,
'dns1': None,
'dns2': None}
def ips_to_strs(net):
for key, value in net.items():
if isinstance(value, netaddr.ip.BaseIP):
net[key] = str(value)
return net
return [ips_to_strs(subnet_v4), ips_to_strs(subnet_v6)]
def get_routes_by_ip_block(self, context, block_id, project_id):
"""Returns the list of routes for the IP block."""
return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv4 address strings associated with
the specified virtual interface, based on the fixed_ips table.
"""
# TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
vif_rec = vif_obj.VirtualInterface.get_by_uuid(context, vif_id)
if not vif_rec:
return []
fixed_ips = fixed_ip_obj.FixedIPList.get_by_virtual_interface_id(
context, vif_rec.id)
return [str(fixed_ip.address) for fixed_ip in fixed_ips]
def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list containing a single IPv6 address strings
associated with the specified virtual interface.
"""
admin_context = context.elevated()
network = network_obj.Network.get_by_uuid(admin_context, net_id)
vif_rec = vif_obj.VirtualInterface.get_by_uuid(context, vif_id)
if network.cidr_v6 and vif_rec and vif_rec.address:
ip = ipv6.to_global(network.cidr_v6,
vif_rec.address,
project_id)
return [ip]
return []
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return floating_ip_obj.FloatingIPList.get_by_fixed_address(
context, fixed_address)
| apache-2.0 | -8,251,852,585,102,243,000 | 38.238532 | 78 | 0.609773 | false | 3.835874 | false | false | false |
lidavidm/mathics-heroku | mathics/core/definitions.py | 1 | 13170 | # -*- coding: utf8 -*-
u"""
Mathics: a general-purpose computer algebra system
Copyright (C) 2011-2013 The Mathics Team
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import cPickle as pickle
import os
import base64
def get_file_time(file):
try:
return os.stat(file).st_mtime
except OSError:
return 0
def valuesname(name):
" 'NValues' -> 'n' "
if name == 'Messages':
return 'messages'
else:
return name[:-6].lower()
class Definitions(object):
def __init__(self, add_builtin=False, builtin_filename=None):
super(Definitions, self).__init__()
self.builtin = {}
self.user = {}
self.autoload_stage = False
if add_builtin:
from mathics.builtin import modules, contribute
from mathics.core.evaluation import Evaluation
from mathics.settings import ROOT_DIR
loaded = False
if builtin_filename is not None:
builtin_dates = [get_file_time(module.__file__)
for module in modules]
builtin_time = max(builtin_dates)
if get_file_time(builtin_filename) > builtin_time:
builtin_file = open(builtin_filename, 'r')
self.builtin = pickle.load(builtin_file)
loaded = True
if not loaded:
contribute(self)
if builtin_filename is not None:
builtin_file = open(builtin_filename, 'w')
pickle.dump(self.builtin, builtin_file, -1)
self.autoload_stage = True
for root, dirs, files in os.walk( # noqa
os.path.join(ROOT_DIR, 'autoload')):
for f in filter(lambda x: x.endswith('.m'), files):
with open(os.path.join(root, f)) as stream:
Evaluation(stream.read(), self, timeout=30)
self.autoload_stage = False
def get_builtin_names(self):
return set(self.builtin)
def get_user_names(self):
return set(self.user)
def get_names(self):
return self.get_builtin_names() | self.get_user_names()
def get_definition(self, name):
user = self.user.get(name, None)
builtin = self.builtin.get(name, None)
if builtin:
context = 'System`'
else:
context = 'Global`'
if user is None and builtin is None:
return Definition(name=name, context=context)
if builtin is None:
user.context = context
return user
if user is None:
builtin.context = context
return builtin
if user:
attributes = user.attributes
elif builtin:
attributes = builtin.attributes
else:
attributes = set()
if not user:
user = Definition(name=name)
if not builtin:
builtin = Definition(name=name)
options = builtin.options.copy()
options.update(user.options)
formatvalues = builtin.formatvalues.copy()
for form, rules in user.formatvalues.iteritems():
if form in formatvalues:
formatvalues[form].extend(rules)
else:
formatvalues[form] = rules
return Definition(name=name,
ownvalues=user.ownvalues + builtin.ownvalues,
downvalues=user.downvalues + builtin.downvalues,
subvalues=user.subvalues + builtin.subvalues,
upvalues=user.upvalues + builtin.upvalues,
formatvalues=formatvalues,
messages=user.messages + builtin.messages,
attributes=attributes,
options=options,
nvalues=user.nvalues + builtin.nvalues,
defaultvalues=user.defaultvalues +
builtin.defaultvalues,
context=context,
)
def get_attributes(self, name):
return self.get_definition(name).attributes
def get_ownvalues(self, name):
return self.get_definition(name).ownvalues
def get_downvalues(self, name):
return self.get_definition(name).downvalues
def get_subvalues(self, name):
return self.get_definition(name).subvalues
def get_upvalues(self, name):
return self.get_definition(name).upvalues
def get_formats(self, name, format=''):
formats = self.get_definition(name).formatvalues
result = formats.get(format, []) + formats.get('', [])
result.sort()
return result
def get_nvalues(self, name):
return self.get_definition(name).nvalues
def get_defaultvalues(self, name):
return self.get_definition(name).defaultvalues
def get_value(self, name, pos, pattern, evaluation):
rules = self.get_definition(name).get_values_list(valuesname(pos))
for rule in rules:
result = rule.apply(pattern, evaluation)
if result is not None:
return result
def get_user_definition(self, name, create=True):
if self.autoload_stage:
existing = self.builtin.get(name)
if existing is None:
if not create:
return None
self.builtin[name] = Definition(name=name, attributes=set())
return self.builtin[name]
existing = self.user.get(name)
if existing:
return existing
else:
if not create:
return None
builtin = self.builtin.get(name)
if builtin:
attributes = builtin.attributes
else:
attributes = set()
self.user[name] = Definition(name=name, attributes=attributes)
return self.user[name]
def reset_user_definition(self, name):
del self.user[name]
def add_user_definition(self, name, definition):
self.user[name] = definition
def set_attribute(self, name, attribute):
definition = self.get_user_definition(name)
definition.attributes.add(attribute)
def set_attributes(self, name, attributes):
definition = self.get_user_definition(name)
definition.attributes = set(attributes)
def clear_attribute(self, name, attribute):
definition = self.get_user_definition(name)
if attribute in definition.attributes:
definition.attributes.remove(attribute)
def add_rule(self, name, rule, position=None):
if position is None:
return self.get_user_definition(name).add_rule(rule)
else:
return self.get_user_definition(name).add_rule_at(rule, position)
def add_format(self, name, rule, form=''):
definition = self.get_user_definition(name)
if isinstance(form, tuple) or isinstance(form, list):
forms = form
else:
forms = [form]
for form in forms:
if form not in definition.formatvalues:
definition.formatvalues[form] = []
insert_rule(definition.formatvalues[form], rule)
def add_nvalue(self, name, rule):
definition = self.get_user_definition(name)
definition.add_rule_at(rule, 'n')
def add_default(self, name, rule):
definition = self.get_user_definition(name)
definition.add_rule_at(rule, 'default')
def add_message(self, name, rule):
definition = self.get_user_definition(name)
definition.add_rule_at(rule, 'messages')
def set_values(self, name, values, rules):
pos = valuesname(values)
definition = self.get_user_definition(name)
definition.set_values_list(pos, rules)
def get_options(self, name):
return self.get_definition(name).options
def reset_user_definitions(self):
self.user = {}
def get_user_definitions(self):
return base64.b64encode(pickle.dumps(self.user, protocol=pickle.HIGHEST_PROTOCOL))
def set_user_definitions(self, definitions):
if definitions:
self.user = pickle.loads(base64.b64decode(definitions))
else:
self.user = {}
def get_ownvalue(self, name):
ownvalues = self.get_definition(name).ownvalues
if ownvalues:
return ownvalues[0]
return None
def set_ownvalue(self, name, value):
from expression import Symbol
from rules import Rule
self.add_rule(name, Rule(Symbol(name), value))
def set_options(self, name, options):
definition = self.get_user_definition(name)
definition.options = options
def unset(self, name, expr):
definition = self.get_user_definition(name)
return definition.remove_rule(expr)
def get_tag_position(pattern, name):
if pattern.get_name() == name:
return 'own'
elif pattern.is_atom():
return None
else:
head_name = pattern.get_head_name()
if head_name == name:
return 'down'
elif head_name == 'Condition' and len(pattern.leaves) > 0:
return get_tag_position(pattern.leaves[0], name)
elif pattern.get_lookup_name() == name:
return 'sub'
else:
for leaf in pattern.leaves:
if leaf.get_lookup_name() == name:
return 'up'
return None
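# Rough intuition for the returned positions (symbolic sketches, assuming the
# tag name is 'f'):
#   f          -> 'own'   (ownvalue: a rule for the bare symbol)
#   f[x_]      -> 'down'  (downvalue: f is the head of the pattern)
#   f[x_][y_]  -> 'sub'   (subvalue: f is the lookup name of the head)
#   g[f[x_]]   -> 'up'    (upvalue: f only appears inside a leaf)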
def insert_rule(values, rule):
for index, existing in enumerate(values):
if existing.pattern.same(rule.pattern):
del values[index]
break
values.insert(0, rule)
values.sort()
class Definition(object):
def __init__(self, name, rules=None, ownvalues=None, downvalues=None,
subvalues=None, upvalues=None, formatvalues=None,
messages=None, attributes=(), options=None, nvalues=None,
defaultvalues=None, builtin=None, context='Global`'):
super(Definition, self).__init__()
self.name = name
if rules is None:
rules = []
if ownvalues is None:
ownvalues = []
if downvalues is None:
downvalues = []
if subvalues is None:
subvalues = []
if upvalues is None:
upvalues = []
if formatvalues is None:
formatvalues = {}
if options is None:
options = {}
if nvalues is None:
nvalues = []
if defaultvalues is None:
defaultvalues = []
if messages is None:
messages = []
self.ownvalues = ownvalues
self.downvalues = downvalues
self.subvalues = subvalues
self.upvalues = upvalues
for rule in rules:
self.add_rule(rule)
self.formatvalues = dict((name, list)
for name, list in formatvalues.items())
self.messages = messages
self.attributes = set(attributes)
self.options = options
self.nvalues = nvalues
self.defaultvalues = defaultvalues
self.builtin = builtin
self.context = context
def get_values_list(self, pos):
if pos == 'messages':
return self.messages
else:
return getattr(self, '%svalues' % pos)
def set_values_list(self, pos, rules):
if pos == 'messages':
self.messages = rules
else:
setattr(self, '%svalues' % pos, rules)
def add_rule_at(self, rule, position):
values = self.get_values_list(position)
insert_rule(values, rule)
return True
def add_rule(self, rule):
pos = get_tag_position(rule.pattern, self.name)
if pos:
return self.add_rule_at(rule, pos)
return False
def remove_rule(self, lhs):
position = get_tag_position(lhs, self.name)
if position:
values = self.get_values_list(position)
for index, existing in enumerate(values):
if existing.pattern.expr.same(lhs):
del values[index]
return True
return False
def __repr__(self):
return (
'<Definition: name: %s, '
'downvalues: %s, formats: %s, attributes: %s>') % (
self.name, self.downvalues, self.formatvalues, self.attributes)
| gpl-3.0 | 6,722,413,932,270,954,000 | 32.173804 | 90 | 0.575323 | false | 4.349406 | false | false | false |
endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/__init__.py | 1 | 2624 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library for cross-platform browser tests."""
import os
import sys
try:
# This enables much better stack upon native code crashes.
import faulthandler
faulthandler.enable()
except ImportError:
pass
# Ensure Python >= 2.7.
if sys.version_info < (2, 7):
print >> sys.stderr, 'Need Python 2.7 or greater.'
sys.exit(-1)
def _JoinPath(*path_parts):
return os.path.abspath(os.path.join(*path_parts))
def _InsertPath(path):
assert os.path.isdir(path), 'Not a valid path: %s' % path
if path not in sys.path:
# Some call sites that use Telemetry assume that sys.path[0] is the
# directory containing the script, so we add these extra paths to right
# after sys.path[0].
sys.path.insert(1, path)
def _AddDirToPythonPath(*path_parts):
path = _JoinPath(*path_parts)
_InsertPath(path)
# Add Catapult dependencies to our path.
# util depends on py_utils, so we can't use it to get the catapult dir.
_CATAPULT_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_utils')
_AddDirToPythonPath(_CATAPULT_DIR, 'dependency_manager')
_AddDirToPythonPath(_CATAPULT_DIR, 'devil')
_AddDirToPythonPath(_CATAPULT_DIR, 'systrace')
_AddDirToPythonPath(_CATAPULT_DIR, 'tracing')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_trace_event')
_AddDirToPythonPath(_CATAPULT_DIR, 'common', 'py_vulcanize')
_AddDirToPythonPath(_CATAPULT_DIR, 'tracing', 'tracing_build')
# pylint: disable=wrong-import-position
from telemetry.core import util
from telemetry.internal.util import global_hooks
# pylint: enable=wrong-import-position
# Add Catapult third party dependencies into our path.
_AddDirToPythonPath(util.GetCatapultThirdPartyDir(), 'typ')
# Required by websocket-client.
_AddDirToPythonPath(util.GetCatapultThirdPartyDir(), 'six')
# Add Telemetry third party dependencies into our path.
_TELEMETRY_3P = util.GetTelemetryThirdPartyDir()
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'altgraph')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mock')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'modulegraph')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'mox3')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'png')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'pyfakefs')
_AddDirToPythonPath(util.GetTelemetryThirdPartyDir(), 'websocket-client')
# Install Telemtry global hooks.
global_hooks.InstallHooks()
| bsd-3-clause | -1,831,805,396,973,461,500 | 34.945205 | 75 | 0.755716 | false | 3.153846 | false | false | false |
EDUlib/edx-platform | cms/djangoapps/contentstore/tasks.py | 1 | 28335 | """
This file contains celery tasks for contentstore views
"""
import base64
import json
import os
import pkg_resources
import shutil
import tarfile
from datetime import datetime
from tempfile import NamedTemporaryFile, mkdtemp
import olxcleaner
from ccx_keys.locator import CCXLocator
from celery import shared_task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import SuspiciousOperation
from django.core.files import File
from django.test import RequestFactory
from django.utils.text import get_valid_filename
from django.utils.translation import ugettext as _
from edx_django_utils.monitoring import (
set_code_owner_attribute,
set_code_owner_attribute_from_module,
set_custom_attribute,
set_custom_attributes_for_course_key
)
from olxcleaner.exceptions import ErrorLevel
from olxcleaner.reporting import report_error_summary, report_errors
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from organizations.api import add_organization_course, ensure_organization
from organizations.models import OrganizationCourse
from path import Path as path
from pytz import UTC
from user_tasks.models import UserTaskArtifact, UserTaskStatus
from user_tasks.tasks import UserTask
from cms.djangoapps.contentstore.courseware_index import (
CoursewareSearchIndexer,
LibrarySearchIndexer,
SearchIndexingError
)
from cms.djangoapps.contentstore.storage import course_import_export_storage
from cms.djangoapps.contentstore.utils import initialize_permissions, reverse_usage_url, translation_language
from cms.djangoapps.models.settings.course_metadata import CourseMetadata
from common.djangoapps.course_action_state.models import CourseRerunState
from common.djangoapps.student.auth import has_course_author_access
from common.djangoapps.util.monitoring import monitor_import_failure
from openedx.core.djangoapps.content.learning_sequences.api import key_supports_outlines
from openedx.core.djangoapps.embargo.models import CountryAccessRule, RestrictedCourse
from openedx.core.lib.extract_tar import safetar_extractall
from xmodule.contentstore.django import contentstore
from xmodule.course_module import CourseFields
from xmodule.exceptions import SerializationError
from xmodule.modulestore import COURSE_ROOT, LIBRARY_ROOT
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import DuplicateCourseError, ItemNotFoundError, InvalidProctoringProvider
from xmodule.modulestore.xml_exporter import export_course_to_xml, export_library_to_xml
from xmodule.modulestore.xml_importer import import_course_from_xml, import_library_from_xml
from .outlines import update_outline_from_modulestore
from .toggles import course_import_olx_validation_is_enabled
User = get_user_model()
LOGGER = get_task_logger(__name__)
FILE_READ_CHUNK = 1024 # bytes
FULL_COURSE_REINDEX_THRESHOLD = 1
ALL_ALLOWED_XBLOCKS = frozenset(
[entry_point.name for entry_point in pkg_resources.iter_entry_points("xblock.v1")]
)
def clone_instance(instance, field_values):
""" Clones a Django model instance.
The specified fields are replaced with new values.
Arguments:
instance (Model): Instance of a Django model.
field_values (dict): Map of field names to new values.
Returns:
Model: New instance.
"""
instance.pk = None
for field, value in field_values.items():
setattr(instance, field, value)
instance.save()
return instance
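# Usage sketch (hypothetical field values): clearing pk before save() makes
# Django INSERT a new row, so
#   clone_instance(organization_course, {'course_id': 'course-v1:Org+New+Run'})
# persists a copy of the record that differs only in course_id.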
@shared_task
@set_code_owner_attribute
def rerun_course(source_course_key_string, destination_course_key_string, user_id, fields=None):
"""
Reruns a course in a new celery task.
"""
# import here, at top level this import prevents the celery workers from starting up correctly
from edxval.api import copy_course_videos
source_course_key = CourseKey.from_string(source_course_key_string)
destination_course_key = CourseKey.from_string(destination_course_key_string)
try:
# deserialize the payload
fields = deserialize_fields(fields) if fields else None
# use the split modulestore as the store for the rerun course,
# as the Mongo modulestore doesn't support multiple runs of the same course.
store = modulestore()
with store.default_store('split'):
store.clone_course(source_course_key, destination_course_key, user_id, fields=fields)
# set initial permissions for the user to access the course.
initialize_permissions(destination_course_key, User.objects.get(id=user_id))
# update state: Succeeded
CourseRerunState.objects.succeeded(course_key=destination_course_key)
# call edxval to attach videos to the rerun
copy_course_videos(source_course_key, destination_course_key)
# Copy OrganizationCourse
organization_course = OrganizationCourse.objects.filter(course_id=source_course_key_string).first()
if organization_course:
clone_instance(organization_course, {'course_id': destination_course_key_string})
# Copy RestrictedCourse
restricted_course = RestrictedCourse.objects.filter(course_key=source_course_key).first()
if restricted_course:
country_access_rules = CountryAccessRule.objects.filter(restricted_course=restricted_course)
new_restricted_course = clone_instance(restricted_course, {'course_key': destination_course_key})
for country_access_rule in country_access_rules:
clone_instance(country_access_rule, {'restricted_course': new_restricted_course})
org_data = ensure_organization(source_course_key.org)
add_organization_course(org_data, destination_course_key)
return "succeeded"
except DuplicateCourseError:
# do NOT delete the original course, only update the status
CourseRerunState.objects.failed(course_key=destination_course_key)
LOGGER.exception('Course Rerun Error')
return "duplicate course"
# catch all exceptions so we can update the state and properly cleanup the course.
except Exception as exc: # pylint: disable=broad-except
# update state: Failed
CourseRerunState.objects.failed(course_key=destination_course_key)
LOGGER.exception('Course Rerun Error')
try:
# cleanup any remnants of the course
modulestore().delete_course(destination_course_key, user_id)
except ItemNotFoundError:
# it's possible there was an error even before the course module was created
pass
return "exception: " + str(exc)
def deserialize_fields(json_fields):
fields = json.loads(json_fields)
for field_name, value in fields.items():
fields[field_name] = getattr(CourseFields, field_name).from_json(value)
return fields
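# Sketch (hypothetical payload): the serialized fields arrive as a JSON string
# such as '{"display_name": "My Rerun"}'; each value is converted back through
# the matching CourseFields descriptor's from_json().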
def _parse_time(time_isoformat):
""" Parses time from iso format """
return datetime.strptime(
# remove the +00:00 from the end of the formats generated within the system
time_isoformat.split('+')[0],
"%Y-%m-%dT%H:%M:%S.%f"
).replace(tzinfo=UTC)
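# Sketch: _parse_time('2021-01-01T12:00:00.000000+00:00') drops the '+00:00'
# suffix, parses the remainder with strptime, and returns a UTC-aware datetime.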
@shared_task
@set_code_owner_attribute
def update_search_index(course_id, triggered_time_isoformat):
""" Updates course search index. """
try:
course_key = CourseKey.from_string(course_id)
# We skip search indexing for CCX courses because there is currently
# some issue around Modulestore caching that makes it prohibitively
# expensive (sometimes hours-long for really complex courses).
if isinstance(course_key, CCXLocator):
LOGGER.warning(
'Search indexing skipped for CCX Course %s (this is currently too slow to run in production)',
course_id
)
return
CoursewareSearchIndexer.index(modulestore(), course_key, triggered_at=(_parse_time(triggered_time_isoformat)))
except SearchIndexingError as exc:
error_list = exc.error_list
LOGGER.error(
"Search indexing error for complete course %s - %s - %s",
course_id,
str(exc),
error_list,
)
else:
LOGGER.debug('Search indexing successful for complete course %s', course_id)
@shared_task
@set_code_owner_attribute
def update_library_index(library_id, triggered_time_isoformat):
""" Updates course search index. """
try:
library_key = CourseKey.from_string(library_id)
LibrarySearchIndexer.index(modulestore(), library_key, triggered_at=(_parse_time(triggered_time_isoformat)))
except SearchIndexingError as exc:
LOGGER.error('Search indexing error for library %s - %s', library_id, str(exc))
else:
LOGGER.debug('Search indexing successful for library %s', library_id)
class CourseExportTask(UserTask): # pylint: disable=abstract-method
"""
Base class for course and library export tasks.
"""
@staticmethod
def calculate_total_steps(arguments_dict):
"""
Get the number of in-progress steps in the export process, as shown in the UI.
For reference, these are:
1. Exporting
2. Compressing
"""
return 2
@classmethod
def generate_name(cls, arguments_dict):
"""
Create a name for this particular import task instance.
Arguments:
arguments_dict (dict): The arguments given to the task function
Returns:
text_type: The generated name
"""
key = arguments_dict['course_key_string']
return f'Export of {key}'
@shared_task(base=CourseExportTask, bind=True)
# Note: The decorator @set_code_owner_attribute could not be used because
# the implementation of this task breaks with any additional decorators.
def export_olx(self, user_id, course_key_string, language):
"""
Export a course or library to an OLX .tar.gz archive and prepare it for download.
"""
set_code_owner_attribute_from_module(__name__)
courselike_key = CourseKey.from_string(course_key_string)
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
with translation_language(language):
self.status.fail(_('Unknown User ID: {0}').format(user_id))
return
if not has_course_author_access(user, courselike_key):
with translation_language(language):
self.status.fail(_('Permission denied'))
return
if isinstance(courselike_key, LibraryLocator):
courselike_module = modulestore().get_library(courselike_key)
else:
courselike_module = modulestore().get_course(courselike_key)
try:
self.status.set_state('Exporting')
tarball = create_export_tarball(courselike_module, courselike_key, {}, self.status)
artifact = UserTaskArtifact(status=self.status, name='Output')
artifact.file.save(name=os.path.basename(tarball.name), content=File(tarball))
artifact.save()
# catch all exceptions so we can record useful error messages
except Exception as exception: # pylint: disable=broad-except
LOGGER.exception('Error exporting course %s', courselike_key, exc_info=True)
if self.status.state != UserTaskStatus.FAILED:
self.status.fail({'raw_error_msg': str(exception)})
return
def create_export_tarball(course_module, course_key, context, status=None):
"""
Generates the export tarball, or returns None if there was an error.
Updates the context with any error information if applicable.
"""
name = course_module.url_name
export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
root_dir = path(mkdtemp())
try:
if isinstance(course_key, LibraryLocator):
export_library_to_xml(modulestore(), contentstore(), course_key, root_dir, name)
else:
export_course_to_xml(modulestore(), contentstore(), course_module.id, root_dir, name)
if status:
status.set_state('Compressing')
status.increment_completed_steps()
LOGGER.debug('tar file being generated at %s', export_file.name)
with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
tar_file.add(root_dir / name, arcname=name)
except SerializationError as exc:
LOGGER.exception('There was an error exporting %s', course_key, exc_info=True)
parent = None
try:
failed_item = modulestore().get_item(exc.location)
parent_loc = modulestore().get_parent_location(failed_item.location)
if parent_loc is not None:
parent = modulestore().get_item(parent_loc)
except: # pylint: disable=bare-except
# if we have a nested exception, then we'll show the more generic error message
pass
context.update({
'in_err': True,
'raw_err_msg': str(exc),
'edit_unit_url': reverse_usage_url("container_handler", parent.location) if parent else "",
})
if status:
status.fail(json.dumps({'raw_error_msg': context['raw_err_msg'],
'edit_unit_url': context['edit_unit_url']}))
raise
except Exception as exc:
LOGGER.exception('There was an error exporting %s', course_key, exc_info=True)
context.update({
'in_err': True,
'edit_unit_url': None,
'raw_err_msg': str(exc)})
if status:
status.fail(json.dumps({'raw_error_msg': context['raw_err_msg']}))
raise
finally:
if os.path.exists(root_dir / name):
shutil.rmtree(root_dir / name)
return export_file
class CourseImportTask(UserTask): # pylint: disable=abstract-method
"""
Base class for course and library import tasks.
"""
@staticmethod
def calculate_total_steps(arguments_dict):
"""
Get the number of in-progress steps in the import process, as shown in the UI.
For reference, these are:
1. Unpacking
2. Verifying
3. Updating
"""
return 3
@classmethod
def generate_name(cls, arguments_dict):
"""
Create a name for this particular import task instance.
Arguments:
arguments_dict (dict): The arguments given to the task function
Returns:
text_type: The generated name
"""
key = arguments_dict['course_key_string']
filename = arguments_dict['archive_name']
return f'Import of {key} from {filename}'
@shared_task(base=CourseImportTask, bind=True)
# Note: The decorator @set_code_owner_attribute could not be used because # lint-amnesty, pylint: disable=too-many-statements
# the implementation of this task breaks with any additional decorators.
def import_olx(self, user_id, course_key_string, archive_path, archive_name, language):
"""
Import a course or library from a provided OLX .tar.gz archive.
"""
current_step = 'Unpacking'
courselike_key = CourseKey.from_string(course_key_string)
set_code_owner_attribute_from_module(__name__)
set_custom_attributes_for_course_key(courselike_key)
log_prefix = f'Course import {courselike_key}'
self.status.set_state(current_step)
data_root = path(settings.GITHUB_REPO_ROOT)
subdir = base64.urlsafe_b64encode(repr(courselike_key).encode('utf-8')).decode('utf-8')
course_dir = data_root / subdir
def validate_user():
"""Validate if the user exists otherwise log error. """
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist as exc:
with translation_language(language):
self.status.fail(_('User permission denied.'))
LOGGER.error(f'{log_prefix}: Unknown User: {user_id}')
monitor_import_failure(courselike_key, current_step, exception=exc)
return
def user_has_access(user):
"""Return True if user has studio write access to the given course."""
has_access = has_course_author_access(user, courselike_key)
if not has_access:
message = f'User permission denied: {user.username}'
with translation_language(language):
self.status.fail(_('Permission denied. You do not have write access to this course.'))
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return has_access
def file_is_supported():
"""Check if it is a supported file."""
file_is_valid = archive_name.endswith('.tar.gz')
if not file_is_valid:
message = f'Unsupported file {archive_name}'
with translation_language(language):
self.status.fail(_('We only support uploading a .tar.gz file.'))
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return file_is_valid
def file_exists_in_storage():
"""Verify archive path exists in storage."""
archive_path_exists = course_import_export_storage.exists(archive_path)
if not archive_path_exists:
message = f'Uploaded file {archive_path} not found'
with translation_language(language):
self.status.fail(_('Uploaded Tar file not found. Try again.'))
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return archive_path_exists
def verify_root_name_exists(course_dir, root_name):
"""Verify root xml file exists."""
def get_all_files(directory):
"""
For each file in the directory, yield a 2-tuple of (file-name,
directory-path)
"""
for directory_path, _dirnames, filenames in os.walk(directory):
for filename in filenames:
yield (filename, directory_path)
def get_dir_for_filename(directory, filename):
"""
Returns the directory path for the first file found in the directory
with the given name. If there is no file in the directory with
the specified name, return None.
"""
for name, directory_path in get_all_files(directory):
if name == filename:
return directory_path
return None
dirpath = get_dir_for_filename(course_dir, root_name)
if not dirpath:
message = f'Could not find the {root_name} file in the package.'
with translation_language(language):
self.status.fail(_('Could not find the {0} file in the package.').format(root_name))
LOGGER.error(f'{log_prefix}: {message}')
monitor_import_failure(courselike_key, current_step, message=message)
return
return dirpath
user = validate_user()
if not user:
return
if not user_has_access(user):
return
if not file_is_supported():
return
is_library = isinstance(courselike_key, LibraryLocator)
is_course = not is_library
if is_library:
root_name = LIBRARY_ROOT
courselike_module = modulestore().get_library(courselike_key)
import_func = import_library_from_xml
else:
root_name = COURSE_ROOT
courselike_module = modulestore().get_course(courselike_key)
import_func = import_course_from_xml
# Locate the uploaded OLX archive (and download it from S3 if necessary)
# Do everything in a try-except block to make sure everything is properly cleaned up.
try:
LOGGER.info(f'{log_prefix}: unpacking step started')
temp_filepath = course_dir / get_valid_filename(archive_name)
if not course_dir.isdir():
os.mkdir(course_dir)
LOGGER.info(f'{log_prefix}: importing course to {temp_filepath}')
# Copy the OLX archive from where it was uploaded to (S3, Swift, file system, etc.)
if not file_exists_in_storage():
return
with course_import_export_storage.open(archive_path, 'rb') as source:
with open(temp_filepath, 'wb') as destination:
def read_chunk():
"""
Read and return a sequence of bytes from the source file.
"""
return source.read(FILE_READ_CHUNK)
for chunk in iter(read_chunk, b''):
destination.write(chunk)
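# The loop above relies on iter(callable, sentinel): read_chunk() is called
# repeatedly until it returns b'', so the archive is streamed to disk in
# FILE_READ_CHUNK-sized pieces instead of being loaded into memory at once.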
LOGGER.info(f'{log_prefix}: Download from storage complete')
# Delete from source location
course_import_export_storage.delete(archive_path)
# If the course has an entrance exam, remove it and its corresponding milestone
# based on the current course state before import.
if is_course:
if courselike_module.entrance_exam_enabled:
fake_request = RequestFactory().get('/')
fake_request.user = user
from .views.entrance_exam import remove_entrance_exam_milestone_reference
# TODO: Is this really ok? Seems dangerous for a live course
remove_entrance_exam_milestone_reference(fake_request, courselike_key)
LOGGER.info(f'{log_prefix}: entrance exam milestone content reference has been removed')
# Send errors to client with stage at which error occurred.
except Exception as exception: # pylint: disable=broad-except
if course_dir.isdir():
shutil.rmtree(course_dir)
LOGGER.info(f'{log_prefix}: Temp data cleared')
self.status.fail(_('An Unknown error occurred during the unpacking step.'))
LOGGER.exception(f'{log_prefix}: Unknown error while unpacking', exc_info=True)
monitor_import_failure(courselike_key, current_step, exception=exception)
return
# try/except/finally block for proper clean up after receiving the file.
try:
tar_file = tarfile.open(temp_filepath)
try:
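# safetar_extractall guards against path traversal in the archive; unsafe
# member paths raise SuspiciousOperation, which is handled just below.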
safetar_extractall(tar_file, (course_dir + '/'))
except SuspiciousOperation as exc:
with translation_language(language):
self.status.fail(_('Unsafe tar file. Aborting import.'))
LOGGER.error(f'{log_prefix}: Unsafe tar file')
monitor_import_failure(courselike_key, current_step, exception=exc)
return
finally:
tar_file.close()
current_step = 'Verifying'
self.status.set_state(current_step)
self.status.increment_completed_steps()
LOGGER.info(f'{log_prefix}: Uploaded file extracted. Verification step started')
dirpath = verify_root_name_exists(course_dir, root_name)
if not dirpath:
return
if not validate_course_olx(courselike_key, dirpath, self.status):
return
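# The import functions expect course directories relative to GITHUB_REPO_ROOT,
# so convert the absolute extraction path before handing it over.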
dirpath = os.path.relpath(dirpath, data_root)
current_step = 'Updating'
self.status.set_state(current_step)
self.status.increment_completed_steps()
LOGGER.info(f'{log_prefix}: Extracted file verified. Updating course started')
courselike_items = import_func(
modulestore(), user.id,
settings.GITHUB_REPO_ROOT, [dirpath],
load_error_modules=False,
static_content_store=contentstore(),
target_id=courselike_key,
verbose=True,
status=self.status
)
new_location = courselike_items[0].location
LOGGER.debug('new course at %s', new_location)
LOGGER.info(f'{log_prefix}: Course import successful')
set_custom_attribute('course_import_completed', True)
except Exception as exception: # pylint: disable=broad-except
msg = str(exception)
status_msg = _('Unknown error while importing course.')
if isinstance(exception, InvalidProctoringProvider):
status_msg = msg
LOGGER.exception(f'{log_prefix}: Unknown error while importing course {str(exception)}')
if self.status.state != UserTaskStatus.FAILED:
self.status.fail(status_msg)
monitor_import_failure(courselike_key, current_step, exception=exception)
finally:
if course_dir.isdir():
shutil.rmtree(course_dir)
LOGGER.info(f'{log_prefix}: Temp data cleared')
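# If the task reached the Updating step for a course, re-create the entrance-exam
# milestone that was removed before the import, using the freshly imported content.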
if self.status.state == 'Updating' and is_course:
# Reload the course so we have the latest state
course = modulestore().get_course(courselike_key)
if course.entrance_exam_enabled:
entrance_exam_chapter = modulestore().get_items(
course.id,
qualifiers={'category': 'chapter'},
settings={'is_entrance_exam': True}
)[0]
metadata = {'entrance_exam_id': str(entrance_exam_chapter.location)}
CourseMetadata.update_from_dict(metadata, course, user)
from .views.entrance_exam import add_entrance_exam_milestone
add_entrance_exam_milestone(course.id, entrance_exam_chapter)
LOGGER.info(f'Course import {course.id}: Entrance exam imported')
@shared_task
@set_code_owner_attribute
def update_outline_from_modulestore_task(course_key_str):
"""
Celery task that creates a learning_sequence course outline.
"""
try:
course_key = CourseKey.from_string(course_key_str)
if not key_supports_outlines(course_key):
LOGGER.warning(
(
"update_outline_from_modulestore_task called for course key"
" %s, which does not support learning_sequence outlines."
),
course_key_str
)
return
update_outline_from_modulestore(course_key)
except Exception:  # pylint: disable=broad-except
LOGGER.exception("Could not create course outline for course %s", course_key_str)
raise # Re-raise so that errors are noted in reporting.
def validate_course_olx(courselike_key, course_dir, status):
"""
Validates course olx and records the errors as an artifact.
Arguments:
courselike_key: A locator identifies a course resource.
course_dir: complete path to the course olx
status: UserTaskStatus object.
"""
is_library = isinstance(courselike_key, LibraryLocator)
olx_is_valid = True
log_prefix = f'Course import {courselike_key}'
if is_library:
return olx_is_valid
if not course_import_olx_validation_is_enabled():
return olx_is_valid
try:
__, errorstore, __ = olxcleaner.validate(course_dir, steps=8, allowed_xblocks=ALL_ALLOWED_XBLOCKS)
except Exception: # pylint: disable=broad-except
LOGGER.exception(f'{log_prefix}: CourseOlx Could not be validated')
return olx_is_valid
log_errors = len(errorstore.errors) > 0
if log_errors:
log_errors_to_artifact(errorstore, status)
has_errors = errorstore.return_error(ErrorLevel.ERROR.value)
if not has_errors:
return olx_is_valid
LOGGER.error(f'{log_prefix}: CourseOlx validation failed')
# TODO: Do not fail the task until we have some data about kinds of
# olx validation failures. TNL-8151
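# Until then, olx_is_valid stays True even when ERROR-level issues are found;
# validation problems are only logged and stored as task artifacts.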
return olx_is_valid
def log_errors_to_artifact(errorstore, status):
"""Log errors as a task artifact."""
def get_error_by_type(error_type):
return [error for error in error_report if error.startswith(error_type)]
error_summary = report_error_summary(errorstore)
error_report = report_errors(errorstore)
message = json.dumps({
'summary': error_summary,
'errors': get_error_by_type(ErrorLevel.ERROR.name),
'warnings': get_error_by_type(ErrorLevel.WARNING.name),
})
UserTaskArtifact.objects.create(status=status, name='OLX_VALIDATION_ERROR', text=message)
| agpl-3.0 | -1,813,413,675,217,657,000 | 38.13674 | 126 | 0.658726 | false | 4.165687 | false | false | false |
najmacherrad/master_thesis | DATA/DIDA/Modif_fasta.py | 1 | 7499 | # TRANSFORM fasta sequence after mutation
# /Memoire/DIDAfasta
# coding=utf-8
from numpy import *
import petl as etl
import operator
import pandas as pd
import re
import csv
#-------------------------------------------------------------------------------
# Download and modify files from DIDA website
#-------------------------------------------------------------------------------
DIDAgenes = 'DIDA_Genes_a000.csv' #Gene name + Uniprot ACC (delimiter='\t')
table1 = etl.fromcsv('DIDA_Genes_a000.csv', delimiter='\t')
table2 = etl.rename(table1, {'Uniprot ACC': 'Uniprot_ACC','Gene name': 'Gene_name' })
table3 = etl.cut(table2, 'Uniprot_ACC', 'Gene_name')
etl.totsv(table3,'_didagenes.csv') #137 lines
table4 = etl.fromcsv('DIDA_Variants_108c0.csv', delimiter='\t')
table5 = etl.split(table4, 'Protein change', '\)', ['Protein change',''])
table6 = etl.cutout(table5,'')
table7 = etl.split(table6, 'Protein change', '\(', ['','Protein change'])
table8 = etl.cutout(table7,'')
table9 = etl.rename(table8, {'Gene name': 'Gene_name','Variant effect':'Variant_effect','Protein change':'Protein_change' })
etl.totsv(table9,'_didavariants.csv')
#-------------------------------------------------------------------------------
# Creation of file with fasta sequence mutant
#-------------------------------------------------------------------------------
file_variants = '_didavariants.csv' #364 variants
file_genes = '_didagenes.csv' #136 genes
a = pd.read_csv(file_variants,'\t')
b = pd.read_csv(file_genes,'\t')
merged = a.merge(b, on='Gene_name')
merged.to_csv('_didavariantsgenes.csv', index=False)
V = open('_didavariantsgenes.csv','r')
c1 = csv.reader(V,delimiter=',')
variants = list(c1)
variants.pop(0)
file_fasta_wt = 'DIDAgenes_wt.txt' #136 sequences
file_fasta_mut = 'DIDAgenes_mut.txt' # OUTPUT file
fasta = open(file_fasta_wt,'r')
mutant = open(file_fasta_mut,'w')
lines = fasta.readlines()
sequences={} #all fasta sequences without the 1st line >
listID={} #to keep the first line of fasta sequence with all informations
for i in range(0,len(lines)):
if lines[i].startswith('>'):
splitline = lines[i].split('|')
accessorID = splitline[1]
listID[accessorID] = lines[i].split(' ')[0]
sequences[accessorID] = ''
else:
sequences[accessorID] = sequences[accessorID] + lines[i].rstrip('\n').rstrip('*')
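# sequences now maps each Uniprot ACC to its full amino-acid string; listID keeps
# the FASTA header token (up to the first space) so it can be reused when writing
# the mutant records further down.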
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# LENGTH OF PROTEINS
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
fasta = open('DIDAgenes_wt.txt','r')
lines = fasta.readlines()
sequences={}
for i in range(0,len(lines)):
if lines[i].startswith('>'):
splitline = lines[i].split('|')
accessorID = splitline[1]
sequences[accessorID] = ''
else:
sequences[accessorID] = sequences[accessorID] + lines[i].rstrip('\n').rstrip('*')
csv_file = open('DIDAlength.csv','w')
cL = csv.writer(csv_file)
head_row = ['Uniprot_ACC','length']
cL.writerow(head_row)
length_list=[]
for key, value in sequences.items():
length = len(value)
length_list.append(len(value))
cL.writerow([key,length])
csv_file.close()
# BARCHART statistics
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('DIDAlength2.csv','\t')
#df = df.set_index('Uniprot_ACC')
df.plot(kind='bar',legend=False)
plt.ylabel('Uniprot ACC')
plt.savefig('barplotDIDA_length.png')
# HISTOGRAM
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
fig = figure()
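# Fit a normal distribution to the log-transformed protein lengths and overlay
# its density curve on the histogram below.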
mu2, std2 = stats.norm.fit(log(length_list))
bins = np.linspace(0, 99, 30)
plt.hist(log(length_list),normed=True,bins=15,alpha=0.8)
xmin2, xmax2 = plt.xlim()
x2 = np.linspace(xmin2, xmax2, 100)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x2, p2, 'k--',linewidth=2)
plt.xlabel('log(protein length)')
plt.ylabel('Frequence')
#plt.xlim(-4,4)
#plt.ylim(0,0.8)
plt.title('fit results: mu=%.2f, std=%.2f'%(mu2, std2))
fig.savefig('histo_DIDA_length.png')
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# MODIFICATION of fasta sequence after mutation
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PROBLEME =[]
compteur = 0
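# For each variant: find its protein by Uniprot ACC, check that the reference
# amino acid at the annotated position matches, apply the deletion/insertion/
# missense change, and write the mutated sequence as a new FASTA record
# (silent variants are written unchanged). Mismatches are collected in PROBLEME.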
for v in variants:
for ID in sequences.keys():
if ID == v[4]:
#---------------------------------------------------------------
if v[2]=='deletion':
l1=list(v[3])
aa1 = l1[0]
objet1 = re.search(r'[0-9]+',v[3])
position1 = int(objet1.group())
s1 = list(sequences[ID])
if s1[position1 - 1]==aa1 :
s1[position1 - 1] = ''
seq1=''.join(s1)
mutant.write(listID[ID] + '|variant_' + v[0] + '\n')
for i in range(0,len(seq1),60):
mutant.write(seq1[i:min(len(seq1),i+60)] + '\n')
else:
str1='PROBLEME in '+ ID + ' position ' + str(position1)
PROBLEME.append(str1)
compteur = compteur + 1
#---------------------------------------------------------------
elif v[2]=='insertion':
l3=list(v[3])
aa3 = l3[0]
objet3 = re.search(r'[0-9]+',v[3])
position3 = int(objet3.group())
#new AA
objet3bis = re.search(r'[A-Z]+$',v[3] )
new_aa3=objet3bis.group()
s3 = list(sequences[ID])
if s3[position3 - 1]==aa3 :
s3[position3] = new_aa3 + s3[position3]
seq3=''.join(s3)
mutant.write(listID[ID] + '|variant_' + v[0] + '\n')
for i in range(0,len(seq3),60):
mutant.write(seq3[i:min(len(seq3),i+60)] + '\n')
else:
str3 = 'PROBLEME in '+ ID + ' position '+ str(position3)
PROBLEME.append(str3)
compteur = compteur + 1
#-----------------------------------------------------------------
elif v[2]=='missense':
l4=list(v[3])
aa4 = l4[0]
objet4 = re.search(r'[0-9]+',v[3])
position4 = int(objet4.group())
#new AA
new_aa4=l4[-1]
s4 = list(sequences[ID])
if s4[position4 - 1]==aa4 :
s4[position4 - 1] = new_aa4
seq4=''.join(s4)
mutant.write(listID[ID] + '|variant_' + v[0] + '\n')
for i in range(0,len(seq4),60):
mutant.write(seq4[i:min(len(seq4),i+60)] + '\n')
else:
str4 = 'PROBLEME in '+ ID + ' position '+ str(position4)
PROBLEME.append(str4)
compteur = compteur + 1
#---------------------------------------------------------------NO CHANGE
elif v[2]=='silent':
seq6 = sequences[ID]
mutant.write(listID[ID] + '|variant_' + v[0] + '\n')
for i in range(0,len(seq6),60):
mutant.write(seq6[i:min(len(seq6),i+60)] + '\n')
compteur = compteur + 1
fasta.close()
V.close()
mutant.close()
| mit | -8,691,030,830,536,997,000 | 38.26178 | 124 | 0.479531 | false | 3.35076 | false | false | false |
CORDEA/my-python-modules | send_gmail/send_gmail.py | 1 | 1140 | ##!/usr/bin/env python
# encoding:utf-8
#
# Copyright 2014-2017 Yoshihiro Tanaka
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__Author__ = "Yoshihiro Tanaka"
__date__ = "2015-01-13"
import smtplib
from email.mime.text import MIMEText
def send_gmail(sub, msg, address):
From = "your gmail address"
To = address
Subject = sub
Message = msg
msg = MIMEText(Message)
msg['Subject'] = Subject
msg['From'] = From
msg['To'] = To
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(From, "your gmail password")
s.sendmail(From, To, msg.as_string())
s.close()
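# Example usage (hypothetical values; the Gmail account configured above needs
# an app password or equivalent SMTP access for the login to succeed):
# send_gmail('Test subject', 'Hello from Python', 'recipient@example.com')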
| apache-2.0 | -7,518,351,877,246,057,000 | 26.804878 | 74 | 0.687719 | false | 3.433735 | false | false | false |