repo_name stringlengths 5–92 | path stringlengths 4–221 | copies stringclasses 19 values | size stringlengths 4–6 | content stringlengths 766–896k | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000–9,223,102,107B | line_mean float64 6.51–99.9 | line_max int64 32–997 | alpha_frac float64 0.25–0.96 | autogenerated bool 1 class | ratio float64 1.5–13.6 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
Berkeley-BORIS/BORIS_Code | notebooks/stereocalibrationtesting/stereo_calibration_orig.py | 1 | 8634 |
import sys
import os
import random
import fnmatch
import cv
import cv2
import numpy as np
#print cv2.__version__
#if not cv2.__version__.startswith('2.3'):
# raise NotImplementedError("WARNING: cv2 is version {0}!! We haven't implemented the inverted transform direction changed after 2.3!".format(cv2.__version__))
def stereo_calibration(check_img_folder,nimages,display=False,dims=(4,11),size=(640,480)):
    '''Reads files from a directory of stereo images of an OpenCV circle grid,
    calibrates the intrinsics of each camera, then the extrinsics of the stereo rig.
    '''
    # grab calibration frames directory
for dir in os.listdir(check_img_folder):
if fnmatch.fnmatch(dir,'calibration_frames*'):
check_img_folder = check_img_folder + dir + '/'
break
# Number of points in circle grid
num_pts = dims[0] * dims[1]
if not os.path.exists(check_img_folder + 'images_used/'):
os.mkdir(check_img_folder + 'images_used/')
# evaluate image points
nimg = 0 #number of images with found corners
iptsF1 = [] #image point arrays to fill up
iptsF2 = []
random_images = random.sample(range(500), nimages)
#for n in range(0,nimages,2):
for n in random_images:
filename1 = check_img_folder + 'cam1_frame_'+str(n+1)+'.bmp'
filename2 = check_img_folder + 'cam2_frame_'+str(n+1)+'.bmp'
if os.path.exists(filename1) and os.path.exists(filename2):
img1 = cv2.imread(filename1,0)
img2 = cv2.imread(filename2,0)
# find center points in circle grid
[found1,points1] = cv2.findCirclesGridDefault(img1,dims,flags=(cv2.CALIB_CB_ASYMMETRIC_GRID))
[found2,points2] = cv2.findCirclesGridDefault(img2,dims,flags=(cv2.CALIB_CB_ASYMMETRIC_GRID))
# copy the found points into the ipts matrices
temp1 = np.zeros( (num_pts,2) )
temp2 = np.zeros( (num_pts,2) )
if found1 and found2:
for i in range(num_pts):
temp1[i,0]=points1[i,0,0]
temp1[i,1]=points1[i,0,1]
temp2[i,0]=points2[i,0,0]
temp2[i,1]=points2[i,0,1]
iptsF1.append(temp1)
iptsF2.append(temp2)
nimg = nimg + 1 #increment image counter
#save images with points identified
drawn_boards_1 = img1.copy()
drawn_boards_2 = img2.copy()
cv2.drawChessboardCorners(drawn_boards_1, dims, points1, found1)
cv2.drawChessboardCorners(drawn_boards_2, dims, points2, found2)
cv2.imwrite(check_img_folder + 'images_used/' + 'cam1_frame_'+str(n+1)+'.bmp', drawn_boards_1)
cv2.imwrite(check_img_folder + 'images_used/' + 'cam2_frame_'+str(n+1)+'.bmp', drawn_boards_2)
print "\n Usable stereo pairs: " + str(nimg)
# convert image points to numpy
iptsF1 = np.array(iptsF1, dtype = np.float32)
iptsF2 = np.array(iptsF2, dtype = np.float32)
# evaluate object points
opts = object_points(dims,nimg,4.35)
# initialize camera parameters
intrinsics1 = np.zeros( (3,3) )
intrinsics2 = np.zeros( (3,3) )
distortion1 = np.zeros( (8,1) )
distortion2 = np.zeros( (8,1) )
# Set initial guess for intrinsic camera parameters (focal length = 0.35cm)
intrinsics1[0,0] = 583.3
intrinsics1[1,1] = 583.3
intrinsics1[0,2] = 320
intrinsics1[1,2] = 240
intrinsics1[2,2] = 1.0
intrinsics2[0,0] = 583.3
intrinsics2[1,1] = 583.3
intrinsics2[0,2] = 320
intrinsics2[1,2] = 240
intrinsics2[2,2] = 1.0
#calibrate cameras
print 'Calibrating camera 1...'
(cam1rms, intrinsics1, distortion1, rotv1, trav1) = cv2.calibrateCamera(opts, iptsF1, size, intrinsics1, distortion1, flags=int(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
print "\nEstimated intrinsic parameters for camera 1:"
for i in range(3):
print [intrinsics1[i,j] for j in range(3)]
print "\nEstimated distortion parameters for camera 1:"
print distortion1
print 'Calibrating camera 2...'
(cam2rms, intrinsics2, distortion2, rotv2, trav2) = cv2.calibrateCamera(opts, iptsF2, size, intrinsics2, distortion2,flags=int(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
print "\nEstimated intrinsic parameters for camera 2:"
for i in range(3):
print [intrinsics2[i,j] for j in range(3)]
print "\nEstimated distortion parameters for camera 2:"
print distortion2
print "\n rms pixel error:"
print "cam1 orig: " + str(cam1rms)
print "cam2 orig: " + str(cam2rms)
# Estimate extrinsic parameters from stereo point correspondences
print "\n Stereo estimating..."
#(stereorms, intrinsics1, distortion1, intrinsics2, distortion2, R, T, E, F) = cv2.stereoCalibrate(opts, iptsF1, iptsF2, intrinsics1, distortion1, intrinsics2, distortion2, size,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1e-7), flags=(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
(stereorms, intrinsics1, distortion1, intrinsics2, distortion2, R, T, E, F) = cv2.stereoCalibrate(opts, iptsF1, iptsF2, size, intrinsics1, distortion1, intrinsics2, distortion2,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1e-7), flags=(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
print "\nEstimated extrinsic parameters between cameras 1 and 2:\nRotation:"
for i in range(3):
print [R[i,j] for j in range(3)]
print "\nTranslation:"
print [T[i,0] for i in range(3)]
print "\n rms pixel error:"
print "stereo: " + str(stereorms)
# Initialize rectification parameters
R1=cv.CreateMat(3,3,cv.CV_64F)
R2=cv.CreateMat(3,3,cv.CV_64F)
P1=cv.CreateMat(3,4,cv.CV_64F)
P2=cv.CreateMat(3,4,cv.CV_64F)
Q=cv.CreateMat(4,4,cv.CV_64F)
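    # Convert the numpy results to legacy CvMat objects so they can be passed
    # to the old-style cv.* rectification functions used below.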
intrinsics1 = cv.fromarray(intrinsics1.copy())
intrinsics2 = cv.fromarray(intrinsics2.copy())
distortion1 = cv.fromarray(distortion1.copy())
distortion2 = cv.fromarray(distortion2.copy())
R = cv.fromarray(R.copy())
T = cv.fromarray(T.copy())
E = cv.fromarray(E.copy())
F = cv.fromarray(F.copy())
new_size = (640,480)
# Estimate rectification
(roi1,roi2)=cv.StereoRectify(intrinsics1, intrinsics2, distortion1, distortion2, size, R,T,R1, R2, P1,P2, Q,cv.CV_CALIB_ZERO_DISPARITY)
# Rectification maps
#Left maps
map1x = cv.CreateMat(new_size[1], new_size[0], cv.CV_32FC1)
map2x = cv.CreateMat(new_size[1], new_size[0], cv.CV_32FC1)
#Right maps
map1y = cv.CreateMat(new_size[1], new_size[0], cv.CV_32FC1)
map2y = cv.CreateMat(new_size[1], new_size[0], cv.CV_32FC1)
cv.InitUndistortRectifyMap(intrinsics1, distortion1, R1, P1, map1x, map1y)
cv.InitUndistortRectifyMap(intrinsics2, distortion2, R2, P2, map2x, map2y)
#save parameter estimates
print "\nSaving all parameters to the folder with checkerboard images..."
calib_params = open(check_img_folder+ 'calib_params.txt', 'w')
calib_params.write("\n num stereo frames: " + str(nimg))
calib_params.write("\n rms cam1: " + str(cam1rms))
calib_params.write("\n rms cam2: " + str(cam2rms))
calib_params.write("\n rms stereo: " + str(stereorms))
calib_params.close()
cv.Save(check_img_folder + 'Intrinsics_cam1.xml',intrinsics1)
cv.Save(check_img_folder + 'Intrinsics_cam2.xml',intrinsics2)
cv.Save(check_img_folder + 'Distortion_cam1.xml',distortion1)
cv.Save(check_img_folder + 'Distortion_cam2.xml',distortion2)
cv.Save(check_img_folder + 'Projection_matrix_cam1.xml',P1)
cv.Save(check_img_folder + 'Projection_matrix_cam2.xml',P2)
cv.Save(check_img_folder + 'Essential_matrix.xml',E)
cv.Save(check_img_folder + 'Fundamental_matrix.xml',F)
cv.Save(check_img_folder + 'Rotation_matrix.xml',R)
cv.Save(check_img_folder + 'Translation_vector.xml',T)
cv.Save(check_img_folder + 'Disp2depth_matrix.xml',Q)
cv.Save(check_img_folder + 'Rectification_transform_cam1.xml',R1)
cv.Save(check_img_folder + 'Rectification_transform_cam2.xml',R2)
cv.Save(check_img_folder + 'Rectification_map_cam1x.xml',map1x)
cv.Save(check_img_folder + 'Rectification_map_cam1y.xml',map1y)
cv.Save(check_img_folder + 'Rectification_map_cam2x.xml',map2x)
cv.Save(check_img_folder + 'Rectification_map_cam2y.xml',map2y)
return None
def object_points(dims,num_images,square_size):
'''determine 3d object points for each image
'''
width = dims[0]
height = dims[1]
num_pts = width*height
opts = []
for n in range(num_images):
temp = np.zeros( (num_pts,3) )
for i in range(height):
for j in range(width):
if i%2==0:
temp[i*width+j,0] = (i*(square_size/2.00))
temp[i*width+j,1] = j*square_size
temp[i*width+j,2] = 0
else:
temp[i*width+j,0] = (i*(square_size/2.00))
temp[i*width+j,1] = (j*square_size) + square_size/2.00
temp[i*width+j,2] = 0
opts.append(temp)
opts = np.array(opts, dtype = np.float32)
return opts
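# Geometry note (for the values used above, dims=(4,11) and square_size=4.35):
# even rows place circles at y = 0, 4.35, 8.7, 13.05 while odd rows are shifted
# by half a square (2.175) in y, and x advances by 2.175 per row -- the
# staggered layout of an OpenCV asymmetric circle grid.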
if __name__=="__main__":
check_img_folder = sys.argv[1]
nimages = int(sys.argv[2])
stereo_calibration(check_img_folder,nimages,display=False,dims=(4,11),size=(640,480))
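# Usage sketch (hypothetical paths): the folder passed as argv[1] is expected
# to contain a "calibration_frames*" subfolder of cam1_frame_<n>.bmp /
# cam2_frame_<n>.bmp pairs, e.g.:
#   python stereo_calibration_orig.py /data/rig_recordings/session1/ 50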
| mit | -3,516,562,591,805,934,000 | 36.703057 | 315 | 0.702571 | false | 2.513537 | false | false | false |
shenaishiren/xiaodi | xiaodi/api/abc.py | 1 | 2864 |
# coding=utf-8
import types
import logging
from tornado.gen import coroutine
from tornado.web import asynchronous
from tornado.web import RequestHandler
from tornado.web import HTTPError
from xiaodi.api.errors import HTTPAPIError
from xiaodi.api.errors import INTERNAL_SERVER_ERROR
from xiaodi.api.errors import BAD_REQUEST_ERROR
LOG = logging.getLogger(__name__)
class GenAsyncMetaclass(type):
def __new__(cls, clsname, bases, attrs):
allow_method = ['get', 'put', 'delete', 'post', 'options', 'patch']
for method in attrs:
if method.lower() in allow_method:
attrs[method] = coroutine(asynchronous(attrs[method]))
return super(GenAsyncMetaclass, cls).__new__(cls, clsname, bases, attrs)
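# Any HTTP verb method (get/put/delete/post/options/patch) defined on a class
# using this metaclass is automatically wrapped as coroutine(asynchronous(...)),
# so handlers can yield futures without decorating each method by hand.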
class Namespace(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
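# Namespace allows request-scoped values to be read either way, e.g.
# (illustrative only): ns = Namespace(); ns.user = u; ns['user'] is ns.user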
class BaseApiHandler(RequestHandler):
__metaclass__ = GenAsyncMetaclass
def prepare(self):
self._G = Namespace()
def on_finish(self):
self.set_header("Content-Type", "application/json; charset=UTF-8")
def _async_write(self, data, finish=True):
self.write(data)
        # close the long-lived connection once the response is complete
if finish:
self.finish()
def write_success(self, data=None, finish=True):
assert isinstance(data, (types.NoneType, dict)), 'data must be NoneType or dict'
self._async_write(dict((data or {}), **{'status': 'success'}), finish=finish)
def write_error(self, status_code, **kwargs):
try:
exc_info = kwargs.pop('exc_info')
e = exc_info[1]
if isinstance(e, HTTPAPIError):
pass
elif isinstance(e, HTTPError):
e = HTTPAPIError(BAD_REQUEST_ERROR, e.log_message, e.status_code)
else:
e = HTTPAPIError(INTERNAL_SERVER_ERROR, str(e), 500)
self.set_status(status_code)
self._async_write(str(e))
except Exception as e:
LOG.exception(str(e))
return super(BaseApiHandler, self).write_error(status_code, **kwargs)
# private method
def __set_cross_domain_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Max-Age', 1000)
self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE, OPTIONS')
self.set_header('Access-Control-Allow-Headers', '*')
def set_default_headers(self):
self.__set_cross_domain_headers()
self.set_header('Content-type', 'application/json')
def options(self, *args):
self.__set_cross_domain_headers()
@property
def settings(self):
return self.application.settings
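# Minimal usage sketch (hypothetical handler, not part of this module):
# class PingHandler(BaseApiHandler):
#     def get(self):
#         self.write_success({'pong': True})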
| gpl-3.0 | -5,720,625,523,224,219,000 | 31.179775 | 90 | 0.622556 | false | 3.87027 | false | false | false |
nataliemcmullen/WikiMiney | db/data/2012/10/get_gzs.py | 1 | 24893 |
urls = [
"pagecounts-20121001-000000.gz",
"pagecounts-20121001-010000.gz",
"pagecounts-20121001-020000.gz",
"pagecounts-20121001-030000.gz",
"pagecounts-20121001-040000.gz",
"pagecounts-20121001-050000.gz",
"pagecounts-20121001-060001.gz",
"pagecounts-20121001-070000.gz",
"pagecounts-20121001-080000.gz",
"pagecounts-20121001-090000.gz",
"pagecounts-20121001-100000.gz",
"pagecounts-20121001-110000.gz",
"pagecounts-20121001-120000.gz",
"pagecounts-20121001-130000.gz",
"pagecounts-20121001-140000.gz",
"pagecounts-20121001-150000.gz",
"pagecounts-20121001-160000.gz",
"pagecounts-20121001-170000.gz",
"pagecounts-20121001-180000.gz",
"pagecounts-20121001-190000.gz",
"pagecounts-20121001-200001.gz",
"pagecounts-20121001-210000.gz",
"pagecounts-20121001-220000.gz",
"pagecounts-20121001-230000.gz",
"pagecounts-20121002-000000.gz",
"pagecounts-20121002-010000.gz",
"pagecounts-20121002-020000.gz",
"pagecounts-20121002-030000.gz",
"pagecounts-20121002-040000.gz",
"pagecounts-20121002-050000.gz",
"pagecounts-20121002-060000.gz",
"pagecounts-20121002-070000.gz",
"pagecounts-20121002-080000.gz",
"pagecounts-20121002-090000.gz",
"pagecounts-20121002-100001.gz",
"pagecounts-20121002-110000.gz",
"pagecounts-20121002-120000.gz",
"pagecounts-20121002-130000.gz",
"pagecounts-20121002-140000.gz",
"pagecounts-20121002-150000.gz",
"pagecounts-20121002-160000.gz",
"pagecounts-20121002-170000.gz",
"pagecounts-20121002-180000.gz",
"pagecounts-20121002-190000.gz",
"pagecounts-20121002-200000.gz",
"pagecounts-20121002-210000.gz",
"pagecounts-20121002-220000.gz",
"pagecounts-20121002-230000.gz",
"pagecounts-20121003-000001.gz",
"pagecounts-20121003-010000.gz",
"pagecounts-20121003-020000.gz",
"pagecounts-20121003-030000.gz",
"pagecounts-20121003-040000.gz",
"pagecounts-20121003-050000.gz",
"pagecounts-20121003-060000.gz",
"pagecounts-20121003-070000.gz",
"pagecounts-20121003-080000.gz",
"pagecounts-20121003-090000.gz",
"pagecounts-20121003-100000.gz",
"pagecounts-20121003-110000.gz",
"pagecounts-20121003-120000.gz",
"pagecounts-20121003-130001.gz",
"pagecounts-20121003-140000.gz",
"pagecounts-20121003-150000.gz",
"pagecounts-20121003-160000.gz",
"pagecounts-20121003-170000.gz",
"pagecounts-20121003-180000.gz",
"pagecounts-20121003-190000.gz",
"pagecounts-20121003-200000.gz",
"pagecounts-20121003-210000.gz",
"pagecounts-20121003-220000.gz",
"pagecounts-20121003-230000.gz",
"pagecounts-20121004-000000.gz",
"pagecounts-20121004-010000.gz",
"pagecounts-20121004-020000.gz",
"pagecounts-20121004-030001.gz",
"pagecounts-20121004-040000.gz",
"pagecounts-20121004-050000.gz",
"pagecounts-20121004-060000.gz",
"pagecounts-20121004-070000.gz",
"pagecounts-20121004-080000.gz",
"pagecounts-20121004-090000.gz",
"pagecounts-20121004-100000.gz",
"pagecounts-20121004-110000.gz",
"pagecounts-20121004-120000.gz",
"pagecounts-20121004-130000.gz",
"pagecounts-20121004-140000.gz",
"pagecounts-20121004-150000.gz",
"pagecounts-20121004-160000.gz",
"pagecounts-20121004-170001.gz",
"pagecounts-20121004-180000.gz",
"pagecounts-20121004-190000.gz",
"pagecounts-20121004-200000.gz",
"pagecounts-20121004-210000.gz",
"pagecounts-20121004-220000.gz",
"pagecounts-20121004-230000.gz",
"pagecounts-20121005-000000.gz",
"pagecounts-20121005-010000.gz",
"pagecounts-20121005-020000.gz",
"pagecounts-20121005-030000.gz",
"pagecounts-20121005-040000.gz",
"pagecounts-20121005-050000.gz",
"pagecounts-20121005-060000.gz",
"pagecounts-20121005-070001.gz",
"pagecounts-20121005-080000.gz",
"pagecounts-20121005-090000.gz",
"pagecounts-20121005-100000.gz",
"pagecounts-20121005-110000.gz",
"pagecounts-20121005-120000.gz",
"pagecounts-20121005-130000.gz",
"pagecounts-20121005-140000.gz",
"pagecounts-20121005-150000.gz",
"pagecounts-20121005-160000.gz",
"pagecounts-20121005-170000.gz",
"pagecounts-20121005-180000.gz",
"pagecounts-20121005-190000.gz",
"pagecounts-20121005-200000.gz",
"pagecounts-20121005-210001.gz",
"pagecounts-20121005-220000.gz",
"pagecounts-20121005-230000.gz",
"pagecounts-20121006-000000.gz",
"pagecounts-20121006-010000.gz",
"pagecounts-20121006-020000.gz",
"pagecounts-20121006-030000.gz",
"pagecounts-20121006-040000.gz",
"pagecounts-20121006-050000.gz",
"pagecounts-20121006-060000.gz",
"pagecounts-20121006-070000.gz",
"pagecounts-20121006-080000.gz",
"pagecounts-20121006-090000.gz",
"pagecounts-20121006-100000.gz",
"pagecounts-20121006-110000.gz",
"pagecounts-20121006-120001.gz",
"pagecounts-20121006-130000.gz",
"pagecounts-20121006-140000.gz",
"pagecounts-20121006-150000.gz",
"pagecounts-20121006-160000.gz",
"pagecounts-20121006-170000.gz",
"pagecounts-20121006-180000.gz",
"pagecounts-20121006-190000.gz",
"pagecounts-20121006-200000.gz",
"pagecounts-20121006-210000.gz",
"pagecounts-20121006-220000.gz",
"pagecounts-20121006-230000.gz",
"pagecounts-20121007-000000.gz",
"pagecounts-20121007-010000.gz",
"pagecounts-20121007-020001.gz",
"pagecounts-20121007-030000.gz",
"pagecounts-20121007-040000.gz",
"pagecounts-20121007-050000.gz",
"pagecounts-20121007-060000.gz",
"pagecounts-20121007-070000.gz",
"pagecounts-20121007-080000.gz",
"pagecounts-20121007-090000.gz",
"pagecounts-20121007-100000.gz",
"pagecounts-20121007-110000.gz",
"pagecounts-20121007-120000.gz",
"pagecounts-20121007-130000.gz",
"pagecounts-20121007-140000.gz",
"pagecounts-20121007-150001.gz",
"pagecounts-20121007-160000.gz",
"pagecounts-20121007-170000.gz",
"pagecounts-20121007-180000.gz",
"pagecounts-20121007-190000.gz",
"pagecounts-20121007-200000.gz",
"pagecounts-20121007-210000.gz",
"pagecounts-20121007-220000.gz",
"pagecounts-20121007-230000.gz",
"pagecounts-20121008-000000.gz",
"pagecounts-20121008-010000.gz",
"pagecounts-20121008-020000.gz",
"pagecounts-20121008-030000.gz",
"pagecounts-20121008-040001.gz",
"pagecounts-20121008-050000.gz",
"pagecounts-20121008-060000.gz",
"pagecounts-20121008-070000.gz",
"pagecounts-20121008-080000.gz",
"pagecounts-20121008-090000.gz",
"pagecounts-20121008-100000.gz",
"pagecounts-20121008-110000.gz",
"pagecounts-20121008-120000.gz",
"pagecounts-20121008-130000.gz",
"pagecounts-20121008-140000.gz",
"pagecounts-20121008-150000.gz",
"pagecounts-20121008-160000.gz",
"pagecounts-20121008-170000.gz",
"pagecounts-20121008-180001.gz",
"pagecounts-20121008-190000.gz",
"pagecounts-20121008-200000.gz",
"pagecounts-20121008-210000.gz",
"pagecounts-20121008-220000.gz",
"pagecounts-20121008-230000.gz",
"pagecounts-20121009-000000.gz",
"pagecounts-20121009-010000.gz",
"pagecounts-20121009-020000.gz",
"pagecounts-20121009-030000.gz",
"pagecounts-20121009-040000.gz",
"pagecounts-20121009-050000.gz",
"pagecounts-20121009-060000.gz",
"pagecounts-20121009-070001.gz",
"pagecounts-20121009-080000.gz",
"pagecounts-20121009-090000.gz",
"pagecounts-20121009-100000.gz",
"pagecounts-20121009-110000.gz",
"pagecounts-20121009-120000.gz",
"pagecounts-20121009-130000.gz",
"pagecounts-20121009-140000.gz",
"pagecounts-20121009-150000.gz",
"pagecounts-20121009-160000.gz",
"pagecounts-20121009-170000.gz",
"pagecounts-20121009-180000.gz",
"pagecounts-20121009-190000.gz",
"pagecounts-20121009-200001.gz",
"pagecounts-20121009-210000.gz",
"pagecounts-20121009-220000.gz",
"pagecounts-20121009-230000.gz",
"pagecounts-20121010-000000.gz",
"pagecounts-20121010-010000.gz",
"pagecounts-20121010-020000.gz",
"pagecounts-20121010-030000.gz",
"pagecounts-20121010-040000.gz",
"pagecounts-20121010-050000.gz",
"pagecounts-20121010-060000.gz",
"pagecounts-20121010-070000.gz",
"pagecounts-20121010-080000.gz",
"pagecounts-20121010-090000.gz",
"pagecounts-20121010-100000.gz",
"pagecounts-20121010-110001.gz",
"pagecounts-20121010-120000.gz",
"pagecounts-20121010-130000.gz",
"pagecounts-20121010-140000.gz",
"pagecounts-20121010-150000.gz",
"pagecounts-20121010-160000.gz",
"pagecounts-20121010-170000.gz",
"pagecounts-20121010-180000.gz",
"pagecounts-20121010-190000.gz",
"pagecounts-20121010-200000.gz",
"pagecounts-20121010-210000.gz",
"pagecounts-20121010-220000.gz",
"pagecounts-20121010-230000.gz",
"pagecounts-20121011-000000.gz",
"pagecounts-20121011-010001.gz",
"pagecounts-20121011-020000.gz",
"pagecounts-20121011-030000.gz",
"pagecounts-20121011-040000.gz",
"pagecounts-20121011-050000.gz",
"pagecounts-20121011-060000.gz",
"pagecounts-20121011-070000.gz",
"pagecounts-20121011-080000.gz",
"pagecounts-20121011-090000.gz",
"pagecounts-20121011-100000.gz",
"pagecounts-20121011-110000.gz",
"pagecounts-20121011-120000.gz",
"pagecounts-20121011-130000.gz",
"pagecounts-20121011-140000.gz",
"pagecounts-20121011-150001.gz",
"pagecounts-20121011-160000.gz",
"pagecounts-20121011-170000.gz",
"pagecounts-20121011-180000.gz",
"pagecounts-20121011-190000.gz",
"pagecounts-20121011-200000.gz",
"pagecounts-20121011-210000.gz",
"pagecounts-20121011-220000.gz",
"pagecounts-20121011-230000.gz",
"pagecounts-20121012-000000.gz",
"pagecounts-20121012-010000.gz",
"pagecounts-20121012-020000.gz",
"pagecounts-20121012-030000.gz",
"pagecounts-20121012-040000.gz",
"pagecounts-20121012-050000.gz",
"pagecounts-20121012-060001.gz",
"pagecounts-20121012-070000.gz",
"pagecounts-20121012-080000.gz",
"pagecounts-20121012-090000.gz",
"pagecounts-20121012-100000.gz",
"pagecounts-20121012-110000.gz",
"pagecounts-20121012-120000.gz",
"pagecounts-20121012-130000.gz",
"pagecounts-20121012-140000.gz",
"pagecounts-20121012-150000.gz",
"pagecounts-20121012-160000.gz",
"pagecounts-20121012-170000.gz",
"pagecounts-20121012-180000.gz",
"pagecounts-20121012-190000.gz",
"pagecounts-20121012-200001.gz",
"pagecounts-20121012-210000.gz",
"pagecounts-20121012-220000.gz",
"pagecounts-20121012-230000.gz",
"pagecounts-20121013-000000.gz",
"pagecounts-20121013-010000.gz",
"pagecounts-20121013-020000.gz",
"pagecounts-20121013-030000.gz",
"pagecounts-20121013-040000.gz",
"pagecounts-20121013-050000.gz",
"pagecounts-20121013-060000.gz",
"pagecounts-20121013-070000.gz",
"pagecounts-20121013-080000.gz",
"pagecounts-20121013-090001.gz",
"pagecounts-20121013-100000.gz",
"pagecounts-20121013-110000.gz",
"pagecounts-20121013-120000.gz",
"pagecounts-20121013-130000.gz",
"pagecounts-20121013-140000.gz",
"pagecounts-20121013-150000.gz",
"pagecounts-20121013-160000.gz",
"pagecounts-20121013-170000.gz",
"pagecounts-20121013-180000.gz",
"pagecounts-20121013-190000.gz",
"pagecounts-20121013-200000.gz",
"pagecounts-20121013-210000.gz",
"pagecounts-20121013-220001.gz",
"pagecounts-20121013-230000.gz",
"pagecounts-20121014-000000.gz",
"pagecounts-20121014-010000.gz",
"pagecounts-20121014-020000.gz",
"pagecounts-20121014-030000.gz",
"pagecounts-20121014-040000.gz",
"pagecounts-20121014-050000.gz",
"pagecounts-20121014-060000.gz",
"pagecounts-20121014-070000.gz",
"pagecounts-20121014-080000.gz",
"pagecounts-20121014-090000.gz",
"pagecounts-20121014-100000.gz",
"pagecounts-20121014-110000.gz",
"pagecounts-20121014-120001.gz",
"pagecounts-20121014-130000.gz",
"pagecounts-20121014-140000.gz",
"pagecounts-20121014-150000.gz",
"pagecounts-20121014-160000.gz",
"pagecounts-20121014-170000.gz",
"pagecounts-20121014-180000.gz",
"pagecounts-20121014-190000.gz",
"pagecounts-20121014-200000.gz",
"pagecounts-20121014-210000.gz",
"pagecounts-20121014-220000.gz",
"pagecounts-20121014-230000.gz",
"pagecounts-20121015-000000.gz",
"pagecounts-20121015-010000.gz",
"pagecounts-20121015-020001.gz",
"pagecounts-20121015-030000.gz",
"pagecounts-20121015-040000.gz",
"pagecounts-20121015-050000.gz",
"pagecounts-20121015-060000.gz",
"pagecounts-20121015-070000.gz",
"pagecounts-20121015-080000.gz",
"pagecounts-20121015-090000.gz",
"pagecounts-20121015-100000.gz",
"pagecounts-20121015-110000.gz",
"pagecounts-20121015-120000.gz",
"pagecounts-20121015-130000.gz",
"pagecounts-20121015-140001.gz",
"pagecounts-20121015-150000.gz",
"pagecounts-20121015-160000.gz",
"pagecounts-20121015-170000.gz",
"pagecounts-20121015-180000.gz",
"pagecounts-20121015-190000.gz",
"pagecounts-20121015-200000.gz",
"pagecounts-20121015-210000.gz",
"pagecounts-20121015-220000.gz",
"pagecounts-20121015-230000.gz",
"pagecounts-20121016-000000.gz",
"pagecounts-20121016-010000.gz",
"pagecounts-20121016-020000.gz",
"pagecounts-20121016-030000.gz",
"pagecounts-20121016-040001.gz",
"pagecounts-20121016-050000.gz",
"pagecounts-20121016-060000.gz",
"pagecounts-20121016-070000.gz",
"pagecounts-20121016-080000.gz",
"pagecounts-20121016-090000.gz",
"pagecounts-20121016-100000.gz",
"pagecounts-20121016-110000.gz",
"pagecounts-20121016-120000.gz",
"pagecounts-20121016-130000.gz",
"pagecounts-20121016-140000.gz",
"pagecounts-20121016-150000.gz",
"pagecounts-20121016-160001.gz",
"pagecounts-20121016-170000.gz",
"pagecounts-20121016-180000.gz",
"pagecounts-20121016-190000.gz",
"pagecounts-20121016-200000.gz",
"pagecounts-20121016-210000.gz",
"pagecounts-20121016-220000.gz",
"pagecounts-20121016-230000.gz",
"pagecounts-20121017-000000.gz",
"pagecounts-20121017-010000.gz",
"pagecounts-20121017-020000.gz",
"pagecounts-20121017-030000.gz",
"pagecounts-20121017-040000.gz",
"pagecounts-20121017-050000.gz",
"pagecounts-20121017-060001.gz",
"pagecounts-20121017-070000.gz",
"pagecounts-20121017-080000.gz",
"pagecounts-20121017-090000.gz",
"pagecounts-20121017-100000.gz",
"pagecounts-20121017-110000.gz",
"pagecounts-20121017-120000.gz",
"pagecounts-20121017-130000.gz",
"pagecounts-20121017-140000.gz",
"pagecounts-20121017-150000.gz",
"pagecounts-20121017-160000.gz",
"pagecounts-20121017-170000.gz",
"pagecounts-20121017-180000.gz",
"pagecounts-20121017-190000.gz",
"pagecounts-20121017-200001.gz",
"pagecounts-20121017-210000.gz",
"pagecounts-20121017-220000.gz",
"pagecounts-20121017-230000.gz",
"pagecounts-20121018-000000.gz",
"pagecounts-20121018-010000.gz",
"pagecounts-20121018-020000.gz",
"pagecounts-20121018-030000.gz",
"pagecounts-20121018-040000.gz",
"pagecounts-20121018-050000.gz",
"pagecounts-20121018-060000.gz",
"pagecounts-20121018-070000.gz",
"pagecounts-20121018-080000.gz",
"pagecounts-20121018-090000.gz",
"pagecounts-20121018-100001.gz",
"pagecounts-20121018-110000.gz",
"pagecounts-20121018-120000.gz",
"pagecounts-20121018-130000.gz",
"pagecounts-20121018-140000.gz",
"pagecounts-20121018-150000.gz",
"pagecounts-20121018-160000.gz",
"pagecounts-20121018-170000.gz",
"pagecounts-20121018-180000.gz",
"pagecounts-20121018-190000.gz",
"pagecounts-20121018-200000.gz",
"pagecounts-20121018-210000.gz",
"pagecounts-20121018-220000.gz",
"pagecounts-20121018-230000.gz",
"pagecounts-20121019-000001.gz",
"pagecounts-20121019-010000.gz",
"pagecounts-20121019-020000.gz",
"pagecounts-20121019-030000.gz",
"pagecounts-20121019-040000.gz",
"pagecounts-20121019-050000.gz",
"pagecounts-20121019-060000.gz",
"pagecounts-20121019-070000.gz",
"pagecounts-20121019-080000.gz",
"pagecounts-20121019-090000.gz",
"pagecounts-20121019-100000.gz",
"pagecounts-20121019-110000.gz",
"pagecounts-20121019-120000.gz",
"pagecounts-20121019-130000.gz",
"pagecounts-20121019-140001.gz",
"pagecounts-20121019-150000.gz",
"pagecounts-20121019-160000.gz",
"pagecounts-20121019-170000.gz",
"pagecounts-20121019-180000.gz",
"pagecounts-20121019-190000.gz",
"pagecounts-20121019-200000.gz",
"pagecounts-20121019-210000.gz",
"pagecounts-20121019-220000.gz",
"pagecounts-20121019-230000.gz",
"pagecounts-20121020-000000.gz",
"pagecounts-20121020-010000.gz",
"pagecounts-20121020-020000.gz",
"pagecounts-20121020-030000.gz",
"pagecounts-20121020-040001.gz",
"pagecounts-20121020-050000.gz",
"pagecounts-20121020-060000.gz",
"pagecounts-20121020-070000.gz",
"pagecounts-20121020-080000.gz",
"pagecounts-20121020-090000.gz",
"pagecounts-20121020-100000.gz",
"pagecounts-20121020-110000.gz",
"pagecounts-20121020-120000.gz",
"pagecounts-20121020-130000.gz",
"pagecounts-20121020-140000.gz",
"pagecounts-20121020-150000.gz",
"pagecounts-20121020-160000.gz",
"pagecounts-20121020-170000.gz",
"pagecounts-20121020-180001.gz",
"pagecounts-20121020-190000.gz",
"pagecounts-20121020-200000.gz",
"pagecounts-20121020-210000.gz",
"pagecounts-20121020-220000.gz",
"pagecounts-20121020-230000.gz",
"pagecounts-20121021-000000.gz",
"pagecounts-20121021-010000.gz",
"pagecounts-20121021-020000.gz",
"pagecounts-20121021-030000.gz",
"pagecounts-20121021-040000.gz",
"pagecounts-20121021-050000.gz",
"pagecounts-20121021-060000.gz",
"pagecounts-20121021-070000.gz",
"pagecounts-20121021-080001.gz",
"pagecounts-20121021-090000.gz",
"pagecounts-20121021-100000.gz",
"pagecounts-20121021-110000.gz",
"pagecounts-20121021-120000.gz",
"pagecounts-20121021-130000.gz",
"pagecounts-20121021-140000.gz",
"pagecounts-20121021-150000.gz",
"pagecounts-20121021-160000.gz",
"pagecounts-20121021-170000.gz",
"pagecounts-20121021-180000.gz",
"pagecounts-20121021-190000.gz",
"pagecounts-20121021-200000.gz",
"pagecounts-20121021-210000.gz",
"pagecounts-20121021-220001.gz",
"pagecounts-20121021-230000.gz",
"pagecounts-20121022-000000.gz",
"pagecounts-20121022-010000.gz",
"pagecounts-20121022-020000.gz",
"pagecounts-20121022-030000.gz",
"pagecounts-20121022-040000.gz",
"pagecounts-20121022-050000.gz",
"pagecounts-20121022-060000.gz",
"pagecounts-20121022-070000.gz",
"pagecounts-20121022-080000.gz",
"pagecounts-20121022-090000.gz",
"pagecounts-20121022-100000.gz",
"pagecounts-20121022-110001.gz",
"pagecounts-20121022-120000.gz",
"pagecounts-20121022-130000.gz",
"pagecounts-20121022-140000.gz",
"pagecounts-20121022-150000.gz",
"pagecounts-20121022-160000.gz",
"pagecounts-20121022-170000.gz",
"pagecounts-20121022-180000.gz",
"pagecounts-20121022-190000.gz",
"pagecounts-20121022-200000.gz",
"pagecounts-20121022-210000.gz",
"pagecounts-20121022-220000.gz",
"pagecounts-20121022-230000.gz",
"pagecounts-20121023-000001.gz",
"pagecounts-20121023-010000.gz",
"pagecounts-20121023-020000.gz",
"pagecounts-20121023-030000.gz",
"pagecounts-20121023-040000.gz",
"pagecounts-20121023-050000.gz",
"pagecounts-20121023-060000.gz",
"pagecounts-20121023-070000.gz",
"pagecounts-20121023-080000.gz",
"pagecounts-20121023-090000.gz",
"pagecounts-20121023-100000.gz",
"pagecounts-20121023-110000.gz",
"pagecounts-20121023-120000.gz",
"pagecounts-20121023-130000.gz",
"pagecounts-20121023-140000.gz",
"pagecounts-20121023-150001.gz",
"pagecounts-20121023-160000.gz",
"pagecounts-20121023-170000.gz",
"pagecounts-20121023-180000.gz",
"pagecounts-20121023-190000.gz",
"pagecounts-20121023-200000.gz",
"pagecounts-20121023-210000.gz",
"pagecounts-20121023-220000.gz",
"pagecounts-20121023-230000.gz",
"pagecounts-20121024-000000.gz",
"pagecounts-20121024-010000.gz",
"pagecounts-20121024-020000.gz",
"pagecounts-20121024-030001.gz",
"pagecounts-20121024-040000.gz",
"pagecounts-20121024-050000.gz",
"pagecounts-20121024-060000.gz",
"pagecounts-20121024-070000.gz",
"pagecounts-20121024-080000.gz",
"pagecounts-20121024-090000.gz",
"pagecounts-20121024-100000.gz",
"pagecounts-20121024-110000.gz",
"pagecounts-20121024-120000.gz",
"pagecounts-20121024-130000.gz",
"pagecounts-20121024-140000.gz",
"pagecounts-20121024-150000.gz",
"pagecounts-20121024-160001.gz",
"pagecounts-20121024-170000.gz",
"pagecounts-20121024-180000.gz",
"pagecounts-20121024-190000.gz",
"pagecounts-20121024-200000.gz",
"pagecounts-20121024-210000.gz",
"pagecounts-20121024-220000.gz",
"pagecounts-20121024-230000.gz",
"pagecounts-20121025-000000.gz",
"pagecounts-20121025-010000.gz",
"pagecounts-20121025-020000.gz",
"pagecounts-20121025-030000.gz",
"pagecounts-20121025-040000.gz",
"pagecounts-20121025-050001.gz",
"pagecounts-20121025-060000.gz",
"pagecounts-20121025-070000.gz",
"pagecounts-20121025-080000.gz",
"pagecounts-20121025-090000.gz",
"pagecounts-20121025-100000.gz",
"pagecounts-20121025-110000.gz",
"pagecounts-20121025-120000.gz",
"pagecounts-20121025-130000.gz",
"pagecounts-20121025-140000.gz",
"pagecounts-20121025-150000.gz",
"pagecounts-20121025-160000.gz",
"pagecounts-20121025-170001.gz",
"pagecounts-20121025-180000.gz",
"pagecounts-20121025-190000.gz",
"pagecounts-20121025-200000.gz",
"pagecounts-20121025-210000.gz",
"pagecounts-20121025-220000.gz",
"pagecounts-20121025-230000.gz",
"pagecounts-20121026-000000.gz",
"pagecounts-20121026-010000.gz",
"pagecounts-20121026-020000.gz",
"pagecounts-20121026-030000.gz",
"pagecounts-20121026-040000.gz",
"pagecounts-20121026-050000.gz",
"pagecounts-20121026-060000.gz",
"pagecounts-20121026-070001.gz",
"pagecounts-20121026-080000.gz",
"pagecounts-20121026-090000.gz",
"pagecounts-20121026-100000.gz",
"pagecounts-20121026-110000.gz",
"pagecounts-20121026-120000.gz",
"pagecounts-20121026-130000.gz",
"pagecounts-20121026-140000.gz",
"pagecounts-20121026-150000.gz",
"pagecounts-20121026-160000.gz",
"pagecounts-20121026-170000.gz",
"pagecounts-20121026-180000.gz",
"pagecounts-20121026-190000.gz",
"pagecounts-20121026-200001.gz",
"pagecounts-20121026-210000.gz",
"pagecounts-20121026-220000.gz",
"pagecounts-20121026-230000.gz",
"pagecounts-20121027-000000.gz",
"pagecounts-20121027-010000.gz",
"pagecounts-20121027-020000.gz",
"pagecounts-20121027-030000.gz",
"pagecounts-20121027-040000.gz",
"pagecounts-20121027-050000.gz",
"pagecounts-20121027-060000.gz",
"pagecounts-20121027-070000.gz",
"pagecounts-20121027-080000.gz",
"pagecounts-20121027-090001.gz",
"pagecounts-20121027-100000.gz",
"pagecounts-20121027-110000.gz",
"pagecounts-20121027-120000.gz",
"pagecounts-20121027-130000.gz",
"pagecounts-20121027-140000.gz",
"pagecounts-20121027-150000.gz",
"pagecounts-20121027-160000.gz",
"pagecounts-20121027-170000.gz",
"pagecounts-20121027-180000.gz",
"pagecounts-20121027-190000.gz",
"pagecounts-20121027-200000.gz",
"pagecounts-20121027-210000.gz",
"pagecounts-20121027-220001.gz",
"pagecounts-20121027-230000.gz",
"pagecounts-20121028-000000.gz",
"pagecounts-20121028-010000.gz",
"pagecounts-20121028-020000.gz",
"pagecounts-20121028-030000.gz",
"pagecounts-20121028-040000.gz",
"pagecounts-20121028-050000.gz",
"pagecounts-20121028-060000.gz",
"pagecounts-20121028-070000.gz",
"pagecounts-20121028-080000.gz",
"pagecounts-20121028-090000.gz",
"pagecounts-20121028-100000.gz",
"pagecounts-20121028-110000.gz",
"pagecounts-20121028-120001.gz",
"pagecounts-20121028-130000.gz",
"pagecounts-20121028-140000.gz",
"pagecounts-20121028-150000.gz",
"pagecounts-20121028-160000.gz",
"pagecounts-20121028-170000.gz",
"pagecounts-20121028-180000.gz",
"pagecounts-20121028-190000.gz",
"pagecounts-20121028-200000.gz",
"pagecounts-20121028-210000.gz",
"pagecounts-20121028-220000.gz",
"pagecounts-20121028-230000.gz",
"pagecounts-20121029-000000.gz",
"pagecounts-20121029-010001.gz",
"pagecounts-20121029-020000.gz",
"pagecounts-20121029-030000.gz",
"pagecounts-20121029-040000.gz",
"pagecounts-20121029-050000.gz",
"pagecounts-20121029-060000.gz",
"pagecounts-20121029-070000.gz",
"pagecounts-20121029-080000.gz",
"pagecounts-20121029-090000.gz",
"pagecounts-20121029-100000.gz",
"pagecounts-20121029-110000.gz",
"pagecounts-20121029-120000.gz",
"pagecounts-20121029-130000.gz",
"pagecounts-20121029-140000.gz",
"pagecounts-20121029-150001.gz",
"pagecounts-20121029-160000.gz",
"pagecounts-20121029-170000.gz",
"pagecounts-20121029-180000.gz",
"pagecounts-20121029-190000.gz",
"pagecounts-20121029-200000.gz",
"pagecounts-20121029-210000.gz",
"pagecounts-20121029-220000.gz",
"pagecounts-20121029-230000.gz",
"pagecounts-20121030-000000.gz",
"pagecounts-20121030-010000.gz",
"pagecounts-20121030-020000.gz",
"pagecounts-20121030-030000.gz",
"pagecounts-20121030-040001.gz",
"pagecounts-20121030-050000.gz",
"pagecounts-20121030-060000.gz",
"pagecounts-20121030-070000.gz",
"pagecounts-20121030-080000.gz",
"pagecounts-20121030-090000.gz",
"pagecounts-20121030-100000.gz",
"pagecounts-20121030-110000.gz",
"pagecounts-20121030-120000.gz",
"pagecounts-20121030-130000.gz",
"pagecounts-20121030-140000.gz",
"pagecounts-20121030-150000.gz",
"pagecounts-20121030-160000.gz",
"pagecounts-20121030-170001.gz",
"pagecounts-20121030-180000.gz",
"pagecounts-20121030-190000.gz",
"pagecounts-20121030-200000.gz",
"pagecounts-20121030-210000.gz",
"pagecounts-20121030-220000.gz",
"pagecounts-20121030-230000.gz",
"pagecounts-20121031-000000.gz",
"pagecounts-20121031-010000.gz",
"pagecounts-20121031-020000.gz",
"pagecounts-20121031-030000.gz",
"pagecounts-20121031-040000.gz",
"pagecounts-20121031-050000.gz",
"pagecounts-20121031-060001.gz",
"pagecounts-20121031-070000.gz",
"pagecounts-20121031-080000.gz",
"pagecounts-20121031-090000.gz",
"pagecounts-20121031-100000.gz",
"pagecounts-20121031-110000.gz",
"pagecounts-20121031-120000.gz",
"pagecounts-20121031-130000.gz",
"pagecounts-20121031-140000.gz",
"pagecounts-20121031-150000.gz",
"pagecounts-20121031-160000.gz",
"pagecounts-20121031-170000.gz",
"pagecounts-20121031-180000.gz",
"pagecounts-20121031-190001.gz",
"pagecounts-20121031-200000.gz",
"pagecounts-20121031-210000.gz",
"pagecounts-20121031-220000.gz",
"pagecounts-20121031-230000.gz",
]
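# The hour stamps above are irregular (some dumps finished a second late, e.g.
# "pagecounts-20121001-060001.gz"), so the filenames are listed explicitly
# rather than generated. Assuming exact hourly stamps, a generation sketch
# would be:
# from datetime import datetime, timedelta
# urls = ["pagecounts-%s.gz" % (datetime(2012, 10, 1) + timedelta(hours=h)).strftime("%Y%m%d-%H0000")
#         for h in range(31 * 24)]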
import os
base = "http://dumps.wikimedia.org/other/pagecounts-raw/"
tail = "2012/2012-10/"
i = 0
for url in urls:
print "%d completeted of %d total. %d remaining" % (i, len(urls), len(urls) - i)
#os.system("curl --silent -O %s >> /dev/null" % (base + tail + url))
os.system("curl -O %s" % (base + tail + url))
i = i + 1
| mit | 190,367,817,570,999,400 | 31.883752 | 82 | 0.78512 | false | 2.405818 | false | false | false |
jenskutilek/Glyphs-Scripts | Layers/Delete all non-Master layers.py | 1 | 1836 |
# MenuTitle: Delete all non-Master layers
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
__doc__ = """
Goes through selected glyphs and deletes all glyph layers which are not a Master, Bracket or Brace layer.
"""
Font = Glyphs.font
selectedLayers = Font.selectedLayers
searchTerms = ["[]", "{}"]
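# "[...]"-suffixed names mark Bracket layers and "{...}"-suffixed names mark
# Brace layers in Glyphs; process() below keeps any layer whose name ends in
# one of these characters.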
def process(thisGlyph):
count = 0
numberOfLayers = len(thisGlyph.layers)
for i in range(numberOfLayers)[::-1]:
thisLayer = thisGlyph.layers[i]
if (
thisLayer.layerId != thisLayer.associatedMasterId
): # not the master layer
thisLayerName = thisLayer.name
thisLayerShouldBeRemoved = True
if thisLayerName: # always delete unnamed layers
for parentheses in searchTerms:
opening = parentheses[0]
closing = parentheses[1]
# check if ONE of them is at the END of the layer name, like:
# Bold [160], Bold [160[, Bold ]160], Regular {120}
if thisLayerName.endswith(
opening
) or thisLayerName.endswith(closing):
thisLayerShouldBeRemoved = False
if thisLayerShouldBeRemoved:
count += 1
del thisGlyph.layers[i]
return count
Font.disableUpdateInterface()
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
thisGlyphName = thisGlyph.name
if str(thisGlyphName)[:7] != "_smart.":
thisGlyph.beginUndo()
print("%s layers deleted in %s." % (process(thisGlyph), thisGlyphName))
thisGlyph.endUndo()
else:
print("Smart layers kept in %s." % (thisGlyphName))
Font.enableUpdateInterface()
| mit | -8,980,516,049,966,369,000 | 28.142857 | 105 | 0.592593 | false | 4.259861 | false | false | false |
Oldentide/Oldentide | db/tools/character_builder_scraper.py | 2 | 9353 |
#!/bin/python
# This script will parse and reformat all of the race and profession modifiers in the RoE files.
# Modules.
import argparse
import os
import pprint
import re
import sys
import time
# Set up command line arguments.
parser = argparse.ArgumentParser(description='This script will parse and reformat all of the race and profession modifiers in the RoE files.')
parser.add_argument('crsm_dir', type=str, help='Full path of the Crsm folder in the RoE game directory.')
args = parser.parse_args()
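# Usage sketch (hypothetical install path; the script joins subfolders with
# Windows-style separators below):
#   python character_builder_scraper.py "C:\Games\RoE\Crsm"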
# Initialize path and files.
race_modifiers_file = 'race_templates.csv'
profession_modifiers_file = 'profession_templates.csv'
# Initialize modifiers data structures.
skills = (
"Hppl",
"Mppl",
"Strength",
"Constitution",
"Intelligence",
"Dexterity",
"Axe",
"Dagger",
"Hammer",
"Polearm",
"Spear",
"Staff",
"Sword",
"Unarmed",
"Archery",
"Crossbow",
"Sling",
"Thrown",
"Armor",
"Dual Weapon",
"Shield",
"Bardic",
"Conjuring",
"Druidic",
"Illusion",
"Necromancy",
"Shamanic",
"Sorcery",
"Summoning",
"Spellcraft",
"Focus",
"Alchemy",
"Armorsmithing",
"Calligraphy",
"Enchanting",
"Fletching",
"Lapidary",
"Tailoring",
"Weaponsmithing",
"Herbalism",
"Hunting",
"Mining",
"Bargaining",
"Camping",
"First Aid",
"Lore",
"Pick Locks",
"Scouting",
"Search",
"Stealth",
"Traps",
"Aeolandis",
"Hieroform",
"High Gundis",
"Old Praxic",
"Praxic",
"Runic",
"Skill_1_multi",
"Skill_1_names",
"Skill_1_value",
"Skill_2_multi",
"Skill_2_names",
"Skill_2_value",
"Skill_3_multi",
"Skill_3_names",
"Skill_3_value",
"Skill_4_multi",
"Skill_4_names",
"Skill_4_value",
"Skill_5_multi",
"Skill_5_names",
"Skill_5_value",
"Description"
)
headers = (
"hppl",
"mppl",
"strength_mod",
"constitution_mod",
"intelligence_mod",
"dexterity_mod",
"axe_mod",
"dagger_mod",
"hammer_mod",
"polearm_mod",
"spear_mod",
"staff_mod",
"sword_mod",
"unarmed_mod",
"archery_mod",
"crossbow_mod",
"sling_mod",
"thrown_mod",
"armor_mod",
"dualweapon_mod",
"shield_mod",
"bardic_mod",
"conjuring_mod",
"druidic_mod",
"illusion_mod",
"necromancy_mod",
"shamanic_mod",
"sorcery_mod",
"summoning_mod",
"spellcraft_mod",
"focus_mod",
"alchemy_mod",
"armorsmithing_mod",
"calligraphy_mod",
"enchanting_mod",
"fletching_mod",
"lapidary_mod",
"tailoring_mod",
"weaponsmithing_mod",
"herbalism_mod",
"hunting_mod",
"mining_mod",
"bargaining_mod",
"camping_mod",
"firstaid_mod",
"lore_mod",
"picklocks_mod",
"scouting_mod",
"search_mod",
"stealth_mod",
"traps_mod",
"aeolandis_mod",
"hieroform_mod",
"highgundis_mod",
"oldpraxic_mod",
"praxic_mod",
"runic_mod",
"skill_1_multi",
"skill_1_names",
"skill_1_value",
"skill_2_multi",
"skill_2_names",
"skill_2_value",
"skill_3_multi",
"skill_3_names",
"skill_3_value",
"skill_4_multi",
"skill_4_names",
"skill_4_value",
"skill_5_multi",
"skill_5_names",
"skill_5_value",
"description"
)
races = dict()
professions = dict()
# Parse race modifiers and store in data structure.
races_path = args.crsm_dir + "\\rsm"
print("Parsing character race modifiers in \"" + races_path + "\"")
for filename in os.listdir(races_path):
race_name = filename.split('.')[0]
race_dict = dict()
race_file_path = races_path + "\\" + filename
race_data = open(race_file_path).readlines()
for line in race_data:
skill, mod = line.rstrip("\n").rsplit(None,1)
if skill in skills:
race_dict[skill] = float(mod)
if race_name == "Dwarf":
race_dict["Strength"] = 5.0
race_dict["Constitution"] = 15.0
race_dict["Intelligence"] = -5.0
race_dict["Dexterity"] = -15.0
elif race_name == "Elf":
race_dict["Strength"] = -5.0
race_dict["Constitution"] = -15.0
race_dict["Intelligence"] = 5.0
race_dict["Dexterity"] = 15.0
elif race_name == "Gnome":
race_dict["Strength"] = -10.0
race_dict["Constitution"] = -10.0
race_dict["Intelligence"] = 10.0
race_dict["Dexterity"] = 10.0
elif race_name == "Human":
race_dict["Strength"] = 0.0
race_dict["Constitution"] = 0.0
race_dict["Intelligence"] = 0.0
race_dict["Dexterity"] = 0.0
elif race_name == "Leshy":
race_dict["Strength"] = -15.0
race_dict["Constitution"] = -5.0
race_dict["Intelligence"] = 20.0
race_dict["Dexterity"] = 0.0
elif race_name == "Ogre":
race_dict["Strength"] = 20.0
race_dict["Constitution"] = 5.0
race_dict["Intelligence"] = -15.0
race_dict["Dexterity"] = -10.0
elif race_name == "Orc":
race_dict["Strength"] = 5.0
race_dict["Constitution"] = 0.0
race_dict["Intelligence"] = -10.0
race_dict["Dexterity"] = 5.0
races[race_name] = race_dict
# Parse profession modifiers and store in a data structure.
professions_path = args.crsm_dir + "\\csm"
print("Parsing character profession modifiers in \"" + professions_path + "\"")
for filename in os.listdir(professions_path):
profession_name = filename.split('.')[0]
profession_dict = dict()
profession_file_path = professions_path + "\\" + filename
profession_data = open(profession_file_path).readlines()
for line in profession_data:
if line == profession_data[0]:
hppl, mppl = line.split(" ")
profession_dict['Hppl'] = hppl.rstrip("\n")
profession_dict['Mppl'] = mppl.rstrip("\n")
else:
skill, mod = line.rstrip("\n").rsplit(None,1)
if skill in skills:
profession_dict[skill] = float(mod)
professions[profession_name] = profession_dict
# Parse profession base info and store in a data structure.
professions_base_path = args.crsm_dir + "\\crq"
print("Parsing character profession base info in \"" + professions_base_path + "\"")
for filename in os.listdir(professions_base_path):
profession_base_name = filename.split('.')[0]
profession_base_file_path = professions_base_path + "\\" + filename
profession_base_data = open(profession_base_file_path).readlines()
# State variables for parsing class skill options.
optional = False
base_mod = 0
skill_options = []
skill_option_index = 1
for line in profession_base_data:
if (len(line.split()) > 1):
skill, mod = line.rstrip("\n").rsplit(None,1)
if skill == 'STR':
professions[profession_base_name]['Strength'] = float(mod)
elif skill == 'DEX':
professions[profession_base_name]['Dexterity'] = float(mod)
elif skill == 'CON':
professions[profession_base_name]['Constitution'] = float(mod)
elif skill == 'INT':
professions[profession_base_name]['Intelligence'] = float(mod)
elif skill in skills:
sos = "Skill_" + str(skill_option_index) + "_multi"
professions[profession_base_name][sos] = "0"
sos = "Skill_" + str(skill_option_index) + "_names"
professions[profession_base_name][sos] = skill
sos = "Skill_" + str(skill_option_index) + "_value"
professions[profession_base_name][sos] = mod
skill_option_index += 1
elif skill == "*":
sos = "Skill_" + str(skill_option_index) + "_multi"
if mod == "0":
professions[profession_base_name][sos] = "0"
else:
professions[profession_base_name][sos] = "1"
sos = "Skill_" + str(skill_option_index) + "_value"
professions[profession_base_name][sos] = mod
skill_option_index += 1
else:
skill = line.rstrip("\n")
if skill in skills:
sos = "Skill_" + str(skill_option_index) + "_names"
if sos in professions[profession_base_name]:
professions[profession_base_name][sos] += ("-" + skill)
else:
professions[profession_base_name][sos] = skill
# Parse profession information and store in a data structure.
info_path = args.crsm_dir + "\\info"
print("Parsing character information in \"" + info_path + "\"")
for filename in os.listdir(info_path):
info_name = filename.split('.')[0]
info_file_name = info_path + "\\" + filename
info_data = open(info_file_name).readlines()
if (info_data[-1] == "\n"):
info_data = info_data[0:-1]
if info_name in professions:
professions[info_name]['Description'] = info_data[-1].rstrip("\n").replace(",", " -")
elif info_name in races:
races[info_name]['Description'] = info_data[-1].rstrip("\n").replace(",", " -")
#pprint.pprint(races)
#pprint.pprint(professions)
# Write parsed race data to a csv file.
race_mod_file = open(race_modifiers_file, "w+")
race_mod_file.write("race")
for header_key in headers:
if not "skill_" in header_key and not "hppl" in header_key and not "mppl" in header_key:
race_mod_file.write("," + header_key)
race_mod_file.write("\n")
for race in sorted(races):
race_mod_file.write(race)
for skill in skills:
if not "Skill_" in skill and not "Hppl" in skill and not "Mppl" in skill:
if skill in races[race]:
race_mod_file.write("," + str(races[race][skill]))
else:
race_mod_file.write(",0.0")
race_mod_file.write("\n")
# Write parsed profession data to a csv file.
profession_mod_file = open(profession_modifiers_file, "w+")
profession_mod_file.write("profession")
for header_key in headers:
profession_mod_file.write("," + header_key)
profession_mod_file.write("\n")
for profession in sorted(professions):
profession_mod_file.write(profession)
for skill in skills:
if skill in professions[profession]:
profession_mod_file.write("," + str(professions[profession][skill]))
else:
profession_mod_file.write(",0.0")
profession_mod_file.write("\n")
| gpl-2.0 | -4,958,627,607,280,227,000 | 26.431085 | 143 | 0.651449 | false | 2.634648 | false | false | false |
ACCUConf/ACCUConf_Submission_Web_Application | accuconf_cfp/views/review.py | 1 | 11087 |
"""Routes associated with reviewing submitted proposals."""
from flask import jsonify, render_template, request, session
from accuconf_cfp import app, db, year
from accuconf_cfp.utils import is_acceptable_route, is_logged_in, md
from models.proposal import Proposal
from models.proposal_types import sessiontype_descriptions
from models.role_types import Role
from models.score import CommentForProposer, CommentForCommittee, Score
from models.user import User
base_page = {
'year': year,
}
def already_reviewed(proposal, reviewer):
return any(x.proposal == proposal for x in reviewer.scores)
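# A reviewer counts as having reviewed a proposal only if one of their Score
# rows points at it; stored comments alone do not count.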
@app.route('/review_list')
def review_list():
check = is_acceptable_route()
if not check[0]:
return check[1]
assert check[1] is None
if is_logged_in():
user = User.query.filter_by(email=session['email']).first()
if not user:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Review List Failed',
'data': 'Logged in user is not a registered user. This cannot happen.',
}))
if user.role != Role.reviewer:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Review List Failed',
'data': 'Logged in user is not a registered reviewer.',
}))
# TODO reviewer cannot review proposed proposals (done) but what about being a presenter?
proposals = [(p.id, p.title, lead.presenter.name, p.session_type.value, already_reviewed(p, user))
for p in Proposal.query.all() if p.proposer != user
for lead in p.proposal_presenters if lead.is_lead
]
return render_template('/review_list.html', page=md(base_page, {
'pagetitle': 'List of Proposals',
'data': 'Please click on the proposal you wish to review.',
'proposals': proposals,
}))
return render_template('review_list.html', page=md(base_page, {
'pagetitle': 'Review List Failed',
'data': 'You must be registered, logged in, and a reviewer to review proposals',
}))
def _reviewer_is_in_proposal(reviewer, proposal):
if reviewer.email == proposal.proposer.email:
return True
for p in proposal.presenters:
if reviewer.email == p.email:
return True
return False
def _reviewer_is_in_proposal_index(reviewer, i):
proposal = Proposal.query.filter_by(id=i).all()
if not proposal:
return False
assert len(proposal) == 1
proposal = proposal[0]
return _reviewer_is_in_proposal(reviewer, proposal)
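# The previous/next navigation below skips any proposal the reviewer proposed
# or is presenting, using the two helpers above.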
@app.route('/review_proposal/<int:id>', methods=['GET', 'POST'])
def review_proposal(id):
check = is_acceptable_route()
if not check[0]:
return check[1]
assert check[1] is None
if is_logged_in():
reviewer = User.query.filter_by(email=session['email']).first()
if request.method == 'POST':
review_data = request.json
if not reviewer:
response = jsonify('Logged in person is not a reviewer. This cannot happen.')
response.status_code = 400
return response
proposal = Proposal.query.filter_by(id=id).first()
if not proposal:
response = jsonify('Proposal cannot be found. This cannot happen.')
response.status_code = 400
return response
# TODO Is this the right way of doing this?
score = Score.query.filter_by(proposal=proposal, scorer=reviewer).all()
if score:
assert len(score) == 1
score[0].score = review_data['score']
else:
db.session.add(Score(proposal, reviewer, review_data['score']))
if review_data['comment_for_proposer']:
comment = CommentForProposer.query.filter_by(proposal=proposal, commenter=reviewer).all()
if comment:
comment[0].comment = review_data['comment_for_proposer']
else:
db.session.add(CommentForProposer(proposal, reviewer, review_data['comment_for_proposer']))
if review_data['comment_for_committee']:
comment = CommentForCommittee.query.filter_by(proposal=proposal, commenter=reviewer).all()
if comment:
comment[0].comment = review_data['comment_for_committee']
else:
db.session.add(CommentForCommittee(proposal, reviewer, review_data['comment_for_committee']))
db.session.commit()
return jsonify('Review stored.')
if not reviewer:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Review Proposal Failed',
'data': 'Logged in user is not a registered user. This cannot happen.',
}))
if reviewer.role != Role.reviewer:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Review Proposal Failed',
'data': 'Logged in user is not a registered reviewer.',
}))
number_of_proposals = Proposal.query.count()
if not (1 <= id <= number_of_proposals):
return render_template('general.html', page=md(base_page, {
'pagetitle': 'Review Proposal Failed',
'data': 'Requested proposal does not exist.',
}))
proposal = Proposal.query.filter_by(id=id).first()
presenters = [{'name': p.name, 'bio': p.bio} for p in proposal.presenters]
score = ''
comment_for_proposer = ''
comment_for_committee = ''
if already_reviewed(proposal, reviewer):
scores = [s for s in reviewer.scores if s.proposal == proposal]
assert len(scores) == 1
score = scores[0].score
comments_for_proposer = [c for c in reviewer.comments_for_proposer if c.proposal == proposal]
if comments_for_proposer:
comment_for_proposer = comments_for_proposer[0].comment
comments_for_committee = [c for c in reviewer.comments_for_committee if c.proposal == proposal]
if comments_for_committee:
comment_for_committee = comments_for_committee[0].comment
has_next = id < number_of_proposals
if has_next:
for i in range(id + 1, number_of_proposals + 1):
if not _reviewer_is_in_proposal_index(reviewer, i):
break
else:
has_next = False
has_previous = id > 1
if has_previous:
for i in range(id - 1, 0, -1):
if not _reviewer_is_in_proposal_index(reviewer, i):
break
else:
has_previous = False
return render_template('/review_proposal.html', page=md(base_page, {
'pagetitle': 'Proposal to Review',
'data': 'There is no specific "do nothing" button, to not do anything simply navigate away from this page.',
'proposal_id': id,
'title': proposal.title,
'summary': proposal.summary,
'session_type': sessiontype_descriptions[proposal.session_type],
'audience': proposal.audience.value,
'notes': proposal.notes,
'presenters': presenters,
'button_label': 'Submit' if not score else 'Update',
'score': score,
'comment_for_proposer': comment_for_proposer,
'comment_for_committee': comment_for_committee,
'has_previous': has_previous,
'has_next': has_next,
}))
return render_template('general.html', page=md(base_page, {
'pagetitle': 'Review Proposal Failed',
'data': 'You must be registered, logged in, and a reviewer to review a proposal',
}))
@app.route('/previous_proposal/<int:id>/<int:unreviewed>')
def previous_proposal(id, unreviewed):
check = is_acceptable_route()
if not check[0]:
return check[1]
assert check[1] is None
if is_logged_in():
user = User.query.filter_by(email=session['email']).first()
if user.role != Role.reviewer:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Proposal Navigation Failed',
'data': 'Logged in user is not a registered reviewer.',
}))
if not unreviewed:
for i in range(id - 1, 0, -1):
if not _reviewer_is_in_proposal_index(user, i):
return jsonify(i)
response = jsonify("Requested proposal does not exist.")
response.status_code = 400
return response
for i in range(id - 1, 0, -1):
proposal = Proposal.query.filter_by(id=i).first()
if not proposal:
break
if not already_reviewed(proposal, user):
if not _reviewer_is_in_proposal(user, proposal):
return jsonify(i)
response = jsonify("Requested proposal does not exist.")
response.status_code = 400
return response
return render_template('general.html', page=md(base_page, {
'pagetitle': 'Proposal Navigation Failed',
'data': 'You must be registered, logged in, and a reviewer to review a proposal',
}))
@app.route('/next_proposal/<int:id>/<int:unreviewed>')
def next_proposal(id, unreviewed):
check = is_acceptable_route()
if not check[0]:
return check[1]
assert check[1] is None
if is_logged_in():
user = User.query.filter_by(email=session['email']).first()
if user.role != Role.reviewer:
return render_template('/general.html', page=md(base_page, {
'pagetitle': 'Proposal Navigation Failed',
'data': 'Logged in user is not a registered reviewer.',
}))
if not unreviewed:
number_of_proposals = Proposal.query.count()
for i in range(id + 1, number_of_proposals + 1):
if not _reviewer_is_in_proposal_index(user, i):
return jsonify(i)
response = jsonify("Requested proposal does not exist.")
response.status_code = 400
return response
i = id + 1
while True:
proposal = Proposal.query.filter_by(id=i).first()
if not proposal:
break
if not already_reviewed(proposal, user):
if not _reviewer_is_in_proposal(user, proposal):
return jsonify(i)
i += 1
response = jsonify("Requested proposal does not exist.")
response.status_code = 400
return response
return render_template('general.html', page=md(base_page, {
'pagetitle': 'Proposal Navigation Failed',
'data': 'You must be registered, logged in, and a reviewer to review a proposal',
}))
| gpl-3.0 | 8,220,997,768,545,114,000 | 42.478431 | 120 | 0.582394 | false | 4.06564 | false | false | false |
fritzgerald/NehePyOpenGL | 01-05/nehe-02.py | 1 | 2520 |
#!/usr/bin/env python
import glfw
import OpenGL.GL as gl
import OpenGL.GLU as glu
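# Requires the glfw and PyOpenGL packages (e.g. pip install glfw PyOpenGL) and
# a context exposing the fixed-function pipeline used below.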
window_width = 640
window_height = 480
def main():
# Initialize the library
if not glfw.init():
return
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(window_width, window_height, "Hello World", None, None)
if not window:
glfw.terminate()
return
# Make the window's context current
glfw.make_context_current(window)
glfw.set_window_size_callback(window, on_window_size)
initGL(window)
# Loop until the user closes the window
while not glfw.window_should_close(window):
# Render here, e.g. using pyOpenGL
display()
# Swap front and back buffers
glfw.swap_buffers(window)
# Poll for and process events
glfw.poll_events()
glfw.terminate()
def on_window_size(window, w, h):
    # get the framebuffer size and don't rely on the window size:
    # the window size and the framebuffer can differ on retina displays
    global window_width, window_height
    size_w, size_h = glfw.get_framebuffer_size(window)
    window_width = size_w
    window_height = size_h
    gl.glViewport(0, 0, window_width, window_height) #Reset The Current Viewport And Perspective Transformation
    gl.glMatrixMode(gl.GL_PROJECTION) #Select The Projection Matrix
    gl.glLoadIdentity() #Reset The Projection Matrix
    glu.gluPerspective(45.0,float(window_width)/max(1,window_height),0.1,100.0) #Calculate The Aspect Ratio Of The Window (avoid integer and zero division)
    gl.glMatrixMode(gl.GL_MODELVIEW) #Select The Modelview Matrix
def initGL(window):
gl.glClearColor(0.40,0.58,0.93,1.0) #cornflower blue
gl.glClearDepth(1.0) #Enables Clearing Of The Depth Buffer
gl.glDepthFunc(gl.GL_LESS) #The Type Of Depth Test To Do
gl.glEnable(gl.GL_DEPTH_TEST) #Enables Depth Testing
gl.glShadeModel(gl.GL_SMOOTH) #Enables Smooth Color Shading
on_window_size(window, window_width, window_height)
def display():
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glLoadIdentity()
gl.glTranslatef(-1.5,0.0,-6.0)
gl.glBegin(gl.GL_TRIANGLES)
gl.glVertex3f( 0.0, 1.0, 0.0)
gl.glVertex3f(-1.0,-1.0, 0.0)
gl.glVertex3f( 1.0,-1.0, 0.0)
gl.glEnd()
gl.glLoadIdentity()
gl.glTranslatef(1.5,0.0,-6.0)
gl.glBegin(gl.GL_QUADS)
gl.glVertex3f(-1.0, 1.0, 0.0)
gl.glVertex3f( 1.0, 1.0, 0.0)
gl.glVertex3f( 1.0,-1.0, 0.0)
gl.glVertex3f(-1.0,-1.0, 0.0)
gl.glEnd()
if __name__ == "__main__":
main()
|
mit
| -5,422,371,594,309,356,000 | 27.325843 | 111 | 0.666667 | false | 2.957746 | false | false | false |
ArchiveTeam/ftp-grab
|
pipeline.py
|
1
|
10356
|
from distutils.version import StrictVersion
import datetime
import hashlib
import os
import re
import socket
import shutil
import time
import sys
import urllib
try:
import requests
except ImportError:
print('Please install or update the requests module.')
sys.exit(1)
import seesaw
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import WgetDownload, ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import SimpleTask, SetItemKey, LimitConcurrent
from seesaw.tracker import PrepareStatsForTracker, GetItemFromTracker, \
UploadWithTracker, SendDoneToTracker
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.3"):
raise Exception("This pipeline needs seesaw version 0.8.3 or higher.")
###########################################################################
# Find a useful Wpull executable.
#
# WPULL_EXE will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WPULL_EXE = find_executable(
"Wpull",
re.compile(r"\b1\.2\b"),
[
"./wpull",
os.path.expanduser("~/.local/share/wpull-1.2/wpull"),
os.path.expanduser("~/.local/bin/wpull"),
"./wpull_bootstrap",
"wpull",
]
)
if not WPULL_EXE:
raise Exception("No usable Wpull found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20160304.01"
TRACKER_ID = 'ftp'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
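# A minimal sketch of that pattern (task name and log line are illustrative):
#   class HelloTask(SimpleTask):
#       def __init__(self):
#           SimpleTask.__init__(self, "HelloTask")
#       def process(self, item):
#           item.log_output('hello from %s' % item['item_name'])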
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'You are behind a firewall or proxy. That is a big no-no!')
raise Exception(
'You are behind a firewall or proxy. That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_')
item['escaped_item_name'] = escaped_item_name
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (
self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S")
)
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
SCRIPT_SHA1 = get_hash(os.path.join(CWD, 'ftp.py'))
def stats_id_function(item):
# For accountability and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'script_hash': SCRIPT_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WPULL_EXE,
"-nv",
"--python-script", "ftp.py",
"-o", ItemInterpolation("%(item_dir)s/wpull.log"),
"--no-check-certificate",
"--database", ItemInterpolation("%(item_dir)s/wpull.db"),
"--delete-after",
"--no-robots",
"--no-cookies",
"--rotate-dns",
"--timeout", "60",
"--tries", "inf",
"--wait", "0.5",
"--random-wait",
"--waitretry", "5",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "ftp-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("ftp-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_sort, item_item, item_file = item_name.split(':', 2)
item['item_item'] = item_item
MAX_SIZE = 10737418240
skipped = requests.get('https://raw.githubusercontent.com/ArchiveTeam/ftp-items/master/skipped_sites')
if skipped.status_code != 200:
raise Exception('Something went wrong getting the skipped_sites list from GitHub. ABORTING.')
skipped_items = skipped.text.splitlines()
for skipped_item in skipped_items:
if item_file.startswith(skipped_item):
raise Exception('This FTP will be skipped...')
item_list = requests.get('http://archive.org/download/{0}/{1}'.format(item_item, item_file))
if item_list.status_code != 200:
            raise Exception('You received status code %d with URL %s. ABORTING.'%(item_list.status_code, item_list.url))
itemsize = int(re.search(r'ITEM_TOTAL_SIZE: ([0-9]+)', item_list.text).group(1))
if itemsize > MAX_SIZE:
            raise Exception('Item is %d bytes. This is larger than %d bytes. ABORTING.'%(itemsize, MAX_SIZE))
for url in item_list.text.splitlines():
if url.startswith('ftp://'):
                url = url.replace(' ', '%20').replace('&amp;', '&')
url = urllib.unquote(url)
if item_item == 'archiveteam_ftp_items_2015120102':
url = url.replace('ftp://ftp.research.microsoft.com/downloads/downloads/', 'ftp://ftp.research.microsoft.com/downloads/')
if '#' in url:
                    raise Exception('%s contains a bad character.'%(url))
else:
wget_args.append("{0}".format(url))
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="ftp",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/thumb/f/f3/Archive_team.png/235px-Archive_team.png" height="50px" title=""/>
<h2>FTP <span class="links"><a href="http://archiveteam.org/index.php?title=FTP">Website</a> ·
<a href="http://tracker.archiveteam.org/ftp/">Leaderboard</a></span></h2>
<p>Archiving all FTPs!</p>
"""
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="ftp"),
WgetDownload(
WgetArgs(),
max_tries=1,
accept_on_exit_code=[0, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_item": ItemValue("item_item"),
"downloader": downloader
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz"),
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(
NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz"),
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
|
unlicense
| -4,047,015,062,139,173,400 | 33.868687 | 174 | 0.565856 | false | 3.792018 | false | false | false |
evenmarbles/mlpy
|
mlpy/knowledgerep/cbr/similarity.py
|
1
|
17713
|
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from abc import ABCMeta, abstractmethod
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors.dist_metrics import METRIC_MAPPING
class Stat(object):
"""The similarity statistics container.
    The similarity statistics container carries the calculated
    measure of similarity between the case identified by the
    case id and the query case, so it can be passed between
    functions.
Parameters
----------
case_id : int
The case's id.
similarity : float
The similarity measure.
"""
__slots__ = ('_case_id', '_similarity')
@property
def case_id(self):
"""The case's id.
Returns
-------
int :
The case's id
"""
return self._case_id
@property
def similarity(self):
"""The similarity measure.
Returns
-------
float :
The similarity measure.
"""
return self._similarity
def __init__(self, case_id, similarity=None):
self._case_id = case_id
self._similarity = similarity
class SimilarityFactory(object):
"""The similarity factory.
An instance of a similarity model can be created by passing
the similarity model type.
Examples
--------
>>> from mlpy.knowledgerep.cbr.similarity import SimilarityFactory
    >>> sim = SimilarityFactory.create('exact-match', **{})
"""
@staticmethod
def create(_type, **kwargs):
"""
Create a feature of the given type.
Parameters
----------
_type : str
The feature type. Valid feature types are:
knn
A k-nearest-neighbor algorithm is used to determine similarity
between cases (:class:`NeighborSimilarity`). The value
``n_neighbors`` must be specified.
radius-n
Similarity between cases is determined by the nearest neighbors
within a radius (:class:`NeighborSimilarity`). The value ``radius``
must be specified.
kmeans
Similarity is determined by a KMeans clustering algorithm
(:class:`KMeansSimilarity`). The value ``n_clusters`` must be specified.
exact-match
Only exact matches are considered similar (:class:`ExactMatchSimilarity`).
cosine
A cosine similarity measure is used to determine similarity between
cases (:class:`CosineSimilarity`).
kwargs : dict, optional
Non-positional arguments to pass to the class of the given type
for initialization.
Returns
-------
ISimilarity :
A similarity instance of the given type.
"""
try:
if _type == "knn":
kwargs["n_neighbors"] = kwargs["method_params"]
elif _type == "radius-n":
kwargs["radius"] = kwargs["method_params"]
elif _type == "kmeans":
kwargs["n_cluster"] = kwargs["method_params"]
elif _type == "cosine":
kwargs["threshold"] = kwargs["method_params"]
            kwargs.pop("method_params", None)
return {
"knn": NeighborSimilarity,
"radius-n": NeighborSimilarity,
"kmeans": KMeansSimilarity,
"exact-match": ExactMatchSimilarity,
"cosine": CosineSimilarity,
}[_type](**kwargs)
except KeyError:
return None
class ISimilarity(object):
"""The similarity model interface.
The similarity model keeps an internal indexing structure of
the relevant case data to efficiently computing the similarity
measure between data points.
Notes
-----
All similarity models must inherit from this class.
"""
__metaclass__ = ABCMeta
def __init__(self):
#: The indexing structure
self._indexing_structure = None
#: The mapping of the data points to their case ids
self._id_map = None
""":ivar: dict"""
@abstractmethod
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
@abstractmethod
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure returning the results in a collection of
similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
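# All concrete models below follow the same two-step protocol; a minimal
# sketch (the data points and case-id mapping are made up for illustration):
#   model = SimilarityFactory.create('knn', method_params=2)
#   model.build_indexing_structure(np.array([[0., 0.], [1., 1.]]),
#                                  {0: 10, 1: 11})
#   stats = model.compute_similarity([0.9, 1.1])
#   stats[0].case_id   # -> 11, the id of the nearest stored case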
class NeighborSimilarity(ISimilarity):
"""The neighborhood similarity model.
The neighbor similarity model determines similarity between the data
in the indexing structure and the query data by using the nearest
neighbor algorithm :class:`sklearn.neighbors.NearestNeighbors`.
Both a k-neighbors classifier and a radius-neighbor-classifier are implemented.
To choose between the classifiers either `n_neighbors` or `radius` must be
specified.
Parameters
----------
n_neighbors : int
The number of data points considered to be closest neighbors.
radius : int
The radius around the query data point, within which the data points
are considered closest neighbors.
algorithm : str
The internal indexing structure of the training data. Defaults to
`kd-tree`.
metric : str
The metric used to compute the distances between pairs of points.
Refer to :class:`sklearn.neighbors.DistanceMetric` for valid
identifiers. Default is `euclidean`.
metric_params : dict
Parameters relevant to the specified metric.
Raises
------
UserWarning :
        If either both or none of `n_neighbors` and `radius` are given.
See Also
--------
:class:`sklearn.neighbors.KNeighborsClassifier`, :class:`sklearn.neighbors.RadiusNeighborsClassifier`
"""
def __init__(self, n_neighbors=None, radius=None, algorithm=None, metric=None, metric_params=None):
super(NeighborSimilarity, self).__init__()
        if (n_neighbors is None) == (radius is None):
            raise UserWarning("Exactly one of n_neighbors or radius must be initialized.")
self._n_neighbors = n_neighbors
self._radius = radius
if algorithm is not None:
if algorithm not in ["ball_tree", "kd_tree", "brute", "auto"]:
raise ValueError("%s is not a valid retrieval algorithm" % algorithm)
self._algorithm = algorithm
else:
self._algorithm = "kd_tree"
if metric is not None:
if metric not in METRIC_MAPPING:
raise ValueError("%s is not a valid retrieval metric" % metric)
self._metric = metric
else:
self._metric = "euclidean"
self._metric_params = metric_params if metric_params is not None else 2
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Build the indexing structure by fitting the data according to the
specified algorithm.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
if self._n_neighbors is not None:
self._indexing_structure = NearestNeighbors(n_neighbors=self._n_neighbors, algorithm=self._algorithm,
metric=self._metric, p=self._metric_params).fit(data)
else:
self._indexing_structure = NearestNeighbors(radius=self._radius, algorithm=self._algorithm,
metric=self._metric, p=self._metric_params).fit(data)
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the :class:`sklearn.neighbors.NearestNeighbors`
algorithm. The results are returned in a collection of similarity statistics
(:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
if self._n_neighbors is not None:
# noinspection PyProtectedMember
raw_data = self._indexing_structure._fit_X
if len(raw_data) < self._n_neighbors:
result = []
for i, feat in enumerate(raw_data):
dist = np.linalg.norm(np.asarray(data_point) - np.asarray(feat))
result.append(Stat(self._id_map[i], dist))
# noinspection PyShadowingNames
result = sorted(result, key=lambda x: x.similarity)
else:
d, key_lists = self._indexing_structure.kneighbors(data_point)
result = [Stat(self._id_map[x], d[0][i]) for i, x in enumerate(key_lists[0])]
else:
d, key_lists = self._indexing_structure.radius_neighbors(data_point)
result = [Stat(self._id_map[x], d[0][i]) for i, x in enumerate(key_lists[0])]
return result
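# A radius-based variant of the sketch above, under the same made-up data:
#   model = NeighborSimilarity(radius=0.5, metric='euclidean')
#   model.build_indexing_structure(data, id_map)
#   nearby = model.compute_similarity([0.1, 0.1])   # only cases within 0.5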
class KMeansSimilarity(ISimilarity):
"""The KMeans similarity model.
The KMeans similarity model determines similarity between the data in the
indexing structure and the query data by using the :class:`sklearn.cluster.KMeans`
algorithm.
Parameters
----------
n_cluster : int
The number of clusters to fit the raw data in.
"""
def __init__(self, n_cluster=None):
super(KMeansSimilarity, self).__init__()
        self._n_cluster = n_cluster if n_cluster is not None else 8
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Build the indexing structure by fitting the data into `n_cluster`
clusters.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
self._indexing_structure = KMeans(init='k-means++', n_clusters=self._n_cluster, n_init=10).fit(data)
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the :class:`sklearn.cluster.KMeans`
clustering algorithm. The results are returned in a collection
of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
label = self._indexing_structure.predict(data_point)
result = []
try:
# noinspection PyTypeChecker,PyUnresolvedReferences
key_lists = np.nonzero(self._indexing_structure.labels_ == label[0])[0]
result = [Stat(self._id_map[x]) for x in key_lists]
except IndexError:
pass
return result
class ExactMatchSimilarity(ISimilarity):
"""The exact match similarity model.
The exact match similarity model considered only exact matches between
the data in the indexing structure and the query data as similar.
"""
# noinspection PyUnusedLocal
def __init__(self, **kwargs):
super(ExactMatchSimilarity, self).__init__()
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
To determine exact matches a brute-force algorithm is used thus
the data remains as is and no special indexing structure is
implemented.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
.. todo::
It might be worth looking into a more efficient way of determining
exact matches.
"""
self._id_map = id_map
self._indexing_structure = data
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure identifying exact matches. The results are
returned in a collection of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
result = []
for i, feat in enumerate(self._indexing_structure):
total = 0
for j, val in enumerate(data_point):
total += math.pow(val - feat[j], 2)
if total == 0.0:
result.append(Stat(self._id_map[i]))
return result
class CosineSimilarity(ISimilarity):
"""The cosine similarity model.
Cosine similarity is a measure of similarity between two vectors of an inner
product space that measures the cosine of the angle between them. The cosine
    of 0 degrees is 1, and it is less than 1 for any other angle. It is thus a
    judgement of orientation and not magnitude: two vectors with the same
orientation have a cosine similarity of 1, two vectors at 90 degrees have a
similarity of 0, and two vectors diametrically opposed have a similarity of -1,
independent of their magnitude [1]_.
The cosine model employs the
`cosine_similarity <http://scikit-learn.org/stable/modules/metrics.html#cosine-similarity>`_
function from the :mod:`sklearn.metrics.pairwise` module to determine similarity.
.. seealso::
`Machine Learning::Cosine Similarity for Vector Space Models (Part III)
<http://blog.christianperone.com/?p=2497>`_
References
----------
    .. [1] `Wikipedia::cosine_similarity <https://en.wikipedia.org/wiki/Cosine_similarity>`_
"""
# noinspection PyUnusedLocal
def __init__(self, **kwargs):
super(CosineSimilarity, self).__init__()
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
The cosine_similarity function from :mod:`sklearn.metrics.pairwise` takes
the raw data as input. Thus the data remains as is and no special indexing
structure is implemented.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
self._indexing_structure = data
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the function :func:`cosine_similarity` from
:mod:`sklearn.metrics.pairwise`.
The resulting similarity ranges from -1 meaning exactly opposite, to 1
meaning exactly the same, with 0 indicating orthogonality (decorrelation),
and in-between values indicating intermediate similarity or dissimilarity.
The results are returned in a collection of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
similarity = cosine_similarity(data_point, self._indexing_structure)
if not np.any(data_point):
similarity = np.array([[float(np.array_equal(data_point, m)) for m in np.array(self._indexing_structure)]])
return [Stat(self._id_map[i], x) for i, x in enumerate(similarity[0])]
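# For illustration, cosine similarity is magnitude-invariant, so a stored
# vector and a scaled copy of the query score 1.0 (made-up data again):
#   model = CosineSimilarity()
#   model.build_indexing_structure(np.array([[2., 0.], [0., 2.]]), {0: 0, 1: 1})
#   model.compute_similarity([1., 0.])[0].similarity   # ~1.0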
|
mit
| -1,097,991,111,061,715,100 | 31.985102 | 119 | 0.603342 | false | 4.835654 | false | false | false |
larsks/cobbler-larsks
|
koan/utils.py
|
1
|
16252
|
"""
koan = kickstart over a network
general usage functions
Copyright 2006-2008 Red Hat, Inc.
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import random
import os
import traceback
import tempfile
import exceptions
ANCIENT_PYTHON = 0
try:
try:
import subprocess as sub_process
except:
import sub_process
import urllib2
except:
ANCIENT_PYTHON = 1
import time
import shutil
import errno
import re
import sys
import xmlrpclib
import string
import re
import glob
import socket
import shutil
import tempfile
import urlgrabber
VIRT_STATE_NAME_MAP = {
0 : "running",
1 : "running",
2 : "running",
3 : "paused",
4 : "shutdown",
5 : "shutdown",
6 : "crashed"
}
class InfoException(exceptions.Exception):
"""
Custom exception for tracking of fatal errors.
"""
def __init__(self,value,**args):
self.value = value % args
self.from_koan = 1
def __str__(self):
return repr(self.value)
def setupLogging(appname):
"""
set up logging ... code borrowed/adapted from virt-manager
"""
import logging
import logging.handlers
dateFormat = "%a, %d %b %Y %H:%M:%S"
fileFormat = "[%(asctime)s " + appname + " %(process)d] %(levelname)s (%(module)s:%(lineno)d) %(message)s"
streamFormat = "%(asctime)s %(levelname)-8s %(message)s"
filename = "/var/log/koan/koan.log"
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
fileHandler = logging.handlers.RotatingFileHandler(filename, "a",
1024*1024, 5)
fileHandler.setFormatter(logging.Formatter(fileFormat,
dateFormat))
rootLogger.addHandler(fileHandler)
streamHandler = logging.StreamHandler(sys.stderr)
streamHandler.setFormatter(logging.Formatter(streamFormat,
dateFormat))
streamHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(streamHandler)
def urlread(url):
"""
to support more distributions, implement (roughly) some
parts of urlread and urlgrab from urlgrabber, in ways that
are less cool and less efficient.
"""
print "- reading URL: %s" % url
if url is None or url == "":
raise InfoException, "invalid URL: %s" % url
elif url[0:3] == "nfs":
try:
ndir = os.path.dirname(url[6:])
nfile = os.path.basename(url[6:])
nfsdir = tempfile.mkdtemp(prefix="koan_nfs",dir="/tmp")
nfsfile = os.path.join(nfsdir,nfile)
cmd = ["mount","-t","nfs","-o","ro", ndir, nfsdir]
subprocess_call(cmd)
fd = open(nfsfile)
data = fd.read()
fd.close()
cmd = ["umount",nfsdir]
subprocess_call(cmd)
return data
except:
traceback.print_exc()
raise InfoException, "Couldn't mount and read URL: %s" % url
elif url[0:4] == "http":
try:
fd = urllib2.urlopen(url)
data = fd.read()
fd.close()
return data
except:
if ANCIENT_PYTHON:
# this logic is to support python 1.5 and EL 2
import urllib
fd = urllib.urlopen(url)
data = fd.read()
fd.close()
return data
traceback.print_exc()
raise InfoException, "Couldn't download: %s" % url
elif url[0:4] == "file":
try:
fd = open(url[5:])
data = fd.read()
fd.close()
return data
except:
raise InfoException, "Couldn't read file from URL: %s" % url
else:
raise InfoException, "Unhandled URL protocol: %s" % url
def urlgrab(url,saveto):
"""
like urlread, but saves contents to disk.
see comments for urlread as to why it's this way.
"""
data = urlread(url)
fd = open(saveto, "w+")
fd.write(data)
fd.close()
def subprocess_call(cmd,ignore_rc=0):
"""
Wrapper around subprocess.call(...)
"""
print "- %s" % cmd
if not ANCIENT_PYTHON:
rc = sub_process.call(cmd)
else:
cmd = string.join(cmd, " ")
print "cmdstr=(%s)" % cmd
rc = os.system(cmd)
if rc != 0 and not ignore_rc:
raise InfoException, "command failed (%s)" % rc
return rc
def input_string_or_hash(options,delim=None,allow_multiples=True):
"""
    Older cobbler files stored configurations in a flat way, such that all values were strings.
Newer versions of cobbler allow dictionaries. This function is used to allow loading
of older value formats so new users of cobbler aren't broken in an upgrade.
"""
if options is None:
return {}
elif type(options) == list:
raise InfoException("No idea what to do with list: %s" % options)
elif type(options) == type(""):
new_dict = {}
tokens = string.split(options, delim)
for t in tokens:
tokens2 = string.split(t,"=")
if len(tokens2) == 1:
# this is a singleton option, no value
key = tokens2[0]
value = None
else:
key = tokens2[0]
value = tokens2[1]
# if we're allowing multiple values for the same key,
# check to see if this token has already been
# inserted into the dictionary of values already
if key in new_dict.keys() and allow_multiples:
# if so, check to see if there is already a list of values
# otherwise convert the dictionary value to an array, and add
# the new value to the end of the list
if type(new_dict[key]) == list:
new_dict[key].append(value)
else:
new_dict[key] = [new_dict[key], value]
else:
new_dict[key] = value
# dict.pop is not avail in 2.2
if new_dict.has_key(""):
del new_dict[""]
return new_dict
elif type(options) == type({}):
options.pop('',None)
return options
else:
raise InfoException("invalid input type: %s" % type(options))
def hash_to_string(hash):
"""
Convert a hash to a printable string.
used primarily in the kernel options string
and for some legacy stuff where koan expects strings
(though this last part should be changed to hashes)
"""
buffer = ""
if type(hash) != dict:
return hash
for key in hash:
value = hash[key]
if value is None:
buffer = buffer + str(key) + " "
elif type(value) == list:
# this value is an array, so we print out every
# key=value
for item in value:
buffer = buffer + str(key) + "=" + str(item) + " "
else:
buffer = buffer + str(key) + "=" + str(value) + " "
return buffer
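# For illustration, hash_to_string is roughly the inverse of the function
# above (key order depends on the dict):
# hash_to_string({'dns': '8.8.8.8', 'noipv6': None}) -> 'noipv6 dns=8.8.8.8 '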
def nfsmount(input_path):
# input: [user@]server:/foo/bar/x.img as string
# output: (dirname where mounted, last part of filename) as 2-element tuple
# FIXME: move this function to util.py so other modules can use it
# we have to mount it first
filename = input_path.split("/")[-1]
dirpath = string.join(input_path.split("/")[:-1],"/")
tempdir = tempfile.mkdtemp(suffix='.mnt', prefix='koan_', dir='/tmp')
mount_cmd = [
"/bin/mount", "-t", "nfs", "-o", "ro", dirpath, tempdir
]
print "- running: %s" % mount_cmd
rc = sub_process.call(mount_cmd)
if not rc == 0:
shutil.rmtree(tempdir, ignore_errors=True)
        raise InfoException("nfs mount failed: %s" % dirpath)
# NOTE: option for a blocking install might be nice, so we could do this
# automatically, if supported by python-virtinst
print "after install completes, you may unmount and delete %s" % tempdir
return (tempdir, filename)
def find_vm(conn, vmid):
"""
Extra bonus feature: vmid = -1 returns a list of everything
This function from Func: fedorahosted.org/func
"""
vms = []
# this block of code borrowed from virt-manager:
# get working domain's name
ids = conn.listDomainsID();
for id in ids:
vm = conn.lookupByID(id)
vms.append(vm)
# get defined domain
names = conn.listDefinedDomains()
for name in names:
vm = conn.lookupByName(name)
vms.append(vm)
if vmid == -1:
return vms
for vm in vms:
if vm.name() == vmid:
return vm
raise InfoException("koan could not find the VM to watch: %s" % vmid)
def get_vm_state(conn, vmid):
"""
Returns the state of a libvirt VM, by name.
From Func: fedorahosted.org/func
"""
state = find_vm(conn, vmid).info()[0]
return VIRT_STATE_NAME_MAP.get(state,"unknown")
def check_dist():
"""
Determines what distro we're running under.
"""
if os.path.exists("/etc/debian_version"):
import lsb_release
return lsb_release.get_distro_information()['ID'].lower()
elif os.path.exists("/etc/SuSE-release"):
return "suse"
else:
# valid for Fedora and all Red Hat / Fedora derivatives
return "redhat"
def os_release():
"""
This code is borrowed from Cobbler and really shouldn't be repeated.
"""
if ANCIENT_PYTHON:
return ("unknown", 0)
if check_dist() == "redhat":
fh = open("/etc/redhat-release")
data = fh.read().lower()
if data.find("fedora") != -1:
make = "fedora"
elif data.find("centos") != -1:
make = "centos"
else:
make = "redhat"
release_index = data.find("release")
rest = data[release_index+7:-1]
tokens = rest.split(" ")
for t in tokens:
try:
return (make,float(t))
except ValueError, ve:
pass
        raise InfoException("failed to detect local OS version from /etc/redhat-release")
elif check_dist() == "debian":
import lsb_release
release = lsb_release.get_distro_information()['RELEASE']
return ("debian", release)
elif check_dist() == "ubuntu":
version = sub_process.check_output(("lsb_release","--release","--short")).rstrip()
make = "ubuntu"
return (make, float(version))
elif check_dist() == "suse":
fd = open("/etc/SuSE-release")
for line in fd.read().split("\n"):
if line.find("VERSION") != -1:
version = line.replace("VERSION = ","")
if line.find("PATCHLEVEL") != -1:
rest = line.replace("PATCHLEVEL = ","")
make = "suse"
return (make, float(version))
else:
return ("unknown",0)
def uniqify(lst, purge=None):
temp = {}
for x in lst:
temp[x] = 1
if purge is not None:
temp2 = {}
for x in temp.keys():
if x != purge:
temp2[x] = 1
temp = temp2
return temp.keys()
def get_network_info():
try:
import ethtool
except:
try:
import rhpl.ethtool
ethtool = rhpl.ethtool
except:
raise InfoException("the rhpl or ethtool module is required to use this feature (is your OS>=EL3?)")
interfaces = {}
# get names
inames = ethtool.get_devices()
for iname in inames:
mac = ethtool.get_hwaddr(iname)
if mac == "00:00:00:00:00:00":
mac = "?"
try:
ip = ethtool.get_ipaddr(iname)
if ip == "127.0.0.1":
ip = "?"
except:
ip = "?"
bridge = 0
module = ""
try:
nm = ethtool.get_netmask(iname)
except:
nm = "?"
interfaces[iname] = {
"ip_address" : ip,
"mac_address" : mac,
"netmask" : nm,
"bridge" : bridge,
"module" : module
}
# print interfaces
return interfaces
def connect_to_server(server=None,port=None):
if server is None:
server = os.environ.get("COBBLER_SERVER","")
if server == "":
raise InfoException("--server must be specified")
if port is None:
port = 25151
connect_ok = 0
try_urls = [
"http://%s/cobbler_api" % (server),
"https://%s/cobbler_api" % (server),
]
for url in try_urls:
print "- looking for Cobbler at %s" % url
server = __try_connect(url)
if server is not None:
return server
raise InfoException ("Could not find Cobbler.")
def create_xendomains_symlink(name):
"""
Create an /etc/xen/auto/<name> symlink for use with "xendomains"-style
VM boot upon dom0 reboot.
"""
src = "/etc/xen/%s" % name
dst = "/etc/xen/auto/%s" % name
# check that xen config file exists and create symlink
if os.path.exists(src) and os.access(os.path.dirname(dst), os.W_OK):
os.symlink(src, dst)
else:
raise InfoException("Could not create /etc/xen/auto/%s symlink. Please check write permissions and ownership" % name)
def libvirt_enable_autostart(domain_name):
import libvirt
try:
conn = libvirt.open("qemu:///system")
conn.listDefinedDomains()
domain = conn.lookupByName(domain_name)
domain.setAutostart(1)
except:
raise InfoException("libvirt could not find domain %s" % domain_name)
if not domain.autostart:
raise InfoException("Could not enable autostart on domain %s." % domain_name)
def make_floppy(kickstart):
(fd, floppy_path) = tempfile.mkstemp(suffix='.floppy', prefix='tmp', dir="/tmp")
print "- creating floppy image at %s" % floppy_path
# create the floppy image file
cmd = "dd if=/dev/zero of=%s bs=1440 count=1024" % floppy_path
print "- %s" % cmd
rc = os.system(cmd)
if not rc == 0:
raise InfoException("dd failed")
# vfatify
cmd = "mkdosfs %s" % floppy_path
print "- %s" % cmd
rc = os.system(cmd)
if not rc == 0:
raise InfoException("mkdosfs failed")
# mount the floppy
mount_path = tempfile.mkdtemp(suffix=".mnt", prefix='tmp', dir="/tmp")
cmd = "mount -o loop -t vfat %s %s" % (floppy_path, mount_path)
print "- %s" % cmd
rc = os.system(cmd)
if not rc == 0:
raise InfoException("mount failed")
# download the kickstart file onto the mounted floppy
print "- downloading %s" % kickstart
save_file = os.path.join(mount_path, "unattended.txt")
urlgrabber.urlgrab(kickstart,filename=save_file)
# umount
cmd = "umount %s" % mount_path
print "- %s" % cmd
rc = os.system(cmd)
if not rc == 0:
raise InfoException("umount failed")
# return the path to the completed disk image to pass to virtinst
return floppy_path
def sync_file(ofile, nfile, uid, gid, mode):
sub_process.call(['/usr/bin/diff', ofile, nfile])
shutil.copy(nfile, ofile)
os.chmod(ofile,mode)
os.chown(ofile,uid,gid)
#class ServerProxy(xmlrpclib.ServerProxy):
#
# def __init__(self, url=None):
# try:
# xmlrpclib.ServerProxy.__init__(self, url, allow_none=True)
# except:
# # for RHEL3's xmlrpclib -- cobblerd should strip Nones anyway
# xmlrpclib.ServerProxy.__init__(self, url)
def __try_connect(url):
try:
xmlrpc_server = xmlrpclib.Server(url)
xmlrpc_server.ping()
return xmlrpc_server
except:
traceback.print_exc()
return None
|
gpl-2.0
| 3,835,868,868,866,504,700 | 28.549091 | 126 | 0.580852 | false | 3.748155 | false | false | false |
h4wkmoon/shinken
|
shinken/macroresolver.py
|
1
|
17498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This class resolves macros in commands by looking at the macros list
# in the class of each element. It gives a property that can be callable or not.
# If not callable, it's a simple property and we replace the macro with its value.
# If callable, it's a method that is called to get the value. For example, to
# get the number of services on a host, we call a method that returns
# len(host.services)
import re
import time
from shinken.borg import Borg
class MacroResolver(Borg):
"""Please Add a Docstring to describe the class here"""
my_type = 'macroresolver'
# Global macros
macros = {
'TOTALHOSTSUP': '_get_total_hosts_up',
'TOTALHOSTSDOWN': '_get_total_hosts_down',
'TOTALHOSTSUNREACHABLE': '_get_total_hosts_unreachable',
        'TOTALHOSTSDOWNUNHANDLED': '_get_total_hosts_down_unhandled',
        'TOTALHOSTSUNREACHABLEUNHANDLED': '_get_total_hosts_unreachable_unhandled',
        'TOTALHOSTPROBLEMS': '_get_total_hosts_problems',
        'TOTALHOSTPROBLEMSUNHANDLED': '_get_total_hosts_problems_unhandled',
'TOTALSERVICESOK': '_get_total_service_ok',
'TOTALSERVICESWARNING': '_get_total_services_warning',
'TOTALSERVICESCRITICAL': '_get_total_services_critical',
'TOTALSERVICESUNKNOWN': '_get_total_services_unknown',
'TOTALSERVICESWARNINGUNHANDLED': '_get_total_services_warning_unhandled',
'TOTALSERVICESCRITICALUNHANDLED': '_get_total_services_critical_unhandled',
'TOTALSERVICESUNKNOWNUNHANDLED': '_get_total_services_unknown_unhandled',
'TOTALSERVICEPROBLEMS': '_get_total_service_problems',
'TOTALSERVICEPROBLEMSUNHANDLED': '_get_total_service_problems_unhandled',
'LONGDATETIME': '_get_long_date_time',
'SHORTDATETIME': '_get_short_date_time',
'DATE': '_get_date',
'TIME': '_get_time',
'TIMET': '_get_timet',
'PROCESSSTARTTIME': '_get_process_start_time',
'EVENTSTARTTIME': '_get_events_start_time',
}
# This must be called ONCE. It just put links for elements
# by scheduler
def init(self, conf):
# For searching class and elements for ondemand
# we need link to types
self.conf = conf
self.lists_on_demand = []
self.hosts = conf.hosts
# For special void host_name handling...
self.host_class = self.hosts.inner_class
self.lists_on_demand.append(self.hosts)
self.services = conf.services
self.contacts = conf.contacts
self.lists_on_demand.append(self.contacts)
self.hostgroups = conf.hostgroups
self.lists_on_demand.append(self.hostgroups)
self.commands = conf.commands
self.servicegroups = conf.servicegroups
self.lists_on_demand.append(self.servicegroups)
self.contactgroups = conf.contactgroups
self.lists_on_demand.append(self.contactgroups)
self.illegal_macro_output_chars = conf.illegal_macro_output_chars
self.output_macros = ['HOSTOUTPUT', 'HOSTPERFDATA', 'HOSTACKAUTHOR', 'HOSTACKCOMMENT', 'SERVICEOUTPUT', 'SERVICEPERFDATA', 'SERVICEACKAUTHOR', 'SERVICEACKCOMMENT']
# Try cache :)
#self.cache = {}
# Return all macros of a string, so cut the $
# And create a dict with it:
# val: value, not set here
# type: type of macro, like class one, or ARGN one
def _get_macros(self, s):
#if s in self.cache:
# return self.cache[s]
p = re.compile(r'(\$)')
elts = p.split(s)
macros = {}
in_macro = False
for elt in elts:
if elt == '$':
in_macro = not in_macro
elif in_macro:
macros[elt] = {'val': '', 'type': 'unknown'}
#self.cache[s] = macros
if '' in macros:
del macros['']
return macros
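    # For illustration (not a doctest), _get_macros("ping $HOSTNAME$ -c $ARG1$")
    # returns:
    #   {'HOSTNAME': {'val': '', 'type': 'unknown'},
    #    'ARG1': {'val': '', 'type': 'unknown'}}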
# Get a value from a property of a element
# Prop can be a function or a property
# So we call it or not
def _get_value_from_element(self, elt, prop):
try:
value = getattr(elt, prop)
if callable(value):
return unicode(value())
else:
return unicode(value)
except AttributeError, exp:
# Return no value
return ''
except UnicodeError, exp:
if isinstance(value, str):
return unicode(value, 'utf8', errors='ignore')
else:
return ''
    # For some macros, we need to delete unwanted characters
    def _delete_unwanted_characters(self, s):
for c in self.illegal_macro_output_chars:
s = s.replace(c, '')
return s
# return a dict with all environment variable came from
# the macros of the datas object
def get_env_macros(self, data):
env = {}
for o in data:
cls = o.__class__
macros = cls.macros
for macro in macros:
if macro.startswith("USER"):
break
#print "Macro in %s: %s" % (o.__class__, macro)
prop = macros[macro]
value = self._get_value_from_element(o, prop)
env['NAGIOS_%s' % macro] = value
if hasattr(o, 'customs'):
# make NAGIOS__HOSTMACADDR from _MACADDR
for cmacro in o.customs:
env['NAGIOS__' + o.__class__.__name__.upper() + cmacro[1:].upper()] = o.customs[cmacro]
return env
# This function will look at elements in data (and args if it filled)
# to replace the macros in c_line with real value.
def resolve_simple_macros_in_string(self, c_line, data, args=None):
# Now we prepare the classes for looking at the class.macros
data.append(self) # For getting global MACROS
if hasattr(self, 'conf'):
data.append(self.conf) # For USERN macros
clss = [d.__class__ for d in data]
# we should do some loops for nested macros
# like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if
# $USER1$ is pointing to $USER34$ etc etc, we should loop
        # until we reach the bottom. So the last loop is when we no
        # longer have macros :)
still_got_macros = True
nb_loop = 0
while still_got_macros:
nb_loop += 1
# Ok, we want the macros in the command line
macros = self._get_macros(c_line)
# We can get out if we do not have macros this loop
still_got_macros = (len(macros) != 0)
#print "Still go macros:", still_got_macros
# Put in the macros the type of macro for all macros
self._get_type_of_macro(macros, clss)
# Now we get values from elements
for macro in macros:
# If type ARGN, look at ARGN cutting
if macros[macro]['type'] == 'ARGN' and args is not None:
macros[macro]['val'] = self._resolve_argn(macro, args)
macros[macro]['type'] = 'resolved'
# If class, get value from properties
if macros[macro]['type'] == 'class':
cls = macros[macro]['class']
for elt in data:
if elt is not None and elt.__class__ == cls:
prop = cls.macros[macro]
macros[macro]['val'] = self._get_value_from_element(elt, prop)
                            # Now check if we have an 'output' macro. If so, we must
                            # delete all special characters that can be dangerous
                            if macro in self.output_macros:
                                macros[macro]['val'] = self._delete_unwanted_characters(macros[macro]['val'])
if macros[macro]['type'] == 'CUSTOM':
cls_type = macros[macro]['class']
# Beware : only cut the first _HOST value, so the macro name can have it on it...
macro_name = re.split('_' + cls_type, macro, 1)[1].upper()
# Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS
# Now we get the element in data that have the type HOST
# and we check if it got the custom value
for elt in data:
if elt is not None and elt.__class__.my_type.upper() == cls_type:
if '_' + macro_name in elt.customs:
macros[macro]['val'] = elt.customs['_' + macro_name]
                            # Then look at the macromodulations, in reverse order, so
                            # the last one to set a value will be the first one found.
                            # (yes, don't want to play with break and such things sorry...)
mms = getattr(elt, 'macromodulations', [])
for mm in mms[::-1]:
# Look if the modulation got the value, but also if it's currently active
if '_' + macro_name in mm.customs and mm.is_active():
macros[macro]['val'] = mm.customs['_' + macro_name]
if macros[macro]['type'] == 'ONDEMAND':
macros[macro]['val'] = self._resolve_ondemand(macro, data)
# We resolved all we can, now replace the macro in the command call
for macro in macros:
c_line = c_line.replace('$'+macro+'$', macros[macro]['val'])
# A $$ means we want a $, it's not a macro!
# We replace $$ by a big dirty thing to be sure to not misinterpret it
c_line = c_line.replace("$$", "DOUBLEDOLLAR")
if nb_loop > 32: # too much loop, we exit
still_got_macros = False
# We now replace the big dirty token we made by only a simple $
c_line = c_line.replace("DOUBLEDOLLAR", "$")
#print "Retuning c_line", c_line.strip()
return c_line.strip()
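    # A minimal sketch of the expected call pattern (host is a hypothetical
    # scheduler host object whose class defines the HOST* macros):
    #   mr = MacroResolver()
    #   mr.init(conf)
    #   cmd = mr.resolve_simple_macros_in_string(
    #       '/bin/ping $HOSTADDRESS$ -c $ARG1$', [host], args=['3'])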
# Resolve a command with macro by looking at data classes.macros
# And get macro from item properties.
def resolve_command(self, com, data):
c_line = com.command.command_line
return self.resolve_simple_macros_in_string(c_line, data, args=com.args)
# For all Macros in macros, set the type by looking at the
# MACRO name (ARGN? -> argn_type,
# HOSTBLABLA -> class one and set Host in class)
# _HOSTTOTO -> HOST CUSTOM MACRO TOTO
# $SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of
# the service Load of host srv-1
def _get_type_of_macro(self, macros, clss):
for macro in macros:
# ARGN Macros
if re.match('ARG\d', macro):
macros[macro]['type'] = 'ARGN'
continue
# USERN macros
# are managed in the Config class, so no
# need to look that here
elif re.match('_HOST\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'HOST'
continue
elif re.match('_SERVICE\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'SERVICE'
# value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1]
continue
elif re.match('_CONTACT\w', macro):
macros[macro]['type'] = 'CUSTOM'
macros[macro]['class'] = 'CONTACT'
continue
# On demand macro
elif len(macro.split(':')) > 1:
macros[macro]['type'] = 'ONDEMAND'
continue
# OK, classical macro...
for cls in clss:
if macro in cls.macros:
macros[macro]['type'] = 'class'
macros[macro]['class'] = cls
continue
# Resolve MACROS for the ARGN
def _resolve_argn(self, macro, args):
        # first, get the argument index (ARG1 -> args[0], etc.)
        r = re.search('ARG(?P<id>\d+)', macro)
        if r is None:
            return ''
        id = int(r.group('id')) - 1
        try:
            return args[id]
        except IndexError:
            return ''
# Resolve on-demand macro, quite hard in fact
def _resolve_ondemand(self, macro, data):
#print "\nResolving macro", macro
elts = macro.split(':')
nb_parts = len(elts)
macro_name = elts[0]
# Len 3 == service, 2 = all others types...
if nb_parts == 3:
val = ''
#print "Got a Service on demand asking...", elts
(host_name, service_description) = (elts[1], elts[2])
# host_name can be void, so it's the host in data
# that is important. We use our self.host_class to
# find the host in the data :)
if host_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
host_name = elt.host_name
# Ok now we get service
s = self.services.find_srv_by_name_and_hostname(host_name, service_description)
if s is not None:
cls = s.__class__
prop = cls.macros[macro_name]
val = self._get_value_from_element(s, prop)
#print "Got val:", val
return val
# Ok, service was easy, now hard part
else:
val = ''
elt_name = elts[1]
# Special case: elt_name can be void
# so it's the host where it apply
if elt_name == '':
for elt in data:
if elt is not None and elt.__class__ == self.host_class:
elt_name = elt.host_name
            for on_demand_list in self.lists_on_demand:
                cls = on_demand_list.inner_class
                # We search our type by looking at the macro
                if macro_name in cls.macros:
                    prop = cls.macros[macro_name]
                    i = on_demand_list.find_by_name(elt_name)
                    if i is not None:
                        val = self._get_value_from_element(i, prop)
                        # Ok we got our value :)
                        break
            return val
return ''
# Get Fri 15 May 11:42:39 CEST 2009
def _get_long_date_time(self):
return time.strftime("%a %d %b %H:%M:%S %Z %Y").decode('UTF-8', 'ignore')
# Get 10-13-2000 00:30:28
def _get_short_date_time(self):
return time.strftime("%d-%m-%Y %H:%M:%S")
# Get 10-13-2000
def _get_date(self):
return time.strftime("%d-%m-%Y")
# Get 00:30:28
def _get_time(self):
return time.strftime("%H:%M:%S")
# Get epoch time
def _get_timet(self):
return str(int(time.time()))
def _get_total_hosts_up(self):
return len([h for h in self.hosts if h.state == 'UP'])
def _get_total_hosts_down(self):
return len([h for h in self.hosts if h.state == 'DOWN'])
def _get_total_hosts_unreachable(self):
return len([h for h in self.hosts if h.state == 'UNREACHABLE'])
    # TODO
    def _get_total_hosts_unreachable_unhandled(self):
        return 0
    # TODO
    def _get_total_hosts_down_unhandled(self):
        return 0
def _get_total_hosts_problems(self):
return len([h for h in self.hosts if h.is_problem])
def _get_total_hosts_problems_unhandled(self):
return 0
def _get_total_service_ok(self):
return len([s for s in self.services if s.state == 'OK'])
def _get_total_services_warning(self):
return len([s for s in self.services if s.state == 'WARNING'])
def _get_total_services_critical(self):
return len([s for s in self.services if s.state == 'CRITICAL'])
def _get_total_services_unknown(self):
return len([s for s in self.services if s.state == 'UNKNOWN'])
# TODO
def _get_total_services_warning_unhandled(self):
return 0
def _get_total_services_critical_unhandled(self):
return 0
def _get_total_services_unknown_unhandled(self):
return 0
def _get_total_service_problems(self):
return len([s for s in self.services if s.is_problem])
def _get_total_service_problems_unhandled(self):
return 0
def _get_process_start_time(self):
return 0
def _get_events_start_time(self):
return 0
|
agpl-3.0
| -1,762,913,324,187,749,400 | 39.133028 | 171 | 0.552349 | false | 3.9687 | false | false | false |
bbondy/brianbondy.gae
|
libs/markdown/extensions/meta.py
|
1
|
2697
|
#!/usr/bin/python
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<p>The body. This is paragraph one.</p>'
>>> md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> md.convert(text)
u'<pre><code>Some Code - not extra lines of meta data.\\n</code></pre>'
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://www.freewisdom.org/project/python-markdown/Meta-Data>
Contact: [email protected]
License: BSD (see ../docs/LICENSE for details)
"""
import markdown, re
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (markdown.Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
class MetaPreprocessor(markdown.preprocessors.Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
        while lines:
line = lines.pop(0)
if line.strip() == '':
break # blank line - done
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
meta[key] = [m1.group('value').strip()]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(configs={}):
return MetaExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mit
| -9,115,914,859,540,492,000 | 27.966667 | 99 | 0.531702 | false | 3.751043 | false | false | false |
shadowmint/nwidget
|
lib/cocos2d-0.5.5/test/test_transition_fadebl.py
|
1
|
1224
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 0.5, s, t 1, s, t 1.5, s, t 2.1, s, q"
tags = "FadeBLTransition"
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.scenes import *
from cocos.sprite import *
import pyglet
from pyglet.gl import *
class BackgroundLayer(cocos.layer.Layer):
def __init__(self):
super(BackgroundLayer, self).__init__()
self.img = pyglet.resource.image('background_image.png')
def draw( self ):
glColor4ub(255, 255, 255, 255)
glPushMatrix()
self.transform()
self.img.blit(0,0)
glPopMatrix()
def main():
director.init( resizable=True )
scene1 = cocos.scene.Scene()
scene2 = cocos.scene.Scene()
colorl = ColorLayer(32,32,255,255)
sprite = Sprite( 'grossini.png', (320,240) )
colorl.add( sprite )
scene1.add( BackgroundLayer(), z=0 )
scene2.add( colorl, z=0 )
director.run( FadeBLTransition( scene1, 2, scene2) )
if __name__ == '__main__':
main()
|
apache-2.0
| -6,601,661,051,400,266,000 | 24.608696 | 72 | 0.619281 | false | 3.083123 | false | false | false |
akanouras/dm-tool
|
dm_tool.py
|
1
|
11221
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import print_function # , unicode_literals
# Based on dm-tool.c from the LightDM project.
# Original Author: Robert Ancell <[email protected]>
# Copyright (C) 2013 Antonis Kanouras <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version. See http://www.gnu.org/copyleft/gpl.html the full text of the
# license.
# Constants
__version__ = '1.6.0.metadosis0'
__all__ = ['DMTool']
COMMANDS_HELP = '''\
Commands:
switch-to-greeter Switch to the greeter
switch-to-user USERNAME [SESSION] Switch to a user session
switch-to-guest [SESSION] Switch to a guest session
lock Lock the current seat
list-seats List the active seats
add-nested-seat [XEPHYR_ARGS...] Start a nested display
add-local-x-seat DISPLAY_NUMBER Add a local X seat
add-seat TYPE [NAME=VALUE...] Add a dynamic seat
'''
import os
import sys
import errno
import signal
import traceback
import argparse
import collections
import itertools
from io import StringIO
import dbus
# Python 3 compatibility
try:
unicode
except NameError:
unicode = str
u = unicode
def get_free_display_number():
'''Get a unique display number.
It's racy, but the only reliable method to get one.'''
for display_number in itertools.count():
try:
os.stat('/tmp/.X{0}-lock'.format(display_number))
except OSError as e:
if e.errno == errno.ENOENT:
return display_number
else:
raise
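# Illustrative only: the add-nested-seat command (see COMMANDS_HELP above)
# would combine this with Xephyr roughly like
#   display_number = get_free_display_number()
#   xephyr_argv = ['Xephyr', ':{0}'.format(display_number)]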
class DBusFormats(collections.defaultdict):
    'Dict of dbus.types.*: (format, formatter)'
    default_formats = {
        dbus.String: ("{0}{1}='{2}'", lambda x: x),
        dbus.Boolean: ("{0}{1}={2}", lambda x: u(bool(x)).lower()),
    }
    def __init__(self, default_format=None, default_formats=None):
        # defaultdict only consults the factory passed to its constructor
        # (a class attribute does not set the C-level default_factory slot),
        # so hand the fallback format over explicitly.
        if default_format is None:
            default_format = lambda: ("{0}{1}={2}", lambda x: x)
        collections.defaultdict.__init__(self, default_format)
        if default_formats is not None:
            self.default_formats = default_formats
        self.update(self.default_formats)
class DMTool(object):
__doc__ = COMMANDS_HELP
# Dict of method: path
_dbus_paths = {
'SwitchToGreeter': '/org/freedesktop/DisplayManager/Seat',
'SwitchToUser': '/org/freedesktop/DisplayManager/Seat',
'SwitchToGuest': '/org/freedesktop/DisplayManager/Seat',
'Lock': '/org/freedesktop/DisplayManager/Seat',
'AddLocalXSeat': '/org/freedesktop/DisplayManager',
'AddSeat': '/org/freedesktop/DisplayManager',
}
_dbus_formats = DBusFormats()
def __init__(self, bus=None):
'bus must be a dbus.*Bus instance'
if not os.environ.get('XDG_SEAT_PATH', '').startswith(
'/org/freedesktop/DisplayManager/Seat'):
raise Exception('Not running inside a display manager,'
' XDG_SEAT_PATH is invalid or not defined')
if bus is None:
bus = dbus.SystemBus()
self._bus = bus
def __call__(self, command, *args, **kwargs):
'Call a command argv-style, see self.__doc__ for details'
command = getattr(self, command.replace('-', '_'))
return command(*args, **kwargs)
@staticmethod
def _path_to_interface(path):
return path.rstrip('0123456789').lstrip('/').replace('/', '.')
def _get_proxy(self, path):
return self._bus.get_object('org.freedesktop.DisplayManager', path)
def _dbus_call(self, method, *args, **kwargs):
'Call one of the predefined dbus methods'
object_path = self._dbus_paths[method]
interface = self._path_to_interface(object_path)
if object_path == '/org/freedesktop/DisplayManager/Seat':
object_path = os.environ['XDG_SEAT_PATH']
proxy = self._get_proxy(object_path)
method = proxy.get_dbus_method(
method,
dbus_interface=interface)
return method(*args, **kwargs)
@classmethod
def _get_commands(self):
'Returns a dict of command: description'
return {cmd.replace('_', '-'): getattr(self, cmd).__doc__
for cmd in dir(self) if not cmd.startswith('_')}
def switch_to_greeter(self):
'Switch to the greeter'
return self._dbus_call('SwitchToGreeter')
def switch_to_user(self, username, session=None):
'Switch to a user session'
return self._dbus_call('SwitchToUser', username, session or '')
def switch_to_guest(self, session=None):
'Switch to a guest session'
return self._dbus_call('SwitchToGuest', session or '')
def lock(self):
'Lock the current seat'
return self._dbus_call('Lock')
def list_seats(self):
'List the active seats'
def get_properties(proxy):
interface = self._path_to_interface(proxy.object_path)
return proxy.GetAll(interface, dbus_interface=dbus.PROPERTIES_IFACE)
def get_name_from_path(path):
return path.split('/org/freedesktop/DisplayManager/')[-1]
def print_item(key, value, indent=0, file=None):
fmt, formatter = self._dbus_formats[type(value)]
print(u(fmt).format(' ' * indent, key, formatter(value)), file=file)
def print_path(path, exclude=None, indent=0, file=None):
path_proxy = self._get_proxy(path)
path_name = get_name_from_path(path)
print(u('{0}{1}').format(' ' * indent, path_name), file=file)
indent += 2
descend_paths = []
path_properties = get_properties(path_proxy)
for key, value in sorted(path_properties.items()):
if value == exclude:
continue
if isinstance(value, dbus.Array):
if len(value) > 0 and isinstance(value[0], dbus.ObjectPath):
descend_paths += value
continue
print_item(key, value, indent=indent, file=file)
for descend_path in descend_paths:
print_path(descend_path, exclude=path, indent=indent, file=file)
output = StringIO()
dm_proxy = self._get_proxy('/org/freedesktop/DisplayManager')
seats = get_properties(dm_proxy)['Seats']
for seat in sorted(seats):
print_path(seat, file=output)
return output.getvalue().rstrip('\n')
def add_nested_seat(self, *xephyr_args):
'Start a nested display'
def xephyr_signal_handler(sig, frame):
# Fugly, nonlocal (Py3K+) would make this prettier
xephyr_signal_handler.was_called = True
def setup_xephyr_handler():
xephyr_signal_handler.original_handler = signal.getsignal(signal.SIGUSR1)
xephyr_signal_handler.was_called = False
signal.signal(signal.SIGUSR1, xephyr_signal_handler)
def wait_for_xephyr(pid):
try:
os.waitpid(pid, 0)
except: # On purpose
pass
signal.signal(signal.SIGUSR1, xephyr_signal_handler.original_handler)
return xephyr_signal_handler.was_called
xephyr_argv = ['Xephyr']
# Determine the display number to use for Xephyr
for arg in xephyr_args:
if arg.startswith(':'):
try:
xephyr_display_number = int(arg.lstrip(':'))
break
except ValueError:
continue
else:
xephyr_display_number = get_free_display_number()
        xephyr_argv.append(':{0}'.format(xephyr_display_number))
xephyr_argv.extend(xephyr_args)
# Wait for signal from Xephyr when it is ready
setup_xephyr_handler()
# Spawn Xephyr
xephyr_pid = os.fork()
if xephyr_pid == 0:
# In child
os.closerange(0, 1023)
# This makes X(ephyr) SIGUSR1 its parent when ready.
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
try:
os.execvp(xephyr_argv[0], xephyr_argv)
except OSError as e:
sys.exit(e.errno)
# Wait for Xephyr to signal us
if wait_for_xephyr(xephyr_pid):
try:
return self._dbus_call('AddLocalXSeat', xephyr_display_number)
except Exception as e:
os.kill(xephyr_pid, signal.SIGQUIT)
raise Exception('Unable to add seat: {0}'.format(e))
else:
raise Exception('Xephyr launch failed')
def add_local_x_seat(self, display_number):
'Add a local X seat'
return self._dbus_call('AddLocalXSeat', int(display_number))
def add_seat(self, type, *args, **kwargs):
'Add a dynamic seat'
# AddSeat expects a list of tuples
properties = [tuple(arg.split('=', 1))
if not isinstance(arg, tuple) else arg
                      for arg in args] + list(kwargs.items())
return self._dbus_call('AddSeat', type, properties)
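# A minimal usage sketch (illustrative only, not part of the original tool;
# it assumes a LightDM session with XDG_SEAT_PATH set and a reachable system
# D-Bus, and 'alice' is a placeholder user name):
#
#   tool = DMTool()                  # connects to the system bus by default
#   print(tool('list-seats'))        # argv-style dispatch through __call__
#   tool('switch-to-user', 'alice')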
def get_parser():
parser = argparse.ArgumentParser(
description='Display Manager tool',
usage='%(prog)s [OPTION...] COMMAND [ARGS...]',
epilog=COMMANDS_HELP,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
options = parser.add_argument_group('Options')
options.add_argument('-h', '--help', help='Show help options',
action='help')
options.add_argument('-v', '--version', help='Show release version',
action='version',
version='%(prog)s {0}'.format(__version__))
options.add_argument('--debug', dest='debug',
action='store_true',
help='Show debugging information')
options.add_argument('--session-bus', dest='session_bus',
action='store_true',
help='Use session D-Bus')
parser.add_argument('command', metavar='COMMAND',
choices=DMTool._get_commands(), help=argparse.SUPPRESS)
parser.add_argument('rest', metavar='ARGS', nargs='*',
help=argparse.SUPPRESS)
return parser
def main():
parser = get_parser()
args, unparsed = parser.parse_known_args()
command_args = args.rest + unparsed
bus = dbus.SessionBus() if args.session_bus else dbus.SystemBus()
dmtool = DMTool(bus)
try:
print(dmtool(args.command, *command_args) or '')
except Exception as e:
if args.debug:
traceback.print_exc()
else:
print(e, file=sys.stderr)
if isinstance(e, TypeError):
parser.print_help()
return os.EX_USAGE
else:
return 1
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| 4,424,071,183,432,009,000 | 33.420245 | 85 | 0.583014 | false | 3.908394 | false | false | false |
jrtaal/pyramid_handlebars
|
test/test_renderers.py
|
1
|
9484
|
'''
Created on 3 mei 2012
@author: jacco
'''
import unittest
import os
from lifeshare.lib.renderers import stackdict
from lifeshare.lib.renderers.ajax_renderer import AjaxRendererFactory
from lifeshare.lib.renderers.pybars_renderer import PybarsRendererFactory
from pyramid import testing
from pyramid.threadlocal import get_current_registry
from pyramid.i18n import TranslationStringFactory
class RendererInfoFixture(object):
def __init__(self, name, registry):
self.registry = registry
self.settings = registry.settings
self.name = name
self.package = self
class RendererTest(unittest.TestCase):
def setUp(self):
from ..test import settings
from sqlalchemy.ext.declarative import declarative_base
settings['pybars.directories'] = "lifeshare.lib.test:templates"
settings['osiris.store.sqla_base'] = declarative_base()
config = testing.setUp(settings=settings)
self.config = config
#self.config.include('lifeshare.app')
#self.config.include('lifeshare.api.api')
self.config.add_translation_dirs('lifeshare:locale')
config.add_renderer('.bar', 'lifeshare.lib.renderers.pybars_renderer.PybarsRendererFactory')
import lifeshare.templates.deps as template_deps
def master_get_globals():
return {}
ajax_template_factory = AjaxRendererFactory(
dependencies = template_deps.deps,
get_globals = master_get_globals)
ajax_master_template_factory = AjaxRendererFactory( master ="jsbase.bar",
dependencies = template_deps.deps,
get_globals = master_get_globals)
config.add_renderer('.ajax', ajax_master_template_factory)
config.add_renderer('ajax', ajax_master_template_factory)
#config.add_renderer('.ajax-nomaster', ajax_template_factory)
self.registry = get_current_registry()
#self.registry.settings["pybars.directories"] = "lifeshare:templates/handlebars"
def tearDown(self):
testing.tearDown()
def test_types(self):
sd = stackdict({'a':1,'b':2})
sd['c']=3
self.assertEqual(sd['a'], 1)
self.assertEqual(sd['b'], 2)
self.assertEqual(sd['c'], 3)
sd.push( dict(a = 9, d=4))
self.assertEqual(sd['a'], 9)
self.assertEqual(sd['d'], 4)
sd.pop()
self.assertEqual(sd['a'], 1)
self.assertEqual(set(sd.keys()), set(['a','b','c']))
self.assertEqual(set(sd.iterkeys()), set(['a','b','c']))
self.assertEqual(set(sd.iteritems()), set([ ('a', 1),('b', 2),('c',3)]))
def get_request(self):
request = testing.DummyRequest()
from lifeshare.lib.renderers.acceptparse import AnnotatedMIMEAccept
import time
request.accept = AnnotatedMIMEAccept("text/html")
request._request_time = time.time()
return request
def test_pybars(self):
request = self.get_request()
renderer = PybarsRendererFactory(RendererInfoFixture("test.bar", self.registry))
response = renderer( {"value1": "Test Value", "value2" : ["Value 2a", "Value 2b"], "value3" : u"Videos\xc3" } ,
dict(request=request, registry = self.registry))
#print ">" + response + "<"
self.assertEqual(response,
u"""Begin Child
Value 1:
Test Value
Value 2:
- Value 2a
- Value 2b
Videos\xc3
End Child
""")
def test_ajaxrenderer(self):
from lifeshare.templates.deps import deps
def get_globals(request):
return {}
factory = AjaxRendererFactory("test_master.bar", deps, get_globals = get_globals)
renderer = factory(RendererInfoFixture("test.bar.ajax", self.registry))
request = self.get_request()
request.is_xhr = False
request.user = None
system = dict(request = request, registry = self.registry )
response = renderer({ "title" : "Test Title", "preamble":"",
"body": "BODY", "resource": { "value1": "Test Value", "value2" : ["Value 2a", "Value 2b"],
"value3" : u"BLA\xc0" }} , system)
#print ">" + response + "<"
self.assertEqual(response ,
u"""Master
Title: Test Title
Begin Body
Begin Child
Value 1:
Test Value
Value 2:
- Value 2a
- Value 2b
BLA\xc0
End Child
End Body
End Master
""")
def test_path(self):
pass
def test_ajaxjson(self):
from lifeshare.templates.deps import deps
def get_globals(request):
return {}
data = { "title" : "Test Title", "preamble":"",
"body": "BODY", "resource": { "value1": "Test Value", "value2" : ["Value 2a", "Value 2b"],
"value3" : u"BLA\xc0" }}
factory = AjaxRendererFactory("test_master.bar", deps, get_globals = get_globals)
renderer = factory(RendererInfoFixture("test.bar.ajax", self.registry))
request = self.get_request()
request.is_xhr = True
request.view_name = "json"
request.user = None
system = dict(request = request, registry = self.registry )
response = renderer( data , system)
self.assertEqual(response ,
"""{"body": "BODY", "path": "/", "preamble": "", "resource": {"value3": "BLA\\u00c0", "value2": ["Value 2a", "Value 2b"], "value1": "Test Value"}, "title": "Test Title"}""")
request.view_name = "test"
response = renderer( data, system)
self.assertEqual(str(response), """Master
Title: Test Title
Begin Body
<pre>{'body': 'BODY',
'path': '/',
'preamble': '',
'resource': {'value1': 'Test Value', 'value2': ('Value 2a', 'Value 2b'), 'value3': u'BLA\\xc0'},
'title': 'Test Title'}</pre>
End Body
End Master
""")
request.view_name = "ajax"
response = renderer( data, system)
#print ">" + response + "<"
self.assertEqual(str(response), """{"body": "Begin Child\\n Value 1:\\n Test Value\\n Value 2:\\n \\n - Value 2a\\n \\n - Value 2b\\n \\n BLA\u00c0\\nEnd Child\\n", "resource": {"value3": "BLA\u00c0", "value2": ["Value 2a", "Value 2b"], "value1": "Test Value"}, "title": "Test Title", "path": "/", "preamble": ""}""")
request.view_name = "ajaxp"
response = renderer( data, system)
#print ">" + response + "<"
self.assertEqual(str(response), """load({"body": "Begin Child\\n Value 1:\\n Test Value\\n Value 2:\\n \\n - Value 2a\\n \\n - Value 2b\\n \\n BLA\\u00c0\\nEnd Child\\n", "resource": {"value3": "BLA\\u00c0", "value2": ["Value 2a", "Value 2b"], "value1": "Test Value"}, "title": "Test Title", "path": "/", "preamble": ""})""")
request.view_name = "ajaxtest"
response = renderer( data, system)
#print ">" + response + "<"
self.assertEqual(str(response), """{'body': u'Begin Child\\n Value 1:\\n Test Value\\n Value 2:\\n \\n - Value 2a\\n \\n - Value 2b\\n \\n BLA\\xc0\\nEnd Child\\n',\n 'path': '/',\n 'preamble': '',\n 'resource': {'value1': 'Test Value', 'value2': ('Value 2a', 'Value 2b'), 'value3': u'BLA\\xc0'},\n 'title': 'Test Title'}""")
def test_i18n_bars(self):
renderer = PybarsRendererFactory(RendererInfoFixture("i18ntest.bar", self.registry))
_ = TranslationStringFactory("Lifeshare")
for locale in ("nl", "en") :
request = self.get_request()
request._LOCALE_ = locale
response = renderer( {"n": 1, "o": 2, "a" : ["1","2",_("Video")], "b" : ["1","2",_("Videos")], "NAME" : "Jacco" } , dict(request=request, registry = self.registry))
#print "LOCALE", locale
#print response
if locale in ("nl", "nl_NL"):
self.assertEqual(response[0:20], u"Welkom bij Lifeshare")
if locale in ("en", "en_US"):
self.assertEqual(response[0:20], u"Welcome to Lifeshare")
#self.assertEqual(response[0:8] , "Value 1:")
def test_subdir_template(self):
request = self.get_request()
renderer = PybarsRendererFactory(RendererInfoFixture("test_embed.bar", self.registry))
response = renderer( {"value1": "Test Value", "value2" : ["Value 2a", "Value 2b"], "value3" : u"Videos\xc3" } ,
dict(request=request, registry = self.registry))
print response
def test_localize_template(self):
from lifeshare.lib.renderers.handlebars_i18n import extract_handlebars, translate_handlebars
from pyramid.i18n import get_localizer
tmp = open(os.path.join(os.path.dirname(__file__), "templates/i18ntest.bar"))
strings = extract_handlebars(tmp,[],[],{})
self.assertEqual(strings[1][2], u"English")
self.assertEqual(strings[2][2], u"Dutch")
tmp.seek(0)
request = self.get_request()
request._LOCALE_ = "nl"
localizer = get_localizer(request)
tmp2 = translate_handlebars(tmp.read(), localizer, "Lifeshare")
self.assertEqual(tmp2[0:114], """Welkom bij Lifeshare
<div class="row-fluid">
<div class="span4">EngelsNederlands</div>
<div class="span8">""")
|
mit
| 4,120,112,888,118,071,300 | 34.924242 | 333 | 0.578132 | false | 3.626769 | true | false | false |
ceball/param
|
setup.py
|
1
|
2666
|
import os
from setuptools import setup
########## autover ##########
def get_setup_version(reponame):
"""Use autover to get up to date version."""
# importing self into setup.py is unorthodox, but param has no
# required dependencies outside of python
from param.version import Version
return Version.setup_version(os.path.dirname(__file__),reponame,archive_commit="$Format:%h$")
########## dependencies ##########
extras_require = {
# pip doesn't support tests_require
# (https://github.com/pypa/pip/issues/1197)
'tests': [
'nose',
'flake8',
]
}
extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
########## metadata for setuptools ##########
setup_args = dict(
name='param',
version=get_setup_version("param"),
description='Make your Python code clearer and more reliable by declaring Parameters.',
long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',
author="HoloViz",
author_email="[email protected]",
maintainer="HoloViz",
maintainer_email="[email protected]",
platforms=['Windows', 'Mac OS X', 'Linux'],
license='BSD',
url='http://param.holoviz.org/',
packages=["param","numbergen"],
provides=["param","numbergen"],
include_package_data = True,
python_requires=">=2.7",
install_requires=[],
extras_require=extras_require,
tests_require=extras_require['tests'],
project_urls={
"Documentation": "https://param.holoviz.org/",
"Releases": "https://github.com/holoviz/param/releases",
"Bug Tracker": "https://github.com/holoviz/param/issues",
"Source Code": "https://github.com/holoviz/param",
"Panel Examples": "https://panel.holoviz.org/user_guide/Param.html",
},
classifiers=[
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries"]
)
if __name__=="__main__":
setup(**setup_args)
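# Illustrative install commands implied by the metadata above (assumptions,
# not part of this file):
#   pip install param          # core package; install_requires is empty
#   pip install param[tests]   # pulls in nose and flake8 for the test suite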
|
bsd-3-clause
| -6,838,050,883,231,067,000 | 32.746835 | 105 | 0.615904 | false | 3.875 | false | false | false |
robmadole/jig
|
src/jig/output.py
|
1
|
15443
|
# coding=utf-8
import sys
import codecs
from functools import wraps
from StringIO import StringIO
from contextlib import contextmanager
from jig.exc import ForcedExit
# Message types
INFO = u'info'
WARN = u'warn'
STOP = u'stop'
def strip_paint(payload):
"""
Removes any console specific color characters.
Where ``payload`` is a string containing special characters used to print
colored output to the terminal.
Returns a unicode string without the paint.
"""
strip = [u'\x1b[31;1m', u'\x1b[32;1m', u'\x1b[33;1m', u'\x1b[39;22m']
for paint in strip:
payload = payload.replace(paint, '')
return payload
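# For example (illustrative, not in the original source):
#   strip_paint(u'\x1b[31;1mstop\x1b[39;22m')  ->  u'stop'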
def lookup_type(strtype):
"""
Returns the actual type for a string message representation of it.
For example::
>>> lookup_type('Info')
u'info'
>>> lookup_type('warn')
u'warn'
>>> lookup_type('s')
u'stop'
It will default to ``INFO``.
    >>> lookup_type('unknown')
    u'info'
"""
strtype = unicode(strtype) or u''
mt = strtype.lower()
if mt.startswith(u'i'):
return INFO
if mt.startswith(u'w'):
return WARN
if mt.startswith(u's'):
return STOP
# Default to INFO
return INFO
def _get_hint(hint):
"""
Retrieves a hint by the given name from :module:`jig.commands.hints`.
:param string hintname: the ALL_CAPS constant defined in the hints module
:rtype: list
"""
from jig.commands import hints
try:
return getattr(hints, hint)
except AttributeError:
return hint
def utf8_writer(filelike):
"""
Wrap a file-like object with a UTF-8 wrapper.
:param file filelike: the file-like object to wrap
"""
return codecs.getwriter('utf_8')(filelike)
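# A hedged usage sketch (illustrative only): wrapping sys.stdout lets unicode
# messages be written without UnicodeEncodeError on ASCII terminals:
#   out = utf8_writer(sys.stdout)
#   out.write(u'\u2713 done\n')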
class Message(object):
"""
Represents one message that a plugin is communicating to the user.
"""
def __init__(self, plugin, type=INFO, body='', file=None, line=None):
"""
Create a message object associated with a plugin.
All messages must be associated with the Plugin ``plugin`` that was
responsible for creating them.
"""
self.plugin = plugin
self.type = type
self.body = body
self.file = file
self.line = line
def __repr__(self):
reprstr = '<{cls} type="{t}", body={b}, file={f}, line={l}>'
return reprstr.format(
cls=self.__class__.__name__,
t=self.type, b=repr(self.body), f=repr(self.file), l=self.line)
def __eq__(self, other):
"""
If type, body, file, and line attributes are the same they are equal.
"""
try:
attrs = ('type', 'body', 'file', 'line')
for attr in attrs:
if not getattr(self, attr) == getattr(other, attr):
return False
except AttributeError:
# If the other object is missing an attribute they can't be equal.
return False
return True
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = lookup_type(value)
class Error(Message):
"""
An error message related to a plugin's results.
"""
def __init__(self, *args, **kwargs):
if 'type' not in kwargs:
# Default to stop for errors
kwargs['type'] = 'stop'
super(Error, self).__init__(*args, **kwargs)
class ConsoleView(object):
"""
Main view used to handle output to the console.
"""
def __init__(self, collect_output=False, exit_on_exception=True,
stdout=None, stderr=None):
# Do we collect output? False means we print it out
self.collect_output = collect_output
self.exit_on_exception = exit_on_exception
self.init_collector(stdout=stdout, stderr=stderr)
def init_collector(self, stdout=None, stderr=None):
self._collect = {
'stdout': stdout or StringIO(), 'stderr': stderr or StringIO()}
@contextmanager
def out(self):
try:
stdout = utf8_writer(sys.stdout)
fo = self._collect['stdout'] if self.collect_output else stdout
yield lambda line: fo.write(unicode(line) + u'\n')
except Exception as e:
stderr = utf8_writer(sys.stderr)
fo = self._collect['stderr'] if self.collect_output else stderr
fo.write(unicode(e) + u'\n')
if hasattr(e, 'hint'):
fo.write(unicode(_get_hint(e.hint)) + u'\n')
try:
retcode = e.retcode
except AttributeError:
# This exception does not have a return code, assume 1
retcode = 1
if self.exit_on_exception:
sys.exit(retcode) # pragma: no cover
else:
raise ForcedExit(retcode)
def print_help(self, commands):
"""
Format and print help for using the console script.
"""
with self.out() as printer:
printer('usage: jig [-h] COMMAND')
printer('')
printer('optional arguments:')
printer(' -h, --help show this help message and exit')
printer('')
printer('jig commands:')
for command in commands:
name = command.__module__.split('.')[-1]
description = command.parser.description
printer(' {name:12}{description}'.format(
name=name, description=description))
printer('')
printer('See `jig COMMAND --help` for more information')
class ResultsCollator(object):
"""
Collects and combines plugin results into a unified summary.
"""
def __init__(self, results):
# Decorate our message methods
setattr(
self, '_commit_specific_message',
self.iterresults(self._commit_specific_message))
setattr(
self, '_file_specific_message',
self.iterresults(self._file_specific_message))
setattr(
self, '_line_specific_message',
self.iterresults(self._line_specific_message))
self._results = results
self._plugins = set()
self._reporters = set()
self._counts = {INFO: 0, WARN: 0, STOP: 0}
self._errors = []
# Pre-compute our messages (collate)
self._cm = list(self._commit_specific_message())
self._fm = list(self._file_specific_message())
self._lm = list(self._line_specific_message())
@property
def messages(self):
"""
Messages by type for the plugin results.
Return a tuple of messages by type based on the results that were
provided when initializing the collator.
Each tuple contains a generator object which will return
``jig.output.Message`` objects.
The tuple has a length of 3 and is in this order:
1. Commit specific messages
2. File specific messages
3. Line specific messages
"""
return (self._cm, self._fm, self._lm)
@property
def plugins(self):
"""
Provides a set of plugins that were present in the results.
This method will return a plugin regardless of whether it yielded
messages or not.
"""
return self._plugins
@property
def reporters(self):
"""
Provides a set of plugins that yielded messages.
This method will only provide something other than an empty set when
the commit, file, or line specific message methods have been called.
"""
return self._reporters
@property
def counts(self):
"""
Tally of the type of messages from the results.
Returns a dictionary like::
{u'info': 5, u'warn': 0, u'stop', 1}
"""
return self._counts
@property
def errors(self):
"""
Errors that were generated during collation.
Errors are found when a piece of data given to one of the collators is
of a type that can't be understood.
Returns a list of ``jig.output.Error`` objects.
"""
return self._errors
def iterresults(self, func):
"""
Decorator that iterates through results.
This simplifies some of the boilerplate for our collation. The
decorated function must be a generator that yields ``Message`` or
``Error`` object. It will sift errors and collect those into a separate
container. The ``Message`` objects then be returned to the caller.
"""
@wraps(func)
def wrapper(*args, **kwargs):
for plugin, result in self._results.items():
self._plugins.add(plugin)
retcode, stdout, stderr = result
if not retcode == 0:
error = Error(plugin)
error.body = stderr or stdout
self._errors.append(error)
# Remove this plugin since it's an error. If we don't do
# this we'll end up reporting on this 3 times.
del self._results[plugin]
continue
for message in list(func(plugin, stdout)):
if isinstance(message, Error):
self._errors.append(message)
try:
del self._results[plugin]
except KeyError:
pass
continue # pragma: no cover
self._reporters.add(plugin)
self._counts[message.type] += 1
yield message
return wrapper
def _commit_specific_message(self, plugin, obj):
"""
Look for plugins that are reporting generic messages.
These messages are not specific to any file or line number. They
generally come from plugins that are inspecting the commit as a whole
and reporting on some characteristic. A good example of this would be a
plugin that checked to see if any modifications were made to a docs
directory if modifications were also made to a src directory.
Basically, a "did you write/update the docs" message.
"""
if not obj:
# This is falsy, there is nothing of interest here
return
if isinstance(obj, dict):
# This is for file or line specific messages
return
if isinstance(obj, basestring):
# Straight up message, normalize this for our loop
obj = [obj]
if isinstance(obj, list):
# It's a list of [TYPE, BODY]
for m in obj:
if not m:
continue
if isinstance(m, basestring):
# Straight up message, normalize this for our loop
yield Message(plugin, body=m)
continue
if not isinstance(m, list) or len(m) != 2:
yield Error(plugin, body=m)
continue
if not m[1]:
# Empty message body, this isn't useful
continue
yield Message(plugin, type=m[0], body=m[1])
# We understood this, so no need to continue
return
# This object is not understood
yield Error(plugin, body=obj)
def _file_specific_message(self, plugin, obj):
"""
Look for plugins that are reporting file specific messages.
These messages are specific to a file but not necessarily to a line
number. In general they apply to a condition that is present that
affects the whole file. An example of this would be detecting
underscores or camel case in the filename.
"""
if not isinstance(obj, dict):
# This is not a file specific messages
return
for filename, group in obj.items():
if isinstance(group, basestring):
group = [group]
if not isinstance(group, list):
yield Error(plugin, body=group, file=filename)
continue
for msg in group:
if isinstance(msg, basestring):
msg = [msg]
if not isinstance(msg, list):
yield Error(plugin, body=msg, file=filename)
continue
if len(msg) == 0:
# There is nothing here of interest
continue
if len(msg) == 1:
# Should default to info type
if not msg[0]:
continue
yield Message(plugin, body=msg[0], file=filename)
continue
if len(msg) == 2:
if not msg[1]:
continue
# In the format of [TYPE, BODY]
yield Message(
plugin, body=msg[1], type=msg[0],
file=filename)
continue
if len(msg) == 3:
if not msg[2]:
continue
# In the format of [LINE, TYPE, BODY]
if msg[0] is not None:
# This is line specific, skip this
continue
yield Message(
plugin, body=msg[2], type=msg[1],
file=filename)
continue
# This object is not understood
yield Error(plugin, body=obj)
def _line_specific_message(self, plugin, obj):
"""
Look for plugins that are reporting line specific messages.
For plugins wishing to identify specific lines, they use line specific
messages. For example, you may have a JavaScript plugin that reports
the existence of ``console.log`` on line 45. This allows the developer
to pinpoint the problem much quicker than file or commit specific
messages.
There is a lack of error handling in this method. The commit and file
specific handlers take care of error handling for us. This method gets
to be pretty clean.
"""
if not isinstance(obj, dict):
# This is not a file or line specific messages
return
for filename, group in obj.items():
if isinstance(group, basestring):
group = [group]
for msg in group:
if isinstance(msg, basestring):
msg = [msg]
if 0 <= len(msg) <= 2:
# There is nothing here of interest
continue
if msg[0] is None:
# This is not line specific
continue
if not msg[2]:
# The body is empty
continue
# In the format of [LINE, TYPE, BODY]
yield Message(
plugin, body=msg[2], type=msg[1],
file=filename, line=msg[0])
continue
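# For example (illustrative, not part of the original module): a plugin emitting
#   {"app.py": [[45, "warn", "console.log left in code"]]}
# yields one line-specific Message with type=u'warn', file='app.py', line=45.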
|
bsd-2-clause
| 4,069,934,028,138,322,000 | 30.324544 | 79 | 0.538885 | false | 4.71686 | false | false | false |
wehriam/awspider
|
awspider/aws/sdb.py
|
1
|
29905
|
import base64
import hmac
import hashlib
import urllib
import xml.etree.cElementTree as ET
from datetime import datetime
import time
import dateutil.parser
import logging
from twisted.internet.defer import DeferredList
from ..requestqueuer import RequestQueuer
from .lib import etree_to_dict, safe_quote_tuple
LOGGER = logging.getLogger("main")
SDB_NAMESPACE = "{http://sdb.amazonaws.com/doc/2009-04-15/}"
def base10toN(num,n):
"""Change a to a base-n number.
Up to base-36 is supported without special notation."""
num_rep={10:'a',
11:'b',
12:'c',
13:'d',
14:'e',
15:'f',
16:'g',
17:'h',
18:'i',
19:'j',
20:'k',
21:'l',
22:'m',
23:'n',
24:'o',
25:'p',
26:'q',
27:'r',
28:'s',
29:'t',
30:'u',
31:'v',
32:'w',
33:'x',
34:'y',
35:'z'}
new_num_string=''
current=num
while current!=0:
remainder=current%n
if 36>remainder>9:
remainder_string=num_rep[remainder]
elif remainder>=36:
remainder_string='('+str(remainder)+')'
else:
remainder_string=str(remainder)
new_num_string=remainder_string+new_num_string
current=current/n
return new_num_string
def base10to36(i):
return base10toN(i, 36)
def base36to10(s):
return int(s, 36)
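# Illustrative round trip (values assumed, not in the original source):
#   base10to36(123456)   ->  '2n9c'
#   base36to10('2n9c')   ->  123456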
def sdb_now(offset=0):
"""Return an 11 character, zero padded string with the current Unixtime.
**Keyword arguments:**
* *offset* -- Offset in seconds. (Default 0)
"""
return str(int(offset + time.time())).zfill(11)
def sdb_now_add(seconds, offset=0):
"""Return an 11 character, zero padded string with the current Unixtime
plus an integer.
**Arguments:**
* *seconds* -- Seconds to add to the current time.
**Keyword arguments:**
* *offset* -- Offset in seconds. (Default 0)
"""
return str(int(offset + time.time() + seconds)).zfill(11)
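# For example (illustrative): at Unixtime 1300000000, sdb_now() returns
# '01300000000' and sdb_now_add(60) returns '01300000060' (11 chars, zero padded).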
def sdb_parse_time(date_string, offset=0):
"""Parse a date string, then return an 11 character, zero padded
string with the current Unixtime plus an integer.
**Arguments:**
* *date_string* -- Date string
**Keyword arguments:**
* *offset* -- Offset in seconds. (Default 0)
"""
parsed_time = time.mktime(dateutil.parser.parse(date_string).timetuple())
return str(int(offset + parsed_time)).zfill(11)
def sdb_latitude(latitude):
"""Return an 8 character, zero padded string version of the
latitude parameter.
**Arguments:**
* *latitude* -- Latitude.
"""
adjusted = (90 + float(latitude)) * 100000
return str(int(adjusted)).zfill(8)
def sdb_longitude(longitude):
"""Return an 8 character, zero padded string version of the
longitude parameter.
**Arguments:**
* *longitude* -- Longitude.
"""
adjusted = (180 + float(longitude)) * 100000
return str(int(adjusted)).zfill(8)
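# For example (illustrative, values assumed):
#   sdb_latitude(37.5)    ->  '12750000'
#   sdb_longitude(-122.5) ->  '05750000'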
class AmazonSDB:
"""
Amazon Simple Database API.
"""
host = "sdb.amazonaws.com"
box_usage = 0.0
def __init__(self, aws_access_key_id, aws_secret_access_key, rq=None):
"""
**Arguments:**
* *aws_access_key_id* -- Amazon AWS access key ID
* *aws_secret_access_key* -- Amazon AWS secret access key
**Keyword arguments:**
* *rq* -- Optional RequestQueuer object.
"""
if rq is None:
self.rq = RequestQueuer()
else:
self.rq = rq
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.rq.setHostMaxRequestsPerSecond(self.host, 0)
self.rq.setHostMaxSimultaneousRequests(self.host, 0)
def copyDomain(self, source_domain, destination_domain):
"""
Copy all elements of a source domain to a destination domain.
**Arguments:**
* *source_domain* -- Source domain name
* *destination_domain* -- Destination domain name
"""
d = self.checkAndCreateDomain(destination_domain)
d.addCallback(self._copyDomainCallback, source_domain,
destination_domain)
return d
def _copyDomainCallback(self, data, source_domain, destination_domain):
return self._copyDomainCallback2(source_domain, destination_domain)
def _copyDomainCallback2(self, source_domain, destination_domain,
next_token=None, total_box_usage=0):
parameters = {}
parameters["Action"] = "Select"
parameters["SelectExpression"] = "SELECT * FROM `%s`" % source_domain
if next_token is not None:
parameters["NextToken"] = next_token
d = self._request(parameters)
d.addCallback(self._copyDomainCallback3,
source_domain=source_domain,
destination_domain=destination_domain,
total_box_usage=total_box_usage)
d.addErrback(self._genericErrback)
return d
def _copyDomainCallback3(self, data, source_domain, destination_domain,
total_box_usage=0):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
total_box_usage += box_usage
next_token_element = xml.find(".//%sNextToken" % SDB_NAMESPACE)
if next_token_element is not None:
next_token = next_token_element.text
else:
next_token = None
items = xml.findall(".//%sItem" % SDB_NAMESPACE)
results = {}
for item in items:
key = item.find("./%sName" % SDB_NAMESPACE).text
attributes = item.findall("%sAttribute" % SDB_NAMESPACE)
attribute_dict = {}
for attribute in attributes:
attr_name = attribute.find("./%sName" % SDB_NAMESPACE).text
attr_value = attribute.find("./%sValue" % SDB_NAMESPACE).text
if attr_name in attribute_dict:
attribute_dict[attr_name].append(attr_value)
else:
attribute_dict[attr_name] = [attr_value]
results[key] = attribute_dict
deferreds = []
for key in results:
d = self.putAttributes(destination_domain, key, results[key])
d.addErrback(self._copyPutAttributesErrback, destination_domain, key, results[key])
deferreds.append(d)
d = DeferredList(deferreds, consumeErrors=True)
d.addCallback(self._copyDomainCallback4, source_domain,
destination_domain, next_token=next_token, total_box_usage=total_box_usage)
return d
def _copyDomainCallback4(self, data, source_domain, destination_domain,
next_token=None, total_box_usage=0):
for row in data:
if row[0] == False:
raise row[1]
if next_token is not None:
return self._copyDomainCallback2(
source_domain=source_domain,
destination_domain=destination_domain,
next_token=next_token,
total_box_usage=total_box_usage)
LOGGER.debug("""CopyDomain:\n%s -> %s\nBox usage: %s""" % (
source_domain,
destination_domain,
total_box_usage))
return True
def _copyPutAttributesErrback(self, error, destination_domain, key, attributes, count=0):
if count < 3:
d = self.putAttributes(destination_domain, key, attributes)
d.addErrback(self._copyPutAttributesErrback, destination_domain, key, attributes, count=count + 1)
return d
return error
def checkAndCreateDomain(self, domain):
"""
Check for a SimpleDB domain's existence. If it does not exist,
create it.
**Arguments:**
* *domain* -- Domain name
"""
d = self.domainMetadata(domain)
d.addErrback(self._checkAndCreateDomainErrback, domain)
return d
def _checkAndCreateDomainErrback(self, error, domain):
if hasattr(error, "value") and hasattr(error.value, "status"):
if int(error.value.status) == 400:
d = self.createDomain(domain)
d.addErrback(self._checkAndCreateDomainErrback2, domain)
return d
message = "Could not find or create domain '%s'." % domain
raise Exception(message)
def _checkAndCreateDomainErrback2(self, error, domain):
message = "Could not create domain '%s'" % domain
raise Exception(message)
def createDomain(self, domain):
"""
Create a SimpleDB domain.
**Arguments:**
* *domain* -- Domain name
"""
parameters = {
"Action":"CreateDomain",
"DomainName":domain
}
d = self._request(parameters)
d.addCallback(self._createDomainCallback, domain)
d.addErrback(self._genericErrback)
return d
def _createDomainCallback(self, data, domain):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("Created SimpleDB domain '%s'. Box usage: %s" % (domain,
box_usage))
return True
def deleteDomain(self, domain):
"""
Delete a SimpleDB domain.
**Arguments:**
* *domain* -- Domain name
"""
parameters = {}
parameters["Action"] = "DeleteDomain"
parameters["DomainName"] = domain
d = self._request(parameters)
d.addCallback(self._deleteDomainCallback, domain)
d.addErrback(self._genericErrback)
return d
def _deleteDomainCallback(self, data, domain):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("Deleted SimpleDB domain '%s'. Box usage: %s" % (domain,
box_usage))
return True
def listDomains(self):
"""
List SimpleDB domains associated with an account.
"""
return self._listDomains()
def _listDomains(self,
next_token=None,
previous_results=None,
total_box_usage=0):
parameters = {}
parameters["Action"] = "ListDomains"
if next_token is not None:
parameters["NextToken"] = next_token
d = self._request(parameters)
d.addCallback(self._listDomainsCallback,
previous_results=previous_results,
total_box_usage=total_box_usage)
d.addErrback(self._genericErrback)
return d
def _listDomainsCallback(self,
data,
previous_results=None,
total_box_usage=0):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
total_box_usage += box_usage
xml_response = etree_to_dict(xml, namespace=SDB_NAMESPACE)
if "DomainName" in xml_response["ListDomainsResult"][0]:
results = xml_response["ListDomainsResult"][0]["DomainName"]
else:
results = []
if previous_results is not None:
results.extend(previous_results)
if "NextToken" in xml_response["ListDomainsResult"]:
next_token = xml_response["ListDomainsResult"][0]["NextToken"][0]
return self._listDomains(next_token=next_token,
previous_results=results,
total_box_usage=total_box_usage)
LOGGER.debug("Listed domains. Box usage: %s" % total_box_usage)
return results
def domainMetadata(self, domain):
"""
Return meta-information about a domain.
**Arguments:**
* *domain* -- Domain name
"""
parameters = {}
parameters["Action"] = "DomainMetadata"
parameters["DomainName"] = domain
d = self._request(parameters)
d.addCallback(self._domainMetadataCallback, domain)
d.addErrback(self._genericErrback)
return d
def _domainMetadataCallback(self, data, domain):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("Got SimpleDB domain '%s' metadata. Box usage: %s" % (
domain,
box_usage))
xml_response = etree_to_dict(xml, namespace=SDB_NAMESPACE)
return xml_response["DomainMetadataResult"][0]
def batchPutAttributes(self, domain, attributes_by_item_name,
replace_by_item_name=None):
"""
Batch put attributes into domain.
**Arguments:**
* *domain* -- Domain name
* *attributes_by_item_name* -- Dictionary of dictionaries. First
level keys are the item name, value is dictionary of key/value
pairs. Example: ``{"item_name":{"attribute_name":"value"}}``
**Keyword arguments:**
* *replace_by_item_name* -- Dictionary of lists. First level keys
are the item names, value is a list of of attributes that should
be overwritten. ``{"item_name":["attribute_name"]}`` (Default
empty dictionary)
"""
if replace_by_item_name is None:
replace_by_item_name = {}
if len(attributes_by_item_name) > 25:
raise Exception("Too many items in batchPutAttributes. Up to 25 items per call allowed.")
for item_name in replace_by_item_name:
if not isinstance(replace_by_item_name[item_name], list):
raise Exception("Replace argument '%s' must be a list." % item_name)
for item_name in attributes_by_item_name:
if not isinstance(attributes_by_item_name[item_name], dict):
raise Exception("Attributes argument '%s' must be a dictionary." % item_name)
parameters = {}
parameters["Action"] = "BatchPutAttributes"
parameters["DomainName"] = domain
i = 0
for item_name in attributes_by_item_name:
parameters["Item.%s.ItemName" % i] = item_name
attributes_list = []
for attribute in attributes_by_item_name[item_name].items():
# If the attribute is a list, split into multiple attributes.
if isinstance(attribute[1], list):
for value in attribute[1]:
attributes_list.append((attribute[0], value))
else:
attributes_list.append(attribute)
j = 0
for attribute in attributes_list:
parameters["Item.%s.Attribute.%s.Name" % (i,j)] = attribute[0]
parameters["Item.%s.Attribute.%s.Value" % (i,j)] = attribute[1]
if item_name in replace_by_item_name:
if attribute[0] in replace_by_item_name[item_name]:
parameters["Item.%s.Attribute.%s.Replace" % (i,j)] = "true"
j += 1
i += 1
d = self._request(parameters)
d.addCallback(
self._batchPutAttributesCallback,
domain,
attributes_by_item_name)
d.addErrback(self._genericErrback)
return d
def _batchPutAttributesCallback(self,
data,
domain,
attributes_by_item_name):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("""Batch put attributes %s in SimpleDB domain '%s'. Box usage: %s""" % (
attributes_by_item_name,
domain,
box_usage))
return True
def putAttributes(self, domain, item_name, attributes, replace=None):
"""
Put attributes into domain at item_name.
**Arguments:**
* *domain* -- Domain name
* *item_name* -- Item name
* *attributes* -- Dictionary of attributes
**Keyword arguments:**
* *replace* -- List of attributes that should be overwritten
(Default empty list)
"""
if replace is None:
replace = []
if not isinstance(replace, list):
raise Exception("Replace argument must be a list.")
if not isinstance(attributes, dict):
raise Exception("Attributes argument must be a dictionary.")
parameters = {}
parameters["Action"] = "PutAttributes"
parameters["DomainName"] = domain
parameters["ItemName"] = item_name
attributes_list = []
for attribute in attributes.items():
# If the attribute is a list, split into multiple attributes.
if isinstance(attribute[1], list):
for value in attribute[1]:
attributes_list.append((attribute[0], value))
else:
attributes_list.append(attribute)
i = 0
for attribute in attributes_list:
parameters["Attribute.%s.Name" % i] = attribute[0]
parameters["Attribute.%s.Value" % i] = attribute[1]
if attribute[0] in replace:
parameters["Attribute.%s.Replace" % i] = "true"
i += 1
d = self._request(parameters)
d.addCallback(self._putAttributesCallback, domain, item_name, attributes)
d.addErrback(self._genericErrback)
return d
def _putAttributesCallback(self, data, domain, item_name, attributes):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("""Put attributes %s on '%s' in SimpleDB domain '%s'. Box usage: %s""" % (
attributes,
item_name,
domain,
box_usage))
return True
def getAttributes(self, domain, item_name, attribute_name=None):
"""
Get one or all attributes from domain at item_name.
**Arguments:**
* *domain* -- Domain name
* *item_name* -- Item name
**Keyword arguments:**
* *attribute_name* -- Name of specific attribute to get (Default None)
"""
parameters = {}
parameters["Action"] = "GetAttributes"
parameters["DomainName"] = domain
parameters["ItemName"] = item_name
if attribute_name is not None:
parameters["AttributeName"] = attribute_name
d = self._request(parameters)
d.addCallback(self._getAttributesCallback, domain, item_name)
d.addErrback(self._genericErrback)
return d
def _getAttributesCallback(self, data, domain, item_name):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("""Got attributes from '%s' in SimpleDB domain '%s'. Box usage: %s""" % (
item_name,
domain,
box_usage))
xml_response = etree_to_dict(xml, namespace=SDB_NAMESPACE)
attributes = {}
if xml_response["GetAttributesResult"][0] is None:
raise Exception("Item does not exist.")
for attribute in xml_response["GetAttributesResult"][0]['Attribute']:
if attribute["Name"][0] not in attributes:
attributes[attribute["Name"][0]] = []
attributes[attribute["Name"][0]].extend(attribute["Value"])
return attributes
def delete(self, domain, item_name):
"""
Delete all attributes from domain at item_name.
**Arguments:**
* *domain* -- Domain name
* *item_name* -- Item name
"""
return self.deleteAttributes(domain, item_name)
def deleteAttributes(self, domain, item_name, attributes=None):
"""
Delete one or all attributes from domain at item_name.
**Arguments:**
* *domain* -- Domain name
* *item_name* -- Item name
**Keyword arguments:**
* *attributes* -- List of attribute names, or dictionary of
attribute name / value pairs. (Default empty dict)
"""
if attributes is None:
attributes = {}
if not isinstance(attributes, dict) and \
not isinstance(attributes, list):
message = "Attributes parameter must be a dictionary or a list."
raise Exception(message)
parameters = {}
parameters["Action"] = "DeleteAttributes"
parameters["DomainName"] = domain
parameters["ItemName"] = item_name
if isinstance(attributes, dict):
attr_count = 1
for key in attributes:
parameters["Attribute.%s.Name" % attr_count] = key
parameters["Attribute.%s.Value" % attr_count] = attributes[key]
attr_count += 1
if isinstance(attributes, list):
attr_count = 0
for key in attributes:
parameters["Attribute.%s.Name" % attr_count] = key
attr_count += 1
d = self._request(parameters)
d.addCallback(self._deleteAttributesCallback, domain, item_name)
d.addErrback(self._genericErrback)
return d
def _deleteAttributesCallback(self, data, domain, item_name):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
LOGGER.debug("""Deleted attributes from '%s' in SimpleDB domain '%s'. Box usage: %s""" % (
item_name,
domain,
box_usage))
return True
def select(self, select_expression, max_results=0):
"""
Run a select query
**Arguments:**
* *select_expression* -- Select expression
"""
if "count(" in select_expression.lower():
return self._selectCount(select_expression)
return self._select(select_expression, max_results=max_results)
def _selectCount(self, select_expression, next_token=None,
previous_count=0,
total_box_usage=0):
parameters = {}
parameters["Action"] = "Select"
parameters["SelectExpression"] = select_expression
if next_token is not None:
parameters["NextToken"] = next_token
d = self._request(parameters)
d.addCallback(self._selectCountCallback,
select_expression=select_expression,
previous_count=previous_count,
total_box_usage=total_box_usage)
d.addErrback(self._genericErrback)
return d
def _selectCountCallback(self, data, select_expression=None,
previous_count=0,
total_box_usage=0):
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
total_box_usage += box_usage
next_token_element = xml.find(".//%sNextToken" % SDB_NAMESPACE)
if next_token_element is not None:
next_token = next_token_element.text
else:
next_token = None
count = previous_count + int(xml.find(".//%sValue" % SDB_NAMESPACE).text)
if next_token is not None:
return self._selectCount(select_expression, next_token=next_token,
previous_count=count,
total_box_usage=total_box_usage)
LOGGER.debug("""Select:\n'%s'\nBox usage: %s""" % (
select_expression,
total_box_usage))
return count
def _select(self, select_expression, next_token=None,
previous_results=None,
total_box_usage=0,
max_results=0):
parameters = {}
parameters["Action"] = "Select"
parameters["SelectExpression"] = select_expression
if next_token is not None:
parameters["NextToken"] = next_token
d = self._request(parameters)
d.addCallback(self._selectCallback,
select_expression=select_expression,
previous_results=previous_results,
total_box_usage=total_box_usage,
max_results=max_results)
d.addErrback(self._genericErrback)
return d
def _selectCallback(self, data, select_expression=None,
previous_results=None,
total_box_usage=0,
max_results=0):
if previous_results is not None:
results = previous_results
else:
results = {}
xml = ET.fromstring(data["response"])
box_usage = float(xml.find(".//%sBoxUsage" % SDB_NAMESPACE).text)
self.box_usage += box_usage
total_box_usage += box_usage
next_token_element = xml.find(".//%sNextToken" % SDB_NAMESPACE)
if next_token_element is not None:
next_token = next_token_element.text
else:
next_token = None
items = xml.findall(".//%sItem" % SDB_NAMESPACE)
for item in items:
key = item.find("./%sName" % SDB_NAMESPACE).text
attributes = item.findall("%sAttribute" % SDB_NAMESPACE)
attribute_dict = {}
for attribute in attributes:
attr_name = attribute.find("./%sName" % SDB_NAMESPACE).text
attr_value = attribute.find("./%sValue" % SDB_NAMESPACE).text
if attr_name in attribute_dict:
attribute_dict[attr_name].append(attr_value)
else:
attribute_dict[attr_name] = [attr_value]
results[key] = attribute_dict
if next_token is not None:
if max_results == 0 or len(results) < max_results:
return self._select(select_expression, next_token=next_token,
previous_results=results,
total_box_usage=total_box_usage,
max_results=max_results)
LOGGER.debug("""Select:\n'%s'\nBox usage: %s""" % (
select_expression,
total_box_usage))
return results
def _request(self, parameters):
"""
Add authentication parameters and make request to Amazon.
**Arguments:**
* *parameters* -- Key value pairs of parameters
"""
parameters = self._getAuthorization("GET", parameters)
query_string = urllib.urlencode(parameters)
url = "https://%s/?%s" % (self.host, query_string)
if len(url) > 4096:
del parameters['Signature']
parameters = self._getAuthorization("POST", parameters)
query_string = urllib.urlencode(parameters)
url = "https://%s" % (self.host)
d = self.rq.getPage(url, method="POST", postdata=query_string)
return d
else:
d = self.rq.getPage(url, method="GET")
return d
def _canonicalize(self, parameters):
"""
Canonicalize parameters for use with AWS Authorization.
**Arguments:**
* *parameters* -- Key value pairs of parameters
**Returns:**
* A safe-quoted string representation of the parameters.
"""
parameters = parameters.items()
parameters.sort(lambda x, y:cmp(x[0], y[0]))
return "&".join([safe_quote_tuple(x) for x in parameters])
def _getAuthorization(self, method, parameters):
"""
Create authentication parameters.
**Arguments:**
* *method* -- HTTP method of the request
* *parameters* -- Key value pairs of parameters
**Returns:**
* A dictionary of authorization parameters
"""
        signature_parameters = {
            "AWSAccessKeyId":self.aws_access_key_id,
            "SignatureVersion":"2",
            "SignatureMethod":"HmacSHA256",
            'Timestamp':datetime.utcnow().isoformat()[0:19]+"+00:00",
            "Version":"2009-04-15"
        }
signature_parameters.update(parameters)
query_string = self._canonicalize(signature_parameters)
string_to_sign = "%(method)s\n%(host)s\n%(resource)s\n%(qs)s" % {
"method":method,
"host":self.host.lower(),
"resource":"/",
"qs":query_string,
}
args = [self.aws_secret_access_key, string_to_sign, hashlib.sha256]
signature = base64.encodestring(hmac.new(*args).digest()).strip()
signature_parameters.update({'Signature': signature})
return signature_parameters
def _genericErrback(self, error):
if hasattr(error, "value"):
if hasattr(error.value, "response"):
xml = ET.XML(error.value.response)
try:
LOGGER.debug(xml.find(".//Message").text)
except Exception, e:
pass
return error
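# A minimal usage sketch (illustrative only; assumes a running Twisted reactor
# and valid AWS credentials -- the key strings and domain name are placeholders):
#
#   sdb = AmazonSDB("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
#   d = sdb.checkAndCreateDomain("mydomain")
#   d.addCallback(lambda _: sdb.putAttributes("mydomain", "item1",
#                                             {"color": ["red", "blue"]}))
#   d.addCallback(lambda _: sdb.getAttributes("mydomain", "item1"))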
|
mit
| 3,535,296,104,513,814,500 | 36.664987 | 110 | 0.562281 | false | 4.24968 | false | false | false |
marcino239/reactor_arm
|
reactor_controller/scripts/joystick_control.py
|
1
|
4438
|
import gflags
import dynamixel
import time
import sys
import pygame
SERVO_STEP = 10
FLAGS = gflags.FLAGS
gflags.DEFINE_string( 'port', '/dev/ttyUSB0', 'dynamixel port' )
gflags.DEFINE_integer( 'baud', 1000000, 'baud rate' )
gflags.DEFINE_integer( 'min_id', 1, 'lowest dynamixel ID' )
gflags.DEFINE_integer( 'max_id', 8, 'highest dynamixel ID' )
gflags.DEFINE_enum( 'command', '', [ '', 'position', 'torque_on', 'torque_off', 'control' ], 'command to execute' )
if __name__ == '__main__':
flags = FLAGS( sys.argv )
serial = dynamixel.SerialStream( port=FLAGS.port,
baudrate=FLAGS.baud,
timeout=1 )
# Instantiate network object
net = dynamixel.DynamixelNetwork( serial )
# Populate network with dynamixel objects
for servoId in range( FLAGS.min_id, FLAGS.max_id + 1 ):
newDynamixel = dynamixel.Dynamixel( servoId, net )
net._dynamixel_map[ servoId ] = newDynamixel
servos = net.get_dynamixels()
print( 'network initialised' )
if FLAGS.command == '' or FLAGS.command == 'torque_off':
for d in servos:
d.torque_enable = False
elif FLAGS.command == 'position':
while True:
pos = [ d.current_position for d in servos ]
pos_txt = [ '{:4d}'.format( p ) for p in pos ]
print( ''.join( pos_txt ) )
time.sleep( 0.25 )
    elif FLAGS.command == 'torque_on':
for d in servos:
d.torque_enable = True
elif FLAGS.command == 'control':
pygame.init()
pygame.joystick.init()
js = pygame.joystick.Joystick( 0 )
js.init()
# torque on
for d in servos:
d.moving_speed = 50
d.torque_enable = True
d.torque_limit = 800
d.max_torque = 800
d.goal_position = 512
# Send all the commands to the servos.
net.synchronize()
print( 'moving to default position' )
time.sleep( 5 )
print( 'done' )
# get initial positions
servo_pos = [ d.current_position for d in servos ]
        clip = lambda x: int( min( 1023.0, max( 0.0, x ) ) )  # clamp to the 0..1023 Dynamixel position range
while True:
pygame.event.pump()
axis = [ js.get_axis( a ) for a in range( 27 ) ]
servo_pos[0] = clip( servo_pos[0] - axis[0] * SERVO_STEP )
servo_pos[1] = clip( servo_pos[1] + axis[1] * SERVO_STEP )
servo_pos[2] = clip( servo_pos[2] + axis[3] * SERVO_STEP )
servo_pos[3] = clip( servo_pos[3] + axis[2] * SERVO_STEP )
servo_pos[4] = clip( servo_pos[4] - (axis[12] + 1.0) * SERVO_STEP / 2 + (axis[13] + 1.0) * SERVO_STEP / 2 )
servo_pos[5] = clip( servo_pos[5] - (axis[14] + 1.0) * SERVO_STEP / 2 + (axis[15] + 1.0) * SERVO_STEP / 2 )
if axis[0] != 0.0:
# shoulder yaw
servos[0].goal_position = servo_pos[0]
else:
if abs( servos[0].current_position - servo_pos[0] ) > SERVO_STEP:
servo_pos[0] = servos[0].current_position
if axis[1] != 0.0:
# shoulder piych - coupling
servos[1].goal_position = servo_pos[1]
servos[2].goal_position = 1024 - servo_pos[1]
else:
if abs( servos[1].current_position - servo_pos[1] ) > SERVO_STEP:
servo_pos[1] = servos[1].current_position
if axis[3] != 0.0:
# elbow pitch - coupling
servos[3].goal_position = servo_pos[2]
servos[4].goal_position = 1024 - servo_pos[2]
else:
if abs( servos[3].current_position - servo_pos[2] ) > SERVO_STEP:
servo_pos[2] = servos[3].current_position
if axis[2] != 0.0:
# wrist pitch
servos[5].goal_position = servo_pos[3]
else:
if abs( servos[5].current_position - servo_pos[3] ) > SERVO_STEP:
servo_pos[3] = servos[5].current_position
# wrist roll
servos[6].goal_position = servo_pos[4]
# gripper
servos[7].goal_position = servo_pos[5]
# show desired position
# print( ''.join( [ '{:4d}'.format( p ) for p in servo_pos ] ) )
# current position
# print( ''.join( [ '{:5d}'.format( d.current_position ) for d in servos ] ) )
# goal position
# print( ''.join( [ '{:5d}'.format( d.goal_position ) for d in servos ] ) )
# diff position
# print( 'diff: ' + ''.join( [ '{:5d}'.format( d.current_position - d.goal_position ) for d in servos ] ) )
# current temperature
# print( ''.join( [ '{:3d}'.format( d.current_temperature ) for d in servos ] ) )
# current load
# print( ''.join( [ '{:5d}'.format( d.current_load ) for d in servos ] ) )
# current load and temperature
print( ''.join( [ '{:5d},{:3d} '.format( d.current_load, d.current_temperature ) for d in servos ] ) )
# Send all the commands to the servos.
net.synchronize()
time.sleep( 0.05 )
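# Illustrative invocations derived from the gflags definitions above
# (assumed, not part of the original script):
#   python joystick_control.py --port=/dev/ttyUSB0 --command=position
#   python joystick_control.py --command=control   # drive the arm with a joystick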
|
gpl-2.0
| 807,746,683,606,955,400 | 28.785235 | 115 | 0.614241 | false | 2.644815 | false | false | false |
XianwuLin/block_games
|
snake-ai/snake.py
|
1
|
10849
|
# coding: utf-8
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
from random import randint
# 蛇运动的场地长宽
HEIGHT = 10
WIDTH = 20
FIELD_SIZE = HEIGHT * WIDTH
# 蛇头总是位于snake数组的第一个元素
HEAD = 0
# 用来代表不同东西的数字,由于矩阵上每个格子会处理成到达食物的路径长度,
# 因此这三个变量间需要有足够大的间隔(>HEIGHT*WIDTH)
FOOD = 0
UNDEFINED = (HEIGHT + 1) * (WIDTH + 1)
SNAKE = 2 * UNDEFINED
# 由于snake是一维数组,所以对应元素直接加上以下值就表示向四个方向移动
LEFT = -1
RIGHT = 1
UP = -WIDTH
DOWN = WIDTH
# 错误码
ERR = -1111
# A 1-D array is used to represent the 2-D field.
# board is the rectangular field the snake moves on.
# The head starts at (1, 1); row 0, row HEIGHT, column 0, and column WIDTH are
# walls and unusable.
# The initial snake length is 1.
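# (Added note) The 1-D <-> 2-D mapping used throughout: idx = row * WIDTH + col,
# so row = idx / WIDTH and col = idx % WIDTH. With WIDTH = 20, the initial head
# below, 1*WIDTH+1 = 21, is the cell (1, 1).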
board = [0] * FIELD_SIZE
snake = [0] * (FIELD_SIZE+1)
snake[HEAD] = 1*WIDTH+1
snake_size = 1
# Temporary counterparts of the variables above, used when the snake probes a move virtually.
tmpboard = [0] * FIELD_SIZE
tmpsnake = [0] * (FIELD_SIZE+1)
tmpsnake[HEAD] = 1*WIDTH+1
tmpsnake_size = 1
# food: position of the food (0 ~ FIELD_SIZE-1), initially at (3, 3)
# best_move: direction of movement
food = 3 * WIDTH + 3
best_move = ERR
# Array of movement directions
mov = [LEFT, RIGHT, UP, DOWN]
# The key received, and the score
key = KEY_RIGHT
score = 1  # the score also equals the snake's length
# Check whether a cell is covered by the snake body; return True if it is free.
def is_cell_free(idx, psize, psnake):
return not (idx in psnake[:psize])
# Check whether position idx can move one step in direction move.
def is_move_possible(idx, move):
flag = False
if move == LEFT:
flag = True if idx%WIDTH > 1 else False
elif move == RIGHT:
flag = True if idx%WIDTH < (WIDTH-2) else False
elif move == UP:
        flag = True if idx > (2*WIDTH-1) else False  # i.e. idx / WIDTH > 1
elif move == DOWN:
        flag = True if idx < (FIELD_SIZE-2*WIDTH) else False  # i.e. idx / WIDTH < HEIGHT-2
return flag
# Reset the board.
# After board_refresh, every UNDEFINED value has been replaced by a path
# length to the food, so the board must be reset before it can be reused.
def board_reset(psnake, psize, pboard):
for i in xrange(FIELD_SIZE):
if i == food:
pboard[i] = FOOD
        elif is_cell_free(i, psize, psnake):  # the cell is free
pboard[i] = UNDEFINED
        else:  # the cell is snake body
pboard[i] = SNAKE
# Breadth-first search over the whole board, computing for every non-SNAKE
# cell the length of the path from it to the food.
def board_refresh(pfood, psnake, pboard):
queue = []
queue.append(pfood)
inqueue = [0] * FIELD_SIZE
found = False
    # When the while loop ends, every cell other than the snake body holds
    # the length of the path from that cell to the food.
while len(queue)!=0:
idx = queue.pop(0)
if inqueue[idx] == 1: continue
inqueue[idx] = 1
for i in xrange(4):
if is_move_possible(idx, mov[i]):
if idx + mov[i] == psnake[HEAD]:
found = True
                if pboard[idx+mov[i]] < SNAKE:  # if the cell is not part of the snake body
if pboard[idx+mov[i]] > pboard[idx]+1:
pboard[idx+mov[i]] = pboard[idx] + 1
if inqueue[idx+mov[i]] == 0:
queue.append(idx+mov[i])
return found
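# (Added worked example) With the food one cell left of the head on an
# otherwise empty board, the flood fill labels the food's neighbours 1, their
# neighbours 2, and so on; found is True because the wave reaches the head.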
# Starting from the head, use the board values to pick, among the four
# neighbouring cells, the move with the shortest path value.
def choose_shortest_safe_move(psnake, pboard):
best_move = ERR
min = SNAKE
for i in xrange(4):
if is_move_possible(psnake[HEAD], mov[i]) and pboard[psnake[HEAD]+mov[i]]<min:
min = pboard[psnake[HEAD]+mov[i]]
best_move = mov[i]
return best_move
# Starting from the head, use the board values to pick, among the four
# neighbouring cells, the move with the longest path value (among reachable cells).
def choose_longest_safe_move(psnake, pboard):
best_move = ERR
max = -1
for i in xrange(4):
if is_move_possible(psnake[HEAD], mov[i]) and pboard[psnake[HEAD]+mov[i]]<UNDEFINED and pboard[psnake[HEAD]+mov[i]]>max:
max = pboard[psnake[HEAD]+mov[i]]
best_move = mov[i]
return best_move
# Check whether the head can chase the tail, i.e. whether a path exists
# between head and tail. This keeps the head from running into a dead end.
# The check is virtual, carried out on tmpboard and tmpsnake.
def is_tail_inside():
global tmpboard, tmpsnake, food, tmpsnake_size
    tmpboard[tmpsnake[tmpsnake_size-1]] = 0  # virtually turn the tail into food (virtual, hence done on tmpsnake/tmpboard)
    tmpboard[food] = SNAKE  # treat the food cell as snake body
    result = board_refresh(tmpsnake[tmpsnake_size-1], tmpsnake, tmpboard)  # path length from every cell to the tail
    for i in xrange(4):  # if head and tail are directly adjacent, return False: following the tail is not possible
if is_move_possible(tmpsnake[HEAD], mov[i]) and tmpsnake[HEAD]+mov[i]==tmpsnake[tmpsnake_size-1] and tmpsnake_size>3:
result = False
return result
# Move the head one step towards the tail,
# heading for it regardless of the snake body in between.
def follow_tail():
global tmpboard, tmpsnake, food, tmpsnake_size
tmpsnake_size = snake_size
tmpsnake = snake[:]
    board_reset(tmpsnake, tmpsnake_size, tmpboard)  # reset the virtual board
    tmpboard[tmpsnake[tmpsnake_size-1]] = FOOD  # make the tail the food
    tmpboard[food] = SNAKE  # treat the food cell as snake body
    board_refresh(tmpsnake[tmpsnake_size-1], tmpsnake, tmpboard)  # path length from every cell to the tail
    tmpboard[tmpsnake[tmpsnake_size-1]] = SNAKE  # restore the tail
    return choose_longest_safe_move(tmpsnake, tmpboard)  # direction that moves the head one step
# When no other plan works, pick any feasible direction and take one step.
def any_possible_move():
global food , snake, snake_size, board
best_move = ERR
board_reset(snake, snake_size, board)
board_refresh(food, snake, board)
min = SNAKE
for i in xrange(4):
if is_move_possible(snake[HEAD], mov[i]) and board[snake[HEAD]+mov[i]]<min:
min = board[snake[HEAD]+mov[i]]
best_move = mov[i]
return best_move
def shift_array(arr, size):
for i in xrange(size, 0, -1):
arr[i] = arr[i-1]
def new_food():
global food, snake_size
cell_free = False
while not cell_free:
w = randint(1, WIDTH-2)
h = randint(1, HEIGHT-2)
food = h * WIDTH + w
cell_free = is_cell_free(food, snake_size, snake)
win.addch(food/WIDTH, food%WIDTH, '@')
# The real snake moves here: one step in direction pbest_move.
def make_move(pbest_move):
global key, snake, board, snake_size, score
shift_array(snake, snake_size)
snake[HEAD] += pbest_move
    # Esc quits; getch also keeps the drawing smooth - without it only the final frame would be visible.
win.timeout(10)
event = win.getch()
key = key if event == -1 else event
if key == 27: return
p = snake[HEAD]
win.addch(p/WIDTH, p%WIDTH, '*')
    # If the new head lands on the food, grow by one, spawn new food, and
    # reset the board (the old path lengths are no longer usable).
if snake[HEAD] == food:
        board[snake[HEAD]] = SNAKE  # the new head
snake_size += 1
score += 1
if snake_size < FIELD_SIZE: new_food()
    else:  # the new head is not on the food
        board[snake[HEAD]] = SNAKE  # the new head
        board[snake[snake_size]] = UNDEFINED  # the old tail becomes an empty cell
win.addch(snake[snake_size]/WIDTH, snake[snake_size]%WIDTH, ' ')
# Run the game once virtually; the caller then checks whether that run is
# safe before moving for real.
# After the virtual snake eats the food, we know where it sits on the board.
def virtual_shortest_move():
global snake, board, snake_size, tmpsnake, tmpboard, tmpsnake_size, food
tmpsnake_size = snake_size
    tmpsnake = snake[:]  # tmpsnake = snake would make both names point at the same list
    tmpboard = board[:]  # board already holds each cell's path length to the food; no need to recompute
board_reset(tmpsnake, tmpsnake_size, tmpboard)
food_eated = False
while not food_eated:
board_refresh(food, tmpsnake, tmpboard)
move = choose_shortest_safe_move(tmpsnake, tmpboard)
shift_array(tmpsnake, tmpsnake_size)
        tmpsnake[HEAD] += move  # prepend a new position as the head
        # If the new head lands exactly on the food, grow by one, reset the
        # board, and mark the food cell as part of the snake (SNAKE).
if tmpsnake[HEAD] == food:
tmpsnake_size += 1
            board_reset(tmpsnake, tmpsnake_size, tmpboard)  # the virtual snake's position on the board after the run (label101010)
tmpboard[food] = SNAKE
food_eated = True
        else:  # not on the food: the new cell becomes the head and the last one becomes empty
tmpboard[tmpsnake[HEAD]] = SNAKE
tmpboard[tmpsnake[tmpsnake_size]] = UNDEFINED
# Called when a path exists between the snake and the food.
def find_safe_way():
global snake, board
safe_move = ERR
    # Run once virtually; a path from snake to food is already guaranteed, so
    # the run succeeds and tmpboard ends up holding the virtual snake's
    # position (see label101010).
    virtual_shortest_move()  # the only call site of this function
    if is_tail_inside():  # if head and tail are still connected after the virtual run, take the shortest path (one step)
return choose_shortest_safe_move(snake, board)
    safe_move = follow_tail()  # otherwise virtually follow the tail for one step and return that move
return safe_move
curses.initscr()
win = curses.newwin(HEIGHT, WIDTH, 0, 0)
win.keypad(1)
curses.noecho()
curses.curs_set(0)
win.border(0)
win.nodelay(1)
win.addch(food/WIDTH, food%WIDTH, '@')
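# (Added summary) Each tick the AI tries, in order:
#   1) the shortest safe path to the food, accepted only when a virtual run
#      shows the tail stays reachable afterwards (find_safe_way);
#   2) chasing its own tail to stall safely (follow_tail);
#   3) any legal move at all (any_possible_move).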
while key != 27:
win.border(0)
win.addstr(0, 2, 'S:' + str(score) + ' ')
win.timeout(10)
    # Read keyboard input; this also keeps the display smooth.
event = win.getch()
key = key if event == -1 else event
    # Reset the board.
board_reset(snake, snake_size, board)
    # board_refresh returns True when the snake can reach the food; every cell
    # except the snake body (= SNAKE) then holds its shortest path length to
    # the food.
if board_refresh(food, snake, board):
        best_move = find_safe_way()  # the only call site of find_safe_way
else:
best_move = follow_tail()
if best_move == ERR:
best_move = any_possible_move()
    # Each round of thinking yields a single direction; move one step.
if best_move != ERR: make_move(best_move)
else: break
curses.endwin()
print("\nScore - " + str(score))
|
mit
| -5,874,944,903,512,969,000 | 28.52069 | 128 | 0.625044 | false | 1.927719 | false | false | false |
rk700/rbook
|
rbook/r_pdf.py
|
1
|
6074
|
#!/usr/bin/env python
#-*- coding: utf8 -*-
#
# Copyright (C) 2012 Ruikai Liu <[email protected]>
#
# This file is part of rbook.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rbook. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import wx
import doc_scroll
import fitz
class DocScroll(doc_scroll.DocScroll):
def __init__(self, parent, current_page_idx):
#self.ctx = parent.ctx
#self.currentPage = parent.document.loadPage(current_page_idx)
#self.width = parent.document.load_page(current_page_idx).bound_page().get_width()
self.width = parent.document.loadPage(current_page_idx).bound().width
doc_scroll.DocScroll.__init__(self, parent, current_page_idx)
self.panel.Bind(wx.EVT_MOTION, self.on_motion)
self.panel.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
def on_motion(self, event):
cx, cy = event.GetPositionTuple()
mouse_on_link = False
link = self.links
while link:
rect = fitz.Rect(link.rect).transform(self.trans)
if cx >= rect.x0 and cx <= rect.x1 and \
cy >= rect.y0 and cy <= rect.y1:
mouse_on_link = True
break
link = link.next
if mouse_on_link:
self.panel.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
self.link_context = (link.dest.kind, \
link.dest.page, \
link.dest.flags, \
link.dest.lt, \
link.dest.uri)
else:
self.panel.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
self.link_context = None
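    # (Added note) link_context, when set by on_motion above, is the 5-tuple
    # (dest kind, target page, dest flags, top-left point, URI) consumed by
    # on_left_down below.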
def on_left_down(self, event):
if not self.link_context is None:
if self.link_context[0] == fitz.LINK_GOTO:
# after change page, link_context becomes None,
# so we need to record the pos
pos = self.link_context[3]
flag = self.link_context[2]
self.parent.change_page(self.link_context[1])
if flag & fitz.LINK_FLAG_T_VALID:
pos = fitz.Point(pos).transform(self.trans)
self.Scroll(-1, (self.height-pos.y)/self.scroll_unit)
elif self.link_context[0] == fitz.LINK_URI:
subprocess.Popen(('xdg-open', self.link_context[4]))
event.Skip()
def set_page_size(self):
self.trans = fitz.Matrix(self.scale, self.scale)
self.rect = fitz.Rect(self.page_rect).transform(self.trans) #page_rect is the unscaled one
self.irect = self.rect.round()
self.width = self.irect.width
self.height = self.irect.height
def do_drawing(self):
self.buffer = wx.BitmapFromBuffer(self.width,
self.height,
self.pix.samples)
dc = wx.BufferedDC(wx.ClientDC(self.panel),
self.buffer)
def set_current_page(self, current_page_idx, draw=True, scroll=None, scale=None):
self.hitbbox = []
if scale:
self.scale = scale
current_page = self.parent.document.loadPage(current_page_idx)
self.page_rect = current_page.bound()
#self.orig_width = self.page_rect.width
self.set_page_size()
self.text_sheet = fitz.TextSheet()
self.text_page = fitz.TextPage(self.page_rect)
self.display_list = fitz.DisplayList(self.page_rect)
current_page.run(fitz.Device(self.display_list), self.trans)
self.links = current_page.loadLinks()
self.link_context = None
self.display_list.run(fitz.Device(self.text_sheet, self.text_page), fitz.Identity, self.rect)
if draw:
self.setup_drawing(scroll=scroll)
def setup_drawing(self, hitbbox=None, scroll=None):
doc_scroll.DocScroll.setup_drawing(self, hitbbox, scroll)
self.pix = fitz.Pixmap(fitz.Colorspace(fitz.CS_RGB), self.irect)
        self.pix.clearWith(255)
self.display_list.run(fitz.Device(self.pix, None), fitz.Identity, self.rect)
if hitbbox:
for rect in hitbbox:
self.pix.invertIRect(rect.round())
self.do_drawing()
def new_scale_setup_drawing(self):
try:
#hitbbox = self.hitbbox[self.parent.hit]
self.setup_drawing()
except IndexError:
self.setup_drawing()
def scroll_to_next_found(self, hit):
trans_hitbbox = self.trans.transform_irect(self.hitbbox[hit][0])
self.setup_drawing(self.hitbbox[hit],
(trans_hitbbox.x0/self.scroll_unit,
trans_hitbbox.y0/self.scroll_unit))
def get_hitbbox(self, s):
return self.text_page.search(s, self.parent.main_frame.settings['ic'])
def search_in_current(self, newhit):
old_hitbbox = self.hitbbox[self.parent.hit]
for bbox in old_hitbbox:
self.pix.invert_pixmap(self.trans.transform_irect(bbox))
new_hitbbox = self.hitbbox[newhit]
for bbox in new_hitbbox:
self.pix.invert_pixmap(self.trans.transform_irect(bbox))
self.do_drawing()
self.Scroll(new_hitbbox[0].x0/self.scroll_unit,
new_hitbbox[0].y0/self.scroll_unit)
def on_refresh(self):
self.parent.document = fitz.Document(self.parent.filepath)
self.parent.n_pages = self.parent.document.pageCount
|
gpl-3.0
| 3,068,230,262,222,277,600 | 36.9625 | 101 | 0.596806 | false | 3.600474 | false | false | false |
fy0/my-leetcode
|
1028. Recover a Tree From Preorder Traversal/main.py
|
1
|
1267
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import re
class Solution:
def recoverFromPreorder(self, S: str) -> TreeNode:
        items = list(map(lambda x: [len(x[0]), int(x[1])], re.findall(r'(-*)(\d+)', S)))  # cast values to int so node.val is numeric
if not items:
return None
if items[0][0] != 0:
            raise ValueError('the root node must be at depth 0')
nodes = [TreeNode(items[0][1])]
cur_depth = 0
def node_add(cur_node, v):
n = TreeNode(v)
if not cur_node.left:
cur_node.left = n
else:
cur_node.right = n
nodes.append(n)
for d, v in items[1:]:
if d > cur_depth:
                # depth increased: descend and add a child node
cur_depth += 1
node_add(nodes[-1], v)
elif d < cur_depth:
                # depth decreased: pop back to that depth, then add a child node
nodes = nodes[:d]
cur_depth = d
node_add(nodes[-1], v)
else:
                # same depth: add a sibling node
nodes.pop()
node_add(nodes[-1], v)
return nodes[0]
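# (Added illustrative trace, not part of the original submission) For
# S = "1-2--3--4-5--6--7" the regex yields (depth, value) pairs
# (0,1) (1,2) (2,3) (2,4) (1,5) (2,6) (2,7), which rebuild the tree rooted at
# 1 with subtrees 2 -> (3, 4) and 5 -> (6, 7).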
|
apache-2.0
| 4,045,531,576,449,108,000 | 22.66 | 83 | 0.426881 | false | 3.048969 | false | false | false |
TeamPurple/Cyber
|
yowsup_dev/src/whatspy_flask.py
|
1
|
2908
|
import sys, os, time, shutil
import argparse
from Yowsup.connectionmanager import YowsupConnectionManager
from Yowsup.Common.debugger import Debugger
# NOTE: 'args' is referenced below (args.t) but was never defined in the
# original file; a minimal, assumed -t option supplies the directory where
# profile pictures are saved.
_argparser = argparse.ArgumentParser()
_argparser.add_argument('-t', default='.')
args = _argparser.parse_args()
USERNAME = '972583340860'
PASSWORD = 'jag6FSF6MicZmp9M8lrsSqoXYo8='.decode('base64')
phase = None
cm = None
signals_interface = None
methods_interface = None
# Utils
def phone_number2jid(phone_number):
return phone_number + '@s.whatsapp.net'
# Login
def cb_auth_success(username):
global phase
print 'Authed %s' % username
methods_interface.call('ready')
phase = True
def cb_auth_fail(username, reason):
global phase
print 'Auth Fail!', username, reason
phase = False
def login():
methods_interface.call('auth_login', (USERNAME, PASSWORD))
# Presence
def cb_presence_updated_once(jid, last_seen):
''' TODO: save the time to something and then use the web app to load it'''
global time_got
print 'HELLo'
print 'Last seen @', time.ctime(time.time() - last_seen)
time_got = True
# Contacts
def cb_contact_gotProfilePicture_once(jid, picture_id, image_path):
''' for eric to use for web app
TODO: modify the path so that it goes to where you want
'''
global photo_got
phone_number = jid.split('@')[0]
print 'Got', phone_number
shutil.copyfile(image_path, os.path.join(args.t, phone_number + '.jpg'))
photo_got = True
# Misc
def cb_disconnected(reason):
print 'Disconnected because %s' % reason
sys.exit(0)
# Main
def setup():
global cm, signals_interface, methods_interface
Debugger.enabled = False
cm = YowsupConnectionManager()
cm.setAutoPong(True)
signals_interface = cm.getSignalsInterface()
methods_interface = cm.getMethodsInterface()
signals_interface.registerListener('auth_success', cb_auth_success)
signals_interface.registerListener('auth_fail', cb_auth_fail)
signals_interface.registerListener('disconnected', cb_disconnected)
def get_photo_time(phone_number):
''' phone number includes the country code
'''
global photo_got, time_got
setup()
login()
photo_got = False
time_got = False
while phase is None:
time.sleep(0.5)
signals_interface.registerListener('contact_gotProfilePicture', cb_contact_gotProfilePicture_once)
signals_interface.registerListener('presence_updated', cb_presence_updated_once)
jid = phone_number2jid(phone_number)
methods_interface.call('presence_request', (jid,))
methods_interface.call('contact_getProfilePicture', (jid,))
timeout = 0
while not (photo_got and time_got) and timeout < 1:
#TODO: Time out the request for both photo and time depending on whats available
print photo_got, time_got
time.sleep(0.25)
timeout += 0.25
# methods_interface.call('disconnect', ('closed!',))
get_photo_time('16094755004')
get_photo_time('16094755004')
|
gpl-3.0
| 2,017,714,758,593,760,500 | 26.17757 | 102 | 0.686039 | false | 3.568098 | false | false | false |
USGSDenverPychron/pychron
|
pychron/headless_config_loadable.py
|
1
|
1222
|
#!/usr/bin/python
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.base_config_loadable import BaseConfigLoadable
from pychron.headless_loggable import HeadlessLoggable
class HeadlessConfigLoadable(BaseConfigLoadable, HeadlessLoggable):
"""
"""
# ============= EOF =============================================
|
apache-2.0
| 4,949,899,419,939,401,000 | 38.419355 | 81 | 0.555646 | false | 4.907631 | false | false | false |
blaze/dask
|
dask/array/slicing.py
|
1
|
40834
|
from itertools import product
import math
from numbers import Integral, Number
from operator import add, getitem, itemgetter
import warnings
import functools
import bisect
import numpy as np
from tlz import memoize, merge, pluck, concat, accumulate
from .. import core
from .. import config
from .. import utils
from ..highlevelgraph import HighLevelGraph
from ..base import tokenize, is_dask_collection
colon = slice(None, None, None)
def _sanitize_index_element(ind):
"""Sanitize a one-element index."""
if isinstance(ind, Number):
ind2 = int(ind)
if ind2 != ind:
raise IndexError("Bad index. Must be integer-like: %s" % ind)
else:
return ind2
elif ind is None:
return None
else:
raise TypeError("Invalid index type", type(ind), ind)
def sanitize_index(ind):
"""Sanitize the elements for indexing along one axis
>>> sanitize_index([2, 3, 5])
array([2, 3, 5])
>>> sanitize_index([True, False, True, False])
array([0, 2])
>>> sanitize_index(np.array([1, 2, 3]))
array([1, 2, 3])
>>> sanitize_index(np.array([False, True, True]))
array([1, 2])
>>> type(sanitize_index(np.int32(0)))
<class 'int'>
>>> sanitize_index(1.0)
1
>>> sanitize_index(0.5)
Traceback (most recent call last):
...
IndexError: Bad index. Must be integer-like: 0.5
"""
if ind is None:
return None
elif isinstance(ind, slice):
return slice(
_sanitize_index_element(ind.start),
_sanitize_index_element(ind.stop),
_sanitize_index_element(ind.step),
)
elif isinstance(ind, Number):
return _sanitize_index_element(ind)
elif is_dask_collection(ind):
return ind
index_array = np.asanyarray(ind)
if index_array.dtype == bool:
nonzero = np.nonzero(index_array)
if len(nonzero) == 1:
# If a 1-element tuple, unwrap the element
nonzero = nonzero[0]
return np.asanyarray(nonzero)
elif np.issubdtype(index_array.dtype, np.integer):
return index_array
elif np.issubdtype(index_array.dtype, np.floating):
int_index = index_array.astype(np.intp)
if np.allclose(index_array, int_index):
return int_index
else:
check_int = np.isclose(index_array, int_index)
first_err = index_array.ravel()[np.flatnonzero(~check_int)[0]]
raise IndexError("Bad index. Must be integer-like: %s" % first_err)
else:
raise TypeError("Invalid index type", type(ind), ind)
def slice_array(out_name, in_name, blockdims, index, itemsize):
"""
Master function for array slicing
This function makes a new dask that slices blocks along every
dimension and aggregates (via cartesian product) each dimension's
slices so that the resulting block slices give the same results
as the original slice on the original structure
Index must be a tuple. It may contain the following types
int, slice, list (at most one list), None
Parameters
----------
in_name - string
This is the dask variable name that will be used as input
out_name - string
This is the dask variable output name
blockshape - iterable of integers
index - iterable of integers, slices, lists, or None
itemsize : int
The number of bytes required for each element of the array.
Returns
-------
Dict where the keys are tuples of
(out_name, dim_index[, dim_index[, ...]])
and the values are
(function, (in_name, dim_index, dim_index, ...),
(slice(...), [slice()[,...]])
Also new blockdims with shapes of each block
((10, 10, 10, 10), (20, 20))
Examples
--------
>>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
... (slice(10, 35),)) # doctest: +SKIP
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), (slice(10, 20),)),
('y', 1): (getitem, ('x', 1), (slice(0, 15),))}
>>> blockdims # doctest: +SKIP
((10, 15),)
See Also
--------
This function works by successively unwrapping cases and passing down
through a sequence of functions.
slice_with_newaxis : handle None/newaxis case
slice_wrap_lists : handle fancy indexing with lists
slice_slices_and_integers : handle everything else
"""
blockdims = tuple(map(tuple, blockdims))
# x[:, :, :] - Punt and return old value
if all(
isinstance(index, slice) and index == slice(None, None, None) for index in index
):
suffixes = product(*[range(len(bd)) for bd in blockdims])
dsk = dict(((out_name,) + s, (in_name,) + s) for s in suffixes)
return dsk, blockdims
# Add in missing colons at the end as needed. x[5] -> x[5, :, :]
not_none_count = sum(i is not None for i in index)
missing = len(blockdims) - not_none_count
index += (slice(None, None, None),) * missing
# Pass down to next function
dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index, itemsize)
bd_out = tuple(map(tuple, bd_out))
return dsk_out, bd_out
def slice_with_newaxes(out_name, in_name, blockdims, index, itemsize):
"""
Handle indexing with Nones
Strips out Nones then hands off to slice_wrap_lists
"""
# Strip Nones from index
index2 = tuple([ind for ind in index if ind is not None])
where_none = [i for i, ind in enumerate(index) if ind is None]
where_none_orig = list(where_none)
for i, x in enumerate(where_none):
n = sum(isinstance(ind, Integral) for ind in index[:x])
if n:
where_none[i] -= n
# Pass down and do work
dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2, itemsize)
if where_none:
expand = expander(where_none)
expand_orig = expander(where_none_orig)
# Insert ",0" into the key: ('x', 2, 3) -> ('x', 0, 2, 0, 3)
dsk2 = {
(out_name,) + expand(k[1:], 0): (v[:2] + (expand_orig(v[2], None),))
for k, v in dsk.items()
if k[0] == out_name
}
# Add back intermediate parts of the dask that weren't the output
dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})
# Insert (1,) into blockdims: ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))
blockdims3 = expand(blockdims2, (1,))
return dsk3, blockdims3
else:
return dsk, blockdims2
def slice_wrap_lists(out_name, in_name, blockdims, index, itemsize):
"""
Fancy indexing along blocked array dasks
Handles index of type list. Calls slice_slices_and_integers for the rest
See Also
--------
take : handle slicing with lists ("fancy" indexing)
slice_slices_and_integers : handle slicing with slices and integers
"""
assert all(isinstance(i, (slice, list, Integral, np.ndarray)) for i in index)
if not len(blockdims) == len(index):
raise IndexError("Too many indices for array")
# Do we have more than one list in the index?
where_list = [
i for i, ind in enumerate(index) if isinstance(ind, np.ndarray) and ind.ndim > 0
]
if len(where_list) > 1:
raise NotImplementedError("Don't yet support nd fancy indexing")
# Is the single list an empty list? In this case just treat it as a zero
# length slice
if where_list and not index[where_list[0]].size:
index = list(index)
index[where_list.pop()] = slice(0, 0, 1)
index = tuple(index)
# No lists, hooray! just use slice_slices_and_integers
if not where_list:
return slice_slices_and_integers(out_name, in_name, blockdims, index)
# Replace all lists with full slices [3, 1, 0] -> slice(None, None, None)
index_without_list = tuple(
slice(None, None, None) if isinstance(i, np.ndarray) else i for i in index
)
# lists and full slices. Just use take
if all(isinstance(i, np.ndarray) or i == slice(None, None, None) for i in index):
axis = where_list[0]
blockdims2, dsk3 = take(
out_name, in_name, blockdims, index[where_list[0]], itemsize, axis=axis
)
# Mixed case. Both slices/integers and lists. slice/integer then take
else:
# Do first pass without lists
tmp = "slice-" + tokenize((out_name, in_name, blockdims, index))
dsk, blockdims2 = slice_slices_and_integers(
tmp, in_name, blockdims, index_without_list
)
# After collapsing some axes due to int indices, adjust axis parameter
axis = where_list[0]
axis2 = axis - sum(
1 for i, ind in enumerate(index) if i < axis and isinstance(ind, Integral)
)
# Do work
blockdims2, dsk2 = take(out_name, tmp, blockdims2, index[axis], 8, axis=axis2)
dsk3 = merge(dsk, dsk2)
return dsk3, blockdims2
def slice_slices_and_integers(out_name, in_name, blockdims, index):
"""
Dask array indexing with slices and integers
See Also
--------
_slice_1d
"""
from .core import unknown_chunk_message
shape = tuple(cached_cumsum(dim, initial_zero=True)[-1] for dim in blockdims)
for dim, ind in zip(shape, index):
if np.isnan(dim) and ind != slice(None, None, None):
raise ValueError(
"Arrays chunk sizes are unknown: %s%s" % (shape, unknown_chunk_message)
)
assert all(isinstance(ind, (slice, Integral)) for ind in index)
assert len(index) == len(blockdims)
# Get a list (for each dimension) of dicts{blocknum: slice()}
block_slices = list(map(_slice_1d, shape, blockdims, index))
sorted_block_slices = [sorted(i.items()) for i in block_slices]
# (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))
# (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
out_names = list(
product(
[out_name],
*[
range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
for d, i in zip(block_slices, index)
if not isinstance(i, Integral)
]
)
)
all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))
dsk_out = {
out_name: (getitem, in_name, slices)
for out_name, in_name, slices in zip(out_names, in_names, all_slices)
}
new_blockdims = [
new_blockdim(d, db, i)
for d, i, db in zip(shape, index, blockdims)
if not isinstance(i, Integral)
]
return dsk_out, new_blockdims
def _slice_1d(dim_shape, lengths, index):
"""Returns a dict of {blocknum: slice}
This function figures out where each slice should start in each
block for a single dimension. If the slice won't return any elements
in the block, that block will not be in the output.
Parameters
----------
dim_shape - the number of elements in this dimension.
This should be a positive, non-zero integer
blocksize - the number of elements per block in this dimension
This should be a positive, non-zero integer
index - a description of the elements in this dimension that we want
This might be an integer, a slice(), or an Ellipsis
Returns
-------
dictionary where the keys are the integer index of the blocks that
should be sliced and the values are the slices
Examples
--------
Trivial slicing
>>> _slice_1d(100, [60, 40], slice(None, None, None))
{0: slice(None, None, None), 1: slice(None, None, None)}
100 length array cut into length 20 pieces, slice 0:35
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
{0: slice(None, None, None), 1: slice(0, 15, 1)}
Support irregular blocks and various slices
>>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
{0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}
Support step sizes
>>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
{0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40)) # step > blocksize
{0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}
Also support indexing single elements
>>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
{1: 5}
And negative slicing
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3)) # doctest: +NORMALIZE_WHITESPACE
{4: slice(-1, -21, -3),
3: slice(-2, -21, -3),
2: slice(-3, -21, -3),
1: slice(-1, -21, -3),
0: slice(-2, -20, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3)) # doctest: +NORMALIZE_WHITESPACE
{4: slice(-1, -21, -3),
3: slice(-2, -21, -3),
2: slice(-3, -21, -3),
1: slice(-1, -21, -3),
0: slice(-2, -8, -3)}
>>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
{4: slice(-1, -12, -3)}
"""
chunk_boundaries = cached_cumsum(lengths)
if isinstance(index, Integral):
# use right-side search to be consistent with previous result
i = bisect.bisect_right(chunk_boundaries, index)
if i > 0:
# the very first chunk has no relative shift
ind = index - chunk_boundaries[i - 1]
else:
ind = index
return {int(i): int(ind)}
assert isinstance(index, slice)
if index == colon:
return {k: colon for k in range(len(lengths))}
step = index.step or 1
if step > 0:
start = index.start or 0
stop = index.stop if index.stop is not None else dim_shape
else:
start = index.start if index.start is not None else dim_shape - 1
start = dim_shape - 1 if start >= dim_shape else start
stop = -(dim_shape + 1) if index.stop is None else index.stop
# posify start and stop
if start < 0:
start += dim_shape
if stop < 0:
stop += dim_shape
d = dict()
if step > 0:
istart = bisect.bisect_right(chunk_boundaries, start)
istop = bisect.bisect_left(chunk_boundaries, stop)
# the bound is not exactly tight; make it tighter?
istop = min(istop + 1, len(lengths))
# jump directly to istart
if istart > 0:
start = start - chunk_boundaries[istart - 1]
stop = stop - chunk_boundaries[istart - 1]
for i in range(istart, istop):
length = lengths[i]
if start < length and stop > 0:
d[i] = slice(start, min(stop, length), step)
start = (start - length) % step
else:
start = start - length
stop -= length
else:
rstart = start # running start
istart = bisect.bisect_left(chunk_boundaries, start)
istop = bisect.bisect_right(chunk_boundaries, stop)
# the bound is not exactly tight; make it tighter?
istart = min(istart + 1, len(chunk_boundaries) - 1)
istop = max(istop - 1, -1)
for i in range(istart, istop, -1):
chunk_stop = chunk_boundaries[i]
# create a chunk start and stop
if i == 0:
chunk_start = 0
else:
chunk_start = chunk_boundaries[i - 1]
# if our slice is in this chunk
if (chunk_start <= rstart < chunk_stop) and (rstart > stop):
d[i] = slice(
rstart - chunk_stop,
max(chunk_start - chunk_stop - 1, stop - chunk_stop),
step,
)
# compute the next running start point,
offset = (rstart - (chunk_start - 1)) % step
rstart = chunk_start + offset - 1
# replace 0:20:1 with : if appropriate
for k, v in d.items():
if v == slice(0, lengths[k], 1):
d[k] = slice(None, None, None)
if not d: # special case x[:0]
d[0] = slice(0, 0, 1)
return d
def partition_by_size(sizes, seq):
"""
>>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])
[array([1, 5, 9]), array([ 2, 19]), array([5])]
"""
seq = np.asanyarray(seq)
left = np.empty(len(sizes) + 1, dtype=int)
left[0] = 0
right = np.cumsum(sizes, out=left[1:])
locations = np.empty(len(sizes) + 1, dtype=int)
locations[0] = 0
locations[1:] = np.searchsorted(seq, right)
return [(seq[j:k] - l) for j, k, l in zip(locations[:-1], locations[1:], left)]
def issorted(seq):
"""Is sequence sorted?
>>> issorted([1, 2, 3])
True
>>> issorted([3, 1, 2])
False
"""
if len(seq) == 0:
return True
return np.all(seq[:-1] <= seq[1:])
def slicing_plan(chunks, index):
"""Construct a plan to slice chunks with the given index
Parameters
----------
chunks : Tuple[int]
One dimensions worth of chunking information
index : np.ndarray[int]
The index passed to slice on that dimension
Returns
-------
out : List[Tuple[int, np.ndarray]]
A list of chunk/sub-index pairs corresponding to each output chunk
"""
index = np.asanyarray(index)
cum_chunks = cached_cumsum(chunks)
chunk_locations = np.searchsorted(cum_chunks, index, side="right")
where = np.where(np.diff(chunk_locations))[0] + 1
where = np.concatenate([[0], where, [len(chunk_locations)]])
out = []
for i in range(len(where) - 1):
sub_index = index[where[i] : where[i + 1]]
chunk = chunk_locations[where[i]]
if chunk > 0:
sub_index = sub_index - cum_chunks[chunk - 1]
out.append((chunk, sub_index))
return out
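# (Added hypothetical check) With chunks (5, 3, 4) and index [1, 6, 7, 10],
# slicing_plan returns [(0, array([1])), (1, array([1, 2])), (2, array([2]))]:
# each output chunk paired with chunk-local offsets.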
def take(outname, inname, chunks, index, itemsize, axis=0):
"""Index array with an iterable of index
Handles a single index by a single list
Mimics ``np.take``
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)
>>> chunks
((2, 1, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))],
0),
(2, 0, 4, 1))}
When list is sorted we retain original block structure
>>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)
>>> chunks
((3, 1),)
>>> dsk # doctest: +SKIP
{('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 2): (getitem, ('x', 2), ([7],))}
When any indexed blocks would otherwise grow larger than
dask.config.array.chunk-size, we might split them,
depending on the value of ``dask.config.slicing.split-large-chunks``.
>>> import dask
>>> with dask.config.set({"array.slicing.split-large-chunks": True}):
... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],
... [0] + [1] * 6 + [2], axis=0, itemsize=8)
>>> chunks
((1, 3, 3, 1), (1000, 1000), (1000, 1000))
"""
from .core import PerformanceWarning
plan = slicing_plan(chunks[axis], index)
if len(plan) >= len(chunks[axis]) * 10:
factor = math.ceil(len(plan) / len(chunks[axis]))
warnings.warn(
"Slicing with an out-of-order index is generating %d "
"times more chunks" % factor,
PerformanceWarning,
stacklevel=6,
)
index = np.asarray(index)
# Check for chunks from the plan that would violate the user's
# configured chunk size.
nbytes = utils.parse_bytes(config.get("array.chunk-size"))
other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]
other_numel = np.prod([sum(x) for x in other_chunks])
if math.isnan(other_numel):
warnsize = maxsize = math.inf
else:
maxsize = math.ceil(nbytes / (other_numel * itemsize))
warnsize = maxsize * 5
split = config.get("array.slicing.split-large-chunks", None)
# Warn only when the default is not specified.
warned = split is not None
for _, index_list in plan:
if not warned and len(index_list) > warnsize:
msg = (
"Slicing is producing a large chunk. To accept the large\n"
"chunk and silence this warning, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\n"
" ... array[indexer]\n\n"
"To avoid creating the large chunks, set the option\n"
" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\n"
" ... array[indexer]"
)
warnings.warn(msg, PerformanceWarning, stacklevel=6)
warned = True
where_index = []
index_lists = []
for where_idx, index_list in plan:
index_length = len(index_list)
if split and index_length > maxsize:
index_sublist = np.array_split(
index_list, math.ceil(index_length / maxsize)
)
index_lists.extend(index_sublist)
where_index.extend([where_idx] * len(index_sublist))
else:
index_lists.append(np.array(index_list))
where_index.append(where_idx)
dims = [range(len(bd)) for bd in chunks]
indims = list(dims)
indims[axis] = list(range(len(where_index)))
keys = list(product([outname], *indims))
outdims = list(dims)
outdims[axis] = where_index
slices = [[colon] * len(bd) for bd in chunks]
slices[axis] = index_lists
slices = list(product(*slices))
inkeys = list(product([inname], *outdims))
values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
chunks2 = list(chunks)
chunks2[axis] = tuple(map(len, index_lists))
dsk = dict(zip(keys, values))
return tuple(chunks2), dsk
def posify_index(shape, ind):
"""Flip negative indices around to positive ones
>>> posify_index(10, 3)
3
>>> posify_index(10, -3)
7
>>> posify_index(10, [3, -3])
array([3, 7])
>>> posify_index((10, 20), (3, -3))
(3, 17)
>>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE
(3, array([ 3, 4, 17]))
"""
if isinstance(ind, tuple):
return tuple(map(posify_index, shape, ind))
if isinstance(ind, Integral):
if ind < 0 and not math.isnan(shape):
return ind + shape
else:
return ind
if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape):
ind = np.asanyarray(ind)
return np.where(ind < 0, ind + shape, ind)
return ind
@memoize
def _expander(where):
if not where:
def expand(seq, val):
return seq
return expand
else:
decl = """def expand(seq, val):
return ({left}) + tuple({right})
"""
left = []
j = 0
for i in range(max(where) + 1):
if i in where:
left.append("val, ")
else:
left.append("seq[%d], " % j)
j += 1
right = "seq[%d:]" % j
left = "".join(left)
decl = decl.format(**locals())
ns = {}
exec(compile(decl, "<dynamic>", "exec"), ns, ns)
return ns["expand"]
def expander(where):
"""Create a function to insert value at many locations in sequence.
>>> expander([0, 2])(['a', 'b', 'c'], 'z')
('z', 'a', 'z', 'b', 'c')
"""
return _expander(tuple(where))
def new_blockdim(dim_shape, lengths, index):
"""
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))
[10, 5, 10, 5, 15]
>>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])
[4]
>>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))
[16, 5, 10, 5, 4]
"""
if index == slice(None, None, None):
return lengths
if isinstance(index, list):
return [len(index)]
assert not isinstance(index, Integral)
pairs = sorted(_slice_1d(dim_shape, lengths, index).items(), key=itemgetter(0))
slices = [
slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc
for i, slc in pairs
]
if isinstance(index, slice) and index.step and index.step < 0:
slices = slices[::-1]
return [int(math.ceil((1.0 * slc.stop - slc.start) / slc.step)) for slc in slices]
def replace_ellipsis(n, index):
"""Replace ... with slices, :, : ,:
>>> replace_ellipsis(4, (3, Ellipsis, 2))
(3, slice(None, None, None), slice(None, None, None), 2)
>>> replace_ellipsis(2, (Ellipsis, None))
(slice(None, None, None), slice(None, None, None), None)
"""
# Careful about using in or index because index may contain arrays
isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]
if not isellipsis:
return index
else:
loc = isellipsis[0]
extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)
return (
index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1 :]
)
def normalize_slice(idx, dim):
"""Normalize slices to canonical form
Parameters
----------
idx: slice or other index
dim: dimension length
Examples
--------
>>> normalize_slice(slice(0, 10, 1), 10)
slice(None, None, None)
"""
if isinstance(idx, slice):
if math.isnan(dim):
return idx
start, stop, step = idx.indices(dim)
if step > 0:
if start == 0:
start = None
if stop >= dim:
stop = None
if step == 1:
step = None
if stop is not None and start is not None and stop < start:
stop = start
elif step < 0:
if start >= dim - 1:
start = None
if stop < 0:
stop = None
return slice(start, stop, step)
return idx
def normalize_index(idx, shape):
"""Normalize slicing indexes
1. Replaces ellipses with many full slices
2. Adds full slices to end of index
3. Checks bounding conditions
4. Replace multidimensional numpy arrays with dask arrays
5. Replaces numpy arrays with lists
6. Posify's integers and lists
7. Normalizes slices to canonical form
Examples
--------
>>> normalize_index(1, (10,))
(1,)
>>> normalize_index(-1, (10,))
(9,)
>>> normalize_index([-1], (10,))
(array([9]),)
>>> normalize_index(slice(-3, 10, 1), (10,))
(slice(7, None, None),)
>>> normalize_index((Ellipsis, None), (10,))
(slice(None, None, None), None)
>>> normalize_index(np.array([[True, False], [False, True], [True, True]]), (3, 2))
(dask.array<array, shape=(3, 2), dtype=bool, chunksize=(3, 2), chunktype=numpy.ndarray>,)
"""
from .core import from_array
if not isinstance(idx, tuple):
idx = (idx,)
# if a > 1D numpy.array is provided, cast it to a dask array
if len(idx) > 0 and len(shape) > 1:
i = idx[0]
if isinstance(i, np.ndarray) and i.shape == shape:
idx = (from_array(i), *idx[1:])
idx = replace_ellipsis(len(shape), idx)
n_sliced_dims = 0
for i in idx:
if hasattr(i, "ndim") and i.ndim >= 1:
n_sliced_dims += i.ndim
elif i is None:
continue
else:
n_sliced_dims += 1
idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)
if len([i for i in idx if i is not None]) > len(shape):
raise IndexError("Too many indices for array")
none_shape = []
i = 0
for ind in idx:
if ind is not None:
none_shape.append(shape[i])
i += 1
else:
none_shape.append(None)
for i, d in zip(idx, none_shape):
if d is not None:
check_index(i, d)
idx = tuple(map(sanitize_index, idx))
idx = tuple(map(normalize_slice, idx, none_shape))
idx = posify_index(none_shape, idx)
return idx
def check_index(ind, dimension):
"""Check validity of index for a given dimension
Examples
--------
>>> check_index(3, 5)
>>> check_index(5, 5)
Traceback (most recent call last):
...
IndexError: Index is not smaller than dimension 5 >= 5
>>> check_index(6, 5)
Traceback (most recent call last):
...
IndexError: Index is not smaller than dimension 6 >= 5
>>> check_index(-1, 5)
>>> check_index(-6, 5)
Traceback (most recent call last):
...
IndexError: Negative index is not greater than negative dimension -6 <= -5
>>> check_index([1, 2], 5)
>>> check_index([6, 3], 5)
Traceback (most recent call last):
...
IndexError: Index out of bounds 5
>>> check_index(slice(0, 3), 5)
>>> check_index([True], 1)
>>> check_index([True, True], 3)
Traceback (most recent call last):
...
IndexError: Boolean array length 2 doesn't equal dimension 3
>>> check_index([True, True, True], 1)
Traceback (most recent call last):
...
IndexError: Boolean array length 3 doesn't equal dimension 1
"""
# unknown dimension, assumed to be in bounds
if np.isnan(dimension):
return
elif isinstance(ind, (list, np.ndarray)):
x = np.asanyarray(ind)
if x.dtype == bool:
if x.size != dimension:
raise IndexError(
"Boolean array length %s doesn't equal dimension %s"
% (x.size, dimension)
)
elif (x >= dimension).any() or (x < -dimension).any():
raise IndexError("Index out of bounds %s" % dimension)
elif isinstance(ind, slice):
return
elif is_dask_collection(ind):
return
elif ind is None:
return
elif ind >= dimension:
raise IndexError(
"Index is not smaller than dimension %d >= %d" % (ind, dimension)
)
elif ind < -dimension:
msg = "Negative index is not greater than negative dimension %d <= -%d"
raise IndexError(msg % (ind, dimension))
def slice_with_int_dask_array(x, index):
"""Slice x with at most one 1D dask arrays of ints.
This is a helper function of :meth:`Array.__getitem__`.
Parameters
----------
x: Array
index: tuple with as many elements as x.ndim, among which there are
one or more Array's with dtype=int
Returns
-------
tuple of (sliced x, new index)
where the new index is the same as the input, but with slice(None)
replaced to the original slicer where a 1D filter has been applied and
one less element where a zero-dimensional filter has been applied.
"""
from .core import Array
assert len(index) == x.ndim
fancy_indexes = [
isinstance(idx, (tuple, list))
or (isinstance(idx, (np.ndarray, Array)) and idx.ndim > 0)
for idx in index
]
if sum(fancy_indexes) > 1:
raise NotImplementedError("Don't yet support nd fancy indexing")
out_index = []
dropped_axis_cnt = 0
for in_axis, idx in enumerate(index):
out_axis = in_axis - dropped_axis_cnt
if isinstance(idx, Array) and idx.dtype.kind in "iu":
if idx.ndim == 0:
idx = idx[np.newaxis]
x = slice_with_int_dask_array_on_axis(x, idx, out_axis)
x = x[tuple(0 if i == out_axis else slice(None) for i in range(x.ndim))]
dropped_axis_cnt += 1
elif idx.ndim == 1:
x = slice_with_int_dask_array_on_axis(x, idx, out_axis)
out_index.append(slice(None))
else:
raise NotImplementedError(
"Slicing with dask.array of ints only permitted when "
"the indexer has zero or one dimensions"
)
else:
out_index.append(idx)
return x, tuple(out_index)
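# (Added note) Shape bookkeeping, assuming a 2-D input: a 0-d integer dask
# array index drops its axis (the returned index is one element shorter),
# while a 1-d integer dask array keeps the axis and its slot in the returned
# index becomes slice(None).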
def slice_with_int_dask_array_on_axis(x, idx, axis):
"""Slice a ND dask array with a 1D dask arrays of ints along the given
axis.
This is a helper function of :func:`slice_with_int_dask_array`.
"""
from .core import Array, blockwise, from_array
from . import chunk
assert 0 <= axis < x.ndim
if np.isnan(x.chunks[axis]).any():
raise NotImplementedError(
"Slicing an array with unknown chunks with "
"a dask.array of ints is not supported"
)
# Calculate the offset at which each chunk starts along axis
# e.g. chunks=(..., (5, 3, 4), ...) -> offset=[0, 5, 8]
offset = np.roll(np.cumsum(x.chunks[axis]), 1)
offset[0] = 0
offset = from_array(offset, chunks=1)
# Tamper with the declared chunks of offset to make blockwise align it with
# x[axis]
offset = Array(offset.dask, offset.name, (x.chunks[axis],), offset.dtype)
# Define axis labels for blockwise
x_axes = tuple(range(x.ndim))
idx_axes = (x.ndim,) # arbitrary index not already in x_axes
offset_axes = (axis,)
p_axes = x_axes[: axis + 1] + idx_axes + x_axes[axis + 1 :]
y_axes = x_axes[:axis] + idx_axes + x_axes[axis + 1 :]
# Calculate the cartesian product of every chunk of x vs every chunk of idx
p = blockwise(
chunk.slice_with_int_dask_array,
p_axes,
x,
x_axes,
idx,
idx_axes,
offset,
offset_axes,
x_size=x.shape[axis],
axis=axis,
dtype=x.dtype,
)
# Aggregate on the chunks of x along axis
y = blockwise(
chunk.slice_with_int_dask_array_aggregate,
y_axes,
idx,
idx_axes,
p,
p_axes,
concatenate=True,
x_chunks=x.chunks[axis],
axis=axis,
dtype=x.dtype,
)
return y
def slice_with_bool_dask_array(x, index):
"""Slice x with one or more dask arrays of bools
This is a helper function of `Array.__getitem__`.
Parameters
----------
x: Array
index: tuple with as many elements as x.ndim, among which there are
one or more Array's with dtype=bool
Returns
-------
tuple of (sliced x, new index)
where the new index is the same as the input, but with slice(None)
replaced to the original slicer when a filter has been applied.
Note: The sliced x will have nan chunks on the sliced axes.
"""
from .core import Array, blockwise, elemwise
out_index = [
slice(None) if isinstance(ind, Array) and ind.dtype == bool else ind
for ind in index
]
if len(index) == 1 and index[0].ndim == x.ndim:
if not np.isnan(x.shape).any() and not np.isnan(index[0].shape).any():
x = x.ravel()
index = tuple(i.ravel() for i in index)
elif x.ndim > 1:
warnings.warn(
"When slicing a Dask array of unknown chunks with a boolean mask "
"Dask array, the output array may have a different ordering "
"compared to the equivalent NumPy operation. This will raise an "
"error in a future release of Dask.",
stacklevel=3,
)
y = elemwise(getitem, x, *index, dtype=x.dtype)
name = "getitem-" + tokenize(x, index)
dsk = {(name, i): k for i, k in enumerate(core.flatten(y.__dask_keys__()))}
chunks = ((np.nan,) * y.npartitions,)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[y])
return Array(graph, name, chunks, x.dtype), out_index
if any(
isinstance(ind, Array) and ind.dtype == bool and ind.ndim != 1 for ind in index
):
raise NotImplementedError(
"Slicing with dask.array of bools only permitted when "
"the indexer has only one dimension or when "
"it has the same dimension as the sliced "
"array"
)
indexes = [
ind if isinstance(ind, Array) and ind.dtype == bool else slice(None)
for ind in index
]
arginds = []
i = 0
for ind in indexes:
if isinstance(ind, Array) and ind.dtype == bool:
new = (ind, tuple(range(i, i + ind.ndim)))
i += x.ndim
else:
new = (slice(None), None)
i += 1
arginds.append(new)
arginds = list(concat(arginds))
out = blockwise(
getitem_variadic,
tuple(range(x.ndim)),
x,
tuple(range(x.ndim)),
*arginds,
dtype=x.dtype
)
chunks = []
for ind, chunk in zip(index, out.chunks):
if isinstance(ind, Array) and ind.dtype == bool:
chunks.append((np.nan,) * len(chunk))
else:
chunks.append(chunk)
out._chunks = tuple(chunks)
return out, tuple(out_index)
def getitem_variadic(x, *index):
return x[index]
def make_block_sorted_slices(index, chunks):
"""Generate blockwise-sorted index pairs for shuffling an array.
Parameters
----------
index : ndarray
An array of index positions.
chunks : tuple
Chunks from the original dask array
Returns
-------
index2 : ndarray
Same values as `index`, but each block has been sorted
index3 : ndarray
The location of the values of `index` in `index2`
Examples
--------
>>> index = np.array([6, 0, 4, 2, 7, 1, 5, 3])
>>> chunks = ((4, 4),)
>>> a, b = make_block_sorted_slices(index, chunks)
Notice that the first set of 4 items are sorted, and the
second set of 4 items are sorted.
>>> a
array([0, 2, 4, 6, 1, 3, 5, 7])
>>> b
array([3, 0, 2, 1, 7, 4, 6, 5])
"""
from .core import slices_from_chunks
slices = slices_from_chunks(chunks)
if len(slices[0]) > 1:
slices = [slice_[0] for slice_ in slices]
offsets = np.roll(np.cumsum(chunks[0]), 1)
offsets[0] = 0
index2 = np.empty_like(index)
index3 = np.empty_like(index)
for slice_, offset in zip(slices, offsets):
a = index[slice_]
b = np.sort(a)
c = offset + np.argsort(b.take(np.argsort(a)))
index2[slice_] = b
index3[slice_] = c
return index2, index3
def shuffle_slice(x, index):
"""A relatively efficient way to shuffle `x` according to `index`.
Parameters
----------
x : Array
index : ndarray
This should be an ndarray the same length as `x` containing
each index position in ``range(0, len(x))``.
Returns
-------
Array
"""
from .core import PerformanceWarning
chunks1 = chunks2 = x.chunks
if x.ndim > 1:
chunks1 = (chunks1[0],)
index2, index3 = make_block_sorted_slices(index, chunks1)
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
return x[index2].rechunk(chunks2)[index3]
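# (Added design note) Sorting the shuffle index per block (index2) keeps each
# chunk's fancy index monotone, so the first take stays chunk-aligned; the
# second take with index3 then restores the caller's requested order.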
class _HashIdWrapper(object):
"""Hash and compare a wrapped object by identity instead of value"""
def __init__(self, wrapped):
self.wrapped = wrapped
def __eq__(self, other):
if not isinstance(other, _HashIdWrapper):
return NotImplemented
return self.wrapped is other.wrapped
def __ne__(self, other):
if not isinstance(other, _HashIdWrapper):
return NotImplemented
return self.wrapped is not other.wrapped
def __hash__(self):
return id(self.wrapped)
@functools.lru_cache()
def _cumsum(seq, initial_zero):
if isinstance(seq, _HashIdWrapper):
seq = seq.wrapped
if initial_zero:
return tuple(accumulate(add, seq, 0))
else:
return tuple(accumulate(add, seq))
def cached_cumsum(seq, initial_zero=False):
"""Compute :meth:`toolz.accumulate` with caching.
Caching is by the identify of `seq` rather than the value. It is thus
important that `seq` is a tuple of immutable objects, and this function
is intended for use where `seq` is a value that will persist (generally
block sizes).
Parameters
----------
seq : tuple
Values to cumulatively sum.
initial_zero : bool, optional
If true, the return value is prefixed with a zero.
Returns
-------
tuple
"""
if isinstance(seq, tuple):
# Look up by identity first, to avoid a linear-time __hash__
# if we've seen this tuple object before.
result = _cumsum(_HashIdWrapper(seq), initial_zero)
else:
# Construct a temporary tuple, and look up by value.
result = _cumsum(tuple(seq), initial_zero)
return result
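# (Added illustrative usage) Repeated calls with the same tuple object are
# served from the identity cache:
#   cached_cumsum((2, 3, 4))        -> (2, 5, 9)
#   cached_cumsum((2, 3, 4), True)  -> (0, 2, 5, 9)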
|
bsd-3-clause
| -1,367,151,266,620,555,500 | 30.147216 | 97 | 0.568938 | false | 3.571903 | false | false | false |
eReuse/DeviceHub
|
ereuse_devicehub/resources/event/device/allocate/settings.py
|
1
|
1485
|
from ereuse_devicehub.resources.account.settings import unregistered_user, unregistered_user_doc
from ereuse_devicehub.resources.event.device.settings import EventWithDevices, \
EventSubSettingsMultipleDevices, materialized_components
class Allocate(EventWithDevices):
to = {
'type': ['objectid', 'dict', 'string'], # We should not add string but it does not work otherwise...
'data_relation': {
'resource': 'accounts',
'field': '_id',
'embeddable': True,
},
'schema': unregistered_user,
'doc': 'The user the devices are allocated to. ' + unregistered_user_doc,
'get_from_data_relation_or_create': 'email',
'required': True,
'sink': 2
}
toOrganization = {
'type': 'string',
'readonly': True,
'materialized': True,
'doc': 'Materialization of the organization that, by the time of the allocation, the user worked in.'
}
components = materialized_components
class AllocateSettings(EventSubSettingsMultipleDevices):
_schema = Allocate
fa = 'fa-hand-o-right'
sink = -5
extra_response_fields = EventSubSettingsMultipleDevices.extra_response_fields + ['to']
short_description = 'Assign the devices to someone, so that person \'owns\' the device'
# Receiver OR ReceiverEmail. We need to hook this in a required field so it is always executed
# And @type is an always required field so we can happily hook on it
|
agpl-3.0
| -407,023,383,197,654,600 | 39.135135 | 109 | 0.66532 | false | 4.002695 | false | false | false |
kubernetes-client/python
|
kubernetes/client/models/v1_priority_class.py
|
1
|
11062
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PriorityClass(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'description': 'str',
'global_default': 'bool',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'preemption_policy': 'str',
'value': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'description': 'description',
'global_default': 'globalDefault',
'kind': 'kind',
'metadata': 'metadata',
'preemption_policy': 'preemptionPolicy',
'value': 'value'
}
def __init__(self, api_version=None, description=None, global_default=None, kind=None, metadata=None, preemption_policy=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._description = None
self._global_default = None
self._kind = None
self._metadata = None
self._preemption_policy = None
self._value = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if description is not None:
self.description = description
if global_default is not None:
self.global_default = global_default
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if preemption_policy is not None:
self.preemption_policy = preemption_policy
self.value = value
@property
def api_version(self):
"""Gets the api_version of this V1PriorityClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PriorityClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PriorityClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def description(self):
"""Gets the description of this V1PriorityClass. # noqa: E501
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:return: The description of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1PriorityClass.
description is an arbitrary string that usually provides guidelines on when this priority class should be used. # noqa: E501
:param description: The description of this V1PriorityClass. # noqa: E501
:type: str
"""
self._description = description
@property
def global_default(self):
"""Gets the global_default of this V1PriorityClass. # noqa: E501
globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority. # noqa: E501
:return: The global_default of this V1PriorityClass. # noqa: E501
:rtype: bool
"""
return self._global_default
@global_default.setter
def global_default(self, global_default):
"""Sets the global_default of this V1PriorityClass.
        globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClass exists with its `globalDefault` field set to true, the smallest value among such global-default PriorityClasses is used as the default priority.  # noqa: E501
:param global_default: The global_default of this V1PriorityClass. # noqa: E501
:type: bool
"""
self._global_default = global_default
@property
def kind(self):
"""Gets the kind of this V1PriorityClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PriorityClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PriorityClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PriorityClass. # noqa: E501
:return: The metadata of this V1PriorityClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PriorityClass.
:param metadata: The metadata of this V1PriorityClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def preemption_policy(self):
"""Gets the preemption_policy of this V1PriorityClass. # noqa: E501
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. # noqa: E501
:return: The preemption_policy of this V1PriorityClass. # noqa: E501
:rtype: str
"""
return self._preemption_policy
@preemption_policy.setter
def preemption_policy(self, preemption_policy):
"""Sets the preemption_policy of this V1PriorityClass.
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. # noqa: E501
:param preemption_policy: The preemption_policy of this V1PriorityClass. # noqa: E501
:type: str
"""
self._preemption_policy = preemption_policy
@property
def value(self):
"""Gets the value of this V1PriorityClass. # noqa: E501
The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:return: The value of this V1PriorityClass. # noqa: E501
:rtype: int
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1PriorityClass.
The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec. # noqa: E501
:param value: The value of this V1PriorityClass. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PriorityClass):
return True
return self.to_dict() != other.to_dict()
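# --- Hedged usage sketch (illustrative, not part of the generated client) ---
# Builds the model via the constructor defined above; `value` is the only
# required field (client-side validation rejects None).
if __name__ == "__main__":
    pc = V1PriorityClass(
        api_version="scheduling.k8s.io/v1",
        kind="PriorityClass",
        description="Critical cluster add-on pods.",
        global_default=False,
        value=1000000,
    )
    print(pc.to_dict())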
|
apache-2.0
| 1,955,843,117,827,839,500 | 37.276817 | 411 | 0.644549 | false | 4.177492 | true | false | false |
yittg/Snipping
|
snipping/prompt_toolkit/layout.py
|
1
|
5507
|
"""snipping.prompt_toolkit.layout
wrappers for layout
"""
from prompt_toolkit.key_binding import vi_state
from prompt_toolkit.layout import containers
from prompt_toolkit.layout import controls
from prompt_toolkit.layout import dimension
from prompt_toolkit.layout import highlighters
from prompt_toolkit.layout import margins
from prompt_toolkit.layout import processors
from prompt_toolkit.layout import screen
from prompt_toolkit.layout import toolbars
from snipping.prompt_toolkit import style
from snipping.prompt_toolkit import buffers
class NumberredMargin(margins.NumberredMargin):
""" A simple and customized `create_margin` of origin `NumberredMargin`
"""
def create_margin(self, cli, wr_info, width, height):
visible_line_to_input_line = wr_info.visible_line_to_input_line
token = style.Token.LineNumber
token_error = style.ErrorLineNo
result = []
app = cli.application
snippet = buffers.get_content(app)
cp = app.engine.compile(snippet)
for y in range(wr_info.window_height):
line_number = visible_line_to_input_line.get(y)
if line_number is not None:
if cp is not None and line_number + 1 == cp:
result.append((token_error,
('%i ' % (line_number + 1)).rjust(width)))
else:
result.append((token,
('%i ' % (line_number + 1)).rjust(width)))
result.append((style.Token, '\n'))
return result
def dim(min_=None, max_=None, exact=None):
if exact is not None:
return dimension.LayoutDimension.exact(exact)
return dimension.LayoutDimension(min=min_, max=max_)
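# Illustration of the `dim` helper above (values are arbitrary examples):
#   dim(exact=1)           -> exactly one row/column
#   dim(min_=10)           -> at least 10 cells, unbounded above
#   dim(min_=5, max_=20)   -> between 5 and 20 cells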
def horizontal_line(min_width=None, max_width=None, char=' '):
height = dim(exact=1)
width = dim(min_=min_width, max_=max_width)
content = controls.FillControl(char, token=style.Line)
return containers.Window(width=width, height=height, content=content)
def vertical_line(min_height=None, max_height=None, char=' '):
width = dim(exact=1)
height = dim(min_=min_height, max_=max_height)
content = controls.FillControl(char, token=style.Line)
return containers.Window(width=width, height=height, content=content)
def text_window_bar(name=None, key_binding_manager=None):
def get_tokens(cli):
text_style = style.Bar.Text
display_text, read_only = buffers.buffer_display(cli.application, name)
if not read_only and cli.current_buffer_name == name:
vi_mode = key_binding_manager.get_vi_state(cli).input_mode
if vi_mode == vi_state.InputMode.INSERT:
text_style = style.Bar.Hl_Text
tokens = [(text_style, display_text),
(text_style, u' \u2022 ')]
if vi_mode == vi_state.InputMode.INSERT:
tokens.append((text_style, 'INSERT'))
elif vi_mode == vi_state.InputMode.NAVIGATION:
tokens.append((text_style, 'NORMAL'))
else:
tokens.append((text_style, '[ ]'))
return tokens
else:
return [(text_style, display_text)]
return toolbars.TokenListToolbar(
get_tokens, default_char=screen.Char(' ', style.Bar.Text))
def normal_text_window(name=None, lang=None, lineno=False,
leading_space=False, trailing_space=False,
width=None, height=None):
if name is None:
name = buffers.DEFAULT_BUFFER
bf_attrs = {'buffer_name': name,
'lexer': style.get_lexer_by_lang(lang),
'highlighters': [highlighters.SelectionHighlighter()]}
input_processors = []
if leading_space:
input_processors.append(processors.ShowLeadingWhiteSpaceProcessor())
if trailing_space:
input_processors.append(processors.ShowTrailingWhiteSpaceProcessor())
if input_processors:
bf_attrs['input_processors'] = input_processors
win_attrs = {}
left_margins = []
if lineno:
left_margins.append(NumberredMargin(name))
if left_margins:
win_attrs['left_margins'] = left_margins
if height is not None:
win_attrs['height'] = height
if width is not None:
win_attrs['width'] = width
content = controls.BufferControl(**bf_attrs)
return containers.Window(content=content, **win_attrs)
def horizontal_tokenlist_window(get_tokens, align='left'):
tlc_attrs = {}
if align == 'center':
tlc_attrs['align_center'] = True
if align == 'right':
tlc_attrs['align_right'] = True
height = dim(exact=1)
content = controls.TokenListControl(get_tokens, **tlc_attrs)
return containers.Window(height=height, content=content)
def window_rows(windows):
return containers.HSplit(windows)
def window_columns(windows):
return containers.VSplit(windows)
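# Hedged composition sketch: two panes side by side above a status line.
# `get_status_tokens` is illustrative and not defined in this module.
#
#     layout = window_rows([
#         window_columns([normal_text_window(name='left', lang='python'),
#                         vertical_line(),
#                         normal_text_window(name='right', lineno=True)]),
#         horizontal_tokenlist_window(get_status_tokens, align='right'),
#     ])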
def text_window_with_bar(name=None, lang=None, lineno=False,
leading_space=False, trailing_space=False,
width=None, height=None, key_binding_manager=None):
if name is None:
name = buffers.DEFAULT_BUFFER
return window_rows([
normal_text_window(
name=name, lang=lang, lineno=lineno,
leading_space=leading_space, trailing_space=trailing_space,
width=width, height=height),
text_window_bar(name=name, key_binding_manager=key_binding_manager),
])
|
mit
| -1,226,969,072,952,198,400 | 33.85443 | 79 | 0.632831 | false | 3.840307 | false | false | false |
thecarebot/carebot
|
tests/test_spreadsheet.py
|
1
|
3336
|
#!/usr/bin/env python
import datetime
from mock import patch
try:
import unittest2 as unittest
except ImportError:
import unittest
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.date_cutoff = datetime.date(1997, 1, 1)
from scrapers.npr_spreadsheet import SpreadsheetScraper
from util.config import Config
from util.models import Story
from tests.test_util.db import clear_stories
class TestSpreadsheet(unittest.TestCase):
source = {
'doc_key': 'foo-bar-baz'
}
def test_scrape_spreadsheet(self):
"""
Make sure we grab the right data from spreadsheets
"""
scraper = SpreadsheetScraper(self.source)
stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')
self.assertEqual(len(stories), 4)
self.assertEqual(stories[0]['date'], '42467') # Crappy excel date format
self.assertEqual(stories[0]['graphic_slug'], 'voting-wait-20160404')
self.assertEqual(stories[0]['graphic_type'], 'Graphic')
self.assertEqual(stories[0]['story_headline'], 'What Keeps Election Officials Up At Night? Fear Of Long Lines At The Polls')
self.assertEqual(stories[0]['story_url'], 'http://www.npr.org/2016/04/07/473293026/what-keeps-election-officials-up-at-night-fear-of-long-lines-at-the-polls')
self.assertEqual(stories[0]['contact'], 'Alyson Hurt')
self.assertEqual(stories[0]['date'], '42467')
self.assertEqual(stories[3]['graphic_slug'], 'seed-market-20160405')
self.assertEqual(stories[3]['graphic_type'], 'Graphic')
self.assertEqual(stories[3]['story_headline'], 'Big Seed: Consolidation Is Shrinking The Industry Even Further')
self.assertEqual(stories[3]['story_url'], 'http://www.npr.org/sections/thesalt/2016/04/06/472960018/big-seed-consolidation-is-shrinking-the-industry-even-further')
self.assertEqual(stories[3]['contact'], 'Alyson Hurt')
@patch('util.s3.Uploader.upload', return_value='http://image-url-here')
def test_write_spreadsheet(self, mock_upload):
"""
Make sure we save the stories to the database when scraping from a
spreadsheet
"""
clear_stories()
scraper = SpreadsheetScraper(self.source)
stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')
scraper.write(stories)
results = Story.select()
self.assertEqual(len(results), 4)
for idx, story in enumerate(stories):
self.assertEqual(results[idx].name, story['story_headline'])
self.assertEqual(results[idx].url, story['story_url'])
@patch('util.s3.Uploader.upload')
def test_write_spreadsheet_duplicates(self, mock_upload):
"""
Make sure stories don't get inserted more than once
"""
mock_upload.return_value = 'http://image-url-here'
clear_stories()
scraper = SpreadsheetScraper(self.source)
stories = scraper.scrape_spreadsheet('tests/data/stories.xlsx')
# Insert the stories
scraper.write(stories)
results = Story.select()
self.assertEqual(len(results), 4)
# Now insert them again and make sure we don't have duplicates
scraper.write(stories)
results = Story.select()
self.assertEqual(len(results), 4)
|
mit
| 1,149,627,926,656,244,500 | 37.344828 | 171 | 0.667866 | false | 3.537646 | true | false | false |
deepmind/acme
|
acme/wrappers/action_repeat.py
|
1
|
1582
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that implements action repeats."""
from acme import types
from acme.wrappers import base
import dm_env
class ActionRepeatWrapper(base.EnvironmentWrapper):
"""Action repeat wrapper."""
def __init__(self, environment: dm_env.Environment, num_repeats: int = 1):
super().__init__(environment)
self._num_repeats = num_repeats
def step(self, action: types.NestedArray) -> dm_env.TimeStep:
# Initialize accumulated reward and discount.
reward = 0.
discount = 1.
# Step the environment by repeating action.
for _ in range(self._num_repeats):
timestep = self._environment.step(action)
# Accumulate reward and discount.
reward += timestep.reward * discount
discount *= timestep.discount
# Don't go over episode boundaries.
if timestep.last():
break
# Replace the final timestep's reward and discount.
return timestep._replace(reward=reward, discount=discount)
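# Hedged usage sketch (not part of the original file): `make_env` stands in
# for any callable returning a dm_env.Environment.
#
#     env = ActionRepeatWrapper(make_env(), num_repeats=4)
#     timestep = env.step(action)  # reward/discount accumulated over 4 steps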
|
apache-2.0
| 64,054,636,518,076,696 | 31.958333 | 76 | 0.719343 | false | 4.196286 | false | false | false |
treycucco/pxp
|
pxp/stdlib/operator.py
|
1
|
10810
|
from decimal import Decimal
from pxp.exception import OperatorError
from pxp.function import FunctionArg, FunctionList, InjectedFunction
from pxp.stdlib.types import number_t, string_t, boolean_t
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Number Operators
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def op_number_add(resolver, left, right):
"""Returns the sum of two numbers."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval + rval
def op_number_subtract(resolver, left, right):
"""Returns the difference of two numbers."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval - rval
def op_number_multiply(resolver, left, right):
"""Returns the product of two numbers."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval * rval
def op_number_divide(resolver, left, right):
"""Returns the quotient of two numbers."""
rval = resolver.resolve(right)
if rval == Decimal(0):
raise OperatorError("Divide by 0")
lval = resolver.resolve(left)
return lval / rval
def op_number_modulus(resolver, left, right):
"""Returns the remainder from left / right."""
rval = resolver.resolve(right)
if rval == Decimal(0):
raise OperatorError("Divide by 0")
lval = resolver.resolve(left)
return lval % rval
def op_number_exponentiate(resolver, left, right):
"""Returns the value of left raised to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval ** rval
def op_number_negate(resolver, arg):
"""Returns the negation of arg."""
aval = resolver.resolve(arg)
return -1 * aval
def op_number_null_coalesce(resolver, left, right):
"""Returns the left if left is not null, otherwise right. Right is not resolved until it is
determined that left is null.
"""
lval = resolver.resolve(left, none_ok=True)
if lval is not None:
return lval
else:
rval = resolver.resolve(right, none_ok=True)
return rval
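# Illustration: in an expression like "maybe_missing ? 0", `right` is only
# resolved once `left` has come back as null, so a costly or failing
# fallback is never evaluated when the left operand already has a value.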
def op_number_cmp_equal(resolver, left, right):
"""Returns True if left is equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval == rval
def op_number_cmp_not_equal(resolver, left, right):
"""Returns True if left is not equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval != rval
def op_number_cmp_greater_than_or_equal(resolver, left, right):
"""Returns True if left is greater than or equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval >= rval
def op_number_cmp_less_than_or_equal(resolver, left, right):
"""Returns True if left is less than or equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval <= rval
def op_number_cmp_greater_than(resolver, left, right):
"""Returns True if left is strictly greater than right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval > rval
def op_number_cmp_less_than(resolver, left, right):
"""Returns True if left is strictly less than right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval < rval
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# String operators
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def op_string_add(resolver, left, right):
"""Returns the concatenation of left and right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval + rval
def op_string_null_coalesce(resolver, left, right):
"""Returns the left if left is not null, otherwise right. Right is not resolved until it is
determined that left is null.
"""
lval = resolver.resolve(left, none_ok=True)
if lval is not None:
return lval
else:
rval = resolver.resolve(right, none_ok=True)
return rval
def op_string_cmp_equal(resolver, left, right):
"""Returns True if left is lexicographically equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval == rval
def op_string_cmp_not_equal(resolver, left, right):
"""Returns True if left is not lexicographically equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval != rval
def op_string_cmp_greater_than_or_equal(resolver, left, right):
"""Returns True if left is lexicographically greater than or equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval >= rval
def op_string_cmp_less_than_or_equal(resolver, left, right):
"""Returns True if left is lexicographically less than or equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval <= rval
def op_string_cmp_greater_than(resolver, left, right):
"""Returns True if left is lexicographically strictly greater than right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval > rval
def op_string_cmp_less_than(resolver, left, right):
"""Returns True if left is lexicographically strictly less than right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval < rval
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Boolean Operators
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def op_boolean_null_coalesce(resolver, left, right):
"""Returns the left if left is not null, otherwise right. Right is not resolved until it is
determined that left is null.
"""
lval = resolver.resolve(left, none_ok=True)
if lval is not None:
return lval
else:
rval = resolver.resolve(right, none_ok=True)
return rval
def op_boolean_cmp_equal(resolver, left, right):
"""Returns True if left is equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval == rval
def op_boolean_cmp_not_equal(resolver, left, right):
"""Returns True if left is not equal to right."""
lval = resolver.resolve(left)
rval = resolver.resolve(right)
return lval != rval
def op_boolean_logical_not(resolver, arg):
"""Returns the negation of arg. If arg is True, False is returned. If arg is False, True is
returned.
"""
aval = resolver.resolve(arg)
return not aval
def op_boolean_logical_and(resolver, left, right):
"""Returns True if both left and right evaluate to True, False otherwise.
If left is not True, the value of right doesn't matter, so right will not be evaluated.
"""
# Short circuit
lval = resolver.resolve(left)
if not lval:
return False
rval = resolver.resolve(right)
if rval:
return True
return False
def op_boolean_logical_or(resolver, left, right):
"""Returns True if left or right evaluate to True, False otherwise.
If left is True, the value of right doesn't matter, so right will not be evaluated.
"""
# Short circuit
lval = resolver.resolve(left)
if lval:
return True
rval = resolver.resolve(right)
if rval:
return True
return False
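# Short-circuit illustration: under the resolver contract above, an
# expression like "false & expensive()" never resolves `expensive()`, and
# "true | expensive()" returns True without resolving it either.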
operator_functions = FunctionList((
InjectedFunction("operator+", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_add),
InjectedFunction("operator-", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_subtract),
InjectedFunction("operator*", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_multiply),
InjectedFunction("operator/", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_divide),
InjectedFunction("operator%", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_modulus),
InjectedFunction("operator^", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_exponentiate),
InjectedFunction("operatorunary-", (FunctionArg(number_t, "arg"), ), number_t, op_number_negate),
InjectedFunction("operator?", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), number_t, op_number_null_coalesce),
InjectedFunction("operator=", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_equal),
InjectedFunction("operator!=", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_not_equal),
InjectedFunction("operator>=", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_greater_than_or_equal),
InjectedFunction("operator<=", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_less_than_or_equal),
InjectedFunction("operator>", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_greater_than),
InjectedFunction("operator<", (FunctionArg(number_t, "left"), FunctionArg(number_t, "right")), boolean_t, op_number_cmp_less_than),
InjectedFunction("operator+", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), string_t, op_string_add),
InjectedFunction("operator?", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), string_t, op_string_null_coalesce),
InjectedFunction("operator=", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_equal),
InjectedFunction("operator!=", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_not_equal),
InjectedFunction("operator>=", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_greater_than_or_equal),
InjectedFunction("operator<=", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_less_than_or_equal),
InjectedFunction("operator>", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_greater_than),
InjectedFunction("operator<", (FunctionArg(string_t, "left"), FunctionArg(string_t, "right")), boolean_t, op_string_cmp_less_than),
InjectedFunction("operator?", (FunctionArg(boolean_t, "left"), FunctionArg(boolean_t, "right")), boolean_t, op_boolean_null_coalesce),
InjectedFunction("operator=", (FunctionArg(boolean_t, "left"), FunctionArg(boolean_t, "right")), boolean_t, op_boolean_cmp_equal),
InjectedFunction("operator!=", (FunctionArg(boolean_t, "left"), FunctionArg(boolean_t, "right")), boolean_t, op_boolean_cmp_not_equal),
InjectedFunction("operatorunary!", (FunctionArg(boolean_t, "arg"), ), boolean_t, op_boolean_logical_not),
InjectedFunction("operator&", (FunctionArg(boolean_t, "left"), FunctionArg(boolean_t, "right")), boolean_t, op_boolean_logical_and),
InjectedFunction("operator|", (FunctionArg(boolean_t, "left"), FunctionArg(boolean_t, "right")), boolean_t, op_boolean_logical_or)
))
|
bsd-3-clause
| -3,144,274,327,464,263,700 | 36.797203 | 146 | 0.684736 | false | 3.359229 | false | false | false |
gerard256/script.trakt
|
rating.py
|
1
|
7947
|
# -*- coding: utf-8 -*-
"""Module used to launch rating dialogues and send ratings to Trakt"""
import xbmc
import xbmcaddon
import xbmcgui
import utilities as utils
import globals
import logging
logger = logging.getLogger(__name__)
__addon__ = xbmcaddon.Addon("script.trakt")
def ratingCheck(media_type, summary_info, watched_time, total_time, playlist_length):
"""Check if a video should be rated and if so launches the rating dialog"""
logger.debug("Rating Check called for '%s'" % media_type)
if not utils.getSettingAsBool("rate_%s" % media_type):
logger.debug("'%s' is configured to not be rated." % media_type)
return
if summary_info is None:
logger.debug("Summary information is empty, aborting.")
return
watched = (watched_time / total_time) * 100
if watched >= utils.getSettingAsFloat("rate_min_view_time"):
if (playlist_length <= 1) or utils.getSettingAsBool("rate_each_playlist_item"):
rateMedia(media_type, summary_info)
else:
logger.debug("Rate each playlist item is disabled.")
else:
logger.debug("'%s' does not meet minimum view time for rating (watched: %0.2f%%, minimum: %0.2f%%)" % (media_type, watched, utils.getSettingAsFloat("rate_min_view_time")))
def rateMedia(media_type, itemsToRate, unrate=False, rating=None):
"""Launches the rating dialog"""
for summary_info in itemsToRate:
if not utils.isValidMediaType(media_type):
logger.debug("Not a valid media type")
return
elif 'user' not in summary_info:
logger.debug("No user data")
return
s = utils.getFormattedItemName(media_type, summary_info)
logger.debug("Summary Info %s" % summary_info)
if unrate:
rating = None
if summary_info['user']['ratings']['rating'] > 0:
rating = 0
            if rating is not None:
logger.debug("'%s' is being unrated." % s)
__rateOnTrakt(rating, media_type, summary_info, unrate=True)
else:
logger.debug("'%s' has not been rated, so not unrating." % s)
return
rerate = utils.getSettingAsBool('rate_rerate')
if rating is not None:
if summary_info['user']['ratings']['rating'] == 0:
logger.debug("Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
if rerate:
if not summary_info['user']['ratings']['rating'] == rating:
logger.debug("Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
utils.notification(utils.getString(32043), s)
logger.debug("'%s' already has a rating of '%d'." % (s, rating))
else:
utils.notification(utils.getString(32041), s)
logger.debug("'%s' is already rated." % s)
return
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating']:
if not rerate:
logger.debug("'%s' has already been rated." % s)
utils.notification(utils.getString(32041), s)
return
else:
logger.debug("'%s' is being re-rated." % s)
xbmc.executebuiltin('Dialog.Close(all, true)')
gui = RatingDialog(
"script-trakt-RatingDialog.xml",
__addon__.getAddonInfo('path'),
media_type=media_type,
media=summary_info,
rerate=rerate
)
gui.doModal()
if gui.rating:
rating = gui.rating
if rerate:
rating = gui.rating
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating'] > 0 and rating == summary_info['user']['ratings']['rating']:
rating = 0
if rating == 0 or rating == "unrate":
__rateOnTrakt(rating, gui.media_type, gui.media, unrate=True)
else:
__rateOnTrakt(rating, gui.media_type, gui.media)
else:
logger.debug("Rating dialog was closed with no rating.")
del gui
        # Reset rating and unrate for multi-part episodes
        unrate = False
        rating = None
def __rateOnTrakt(rating, media_type, media, unrate=False):
logger.debug("Sending rating (%s) to Trakt.tv" % rating)
params = media
if utils.isMovie(media_type):
key = 'movies'
params['rating'] = rating
elif utils.isShow(media_type):
key = 'shows'
params['rating'] = rating
elif utils.isSeason(media_type):
key = 'shows'
params['seasons'] = [{'rating': rating, 'number': media['season']}]
elif utils.isEpisode(media_type):
key = 'episodes'
params['rating'] = rating
else:
return
root = {key: [params]}
if not unrate:
data = globals.traktapi.addRating(root)
else:
data = globals.traktapi.removeRating(root)
if data:
s = utils.getFormattedItemName(media_type, media)
if 'not_found' in data and not data['not_found']['movies'] and not data['not_found']['episodes'] and not data['not_found']['shows']:
if not unrate:
utils.notification(utils.getString(32040), s)
else:
utils.notification(utils.getString(32042), s)
else:
utils.notification(utils.getString(32044), s)
class RatingDialog(xbmcgui.WindowXMLDialog):
buttons = {
11030: 1,
11031: 2,
11032: 3,
11033: 4,
11034: 5,
11035: 6,
11036: 7,
11037: 8,
11038: 9,
11039: 10
}
focus_labels = {
11030: 32028,
11031: 32029,
11032: 32030,
11033: 32031,
11034: 32032,
11035: 32033,
11036: 32034,
11037: 32035,
11038: 32036,
11039: 32027
}
def __init__(self, xmlFile, resourcePath, forceFallback=False, media_type=None, media=None, rerate=False):
self.media_type = media_type
self.media = media
self.rating = None
self.rerate = rerate
self.default_rating = utils.getSettingAsInt('rating_default')
def onInit(self):
s = utils.getFormattedItemName(self.media_type, self.media)
self.getControl(10012).setLabel(s)
rateID = 11029 + self.default_rating
if self.rerate and self.media['user']['ratings'] and int(self.media['user']['ratings']['rating']) > 0:
rateID = 11029 + int(self.media['user']['ratings']['rating'])
self.setFocus(self.getControl(rateID))
def onClick(self, controlID):
if controlID in self.buttons:
self.rating = self.buttons[controlID]
self.close()
def onFocus(self, controlID):
if controlID in self.focus_labels:
s = utils.getString(self.focus_labels[controlID])
if self.rerate:
if self.media['user']['ratings'] and self.media['user']['ratings']['rating'] == self.buttons[controlID]:
if utils.isMovie(self.media_type):
s = utils.getString(32037)
elif utils.isShow(self.media_type):
s = utils.getString(32038)
elif utils.isEpisode(self.media_type):
s = utils.getString(32039)
elif utils.isSeason(self.media_type):
s = utils.getString(32132)
else:
pass
self.getControl(10013).setLabel(s)
else:
self.getControl(10013).setLabel('')
|
gpl-2.0
| -5,166,443,607,900,316,000 | 34.959276 | 179 | 0.556436 | false | 3.941964 | false | false | false |
oblique-labs/pyVM
|
rpython/memory/gctypelayout.py
|
1
|
22275
|
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup
from rpython.rtyper import rclass
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rlib.debug import ll_assert
from rpython.rlib.rarithmetic import intmask
from rpython.tool.identity_dict import identity_dict
class GCData(object):
"""The GC information tables, and the query functions that the GC
calls to decode their content. The encoding of this information
is done by encode_type_shape(). These two places should be in sync,
obviously, but in principle no other code should depend on the
details of the encoding in TYPE_INFO.
"""
_alloc_flavor_ = 'raw'
OFFSETS_TO_GC_PTR = lltype.Array(lltype.Signed)
# A CUSTOM_FUNC is either a destructor, or a custom tracer.
# A destructor is called when the object is about to be freed.
# A custom tracer (CT) enumerates the addresses that contain GCREFs.
# Both are called with the address of the object as only argument.
CUSTOM_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
CUSTOM_FUNC_PTR = lltype.Ptr(CUSTOM_FUNC)
# structure describing the layout of a typeid
TYPE_INFO = lltype.Struct("type_info",
("infobits", lltype.Signed), # combination of the T_xxx consts
("customfunc", CUSTOM_FUNC_PTR),
("fixedsize", lltype.Signed),
("ofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
hints={'immutable': True},
)
VARSIZE_TYPE_INFO = lltype.Struct("varsize_type_info",
("header", TYPE_INFO),
("varitemsize", lltype.Signed),
("ofstovar", lltype.Signed),
("ofstolength", lltype.Signed),
("varofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
hints={'immutable': True},
)
TYPE_INFO_PTR = lltype.Ptr(TYPE_INFO)
VARSIZE_TYPE_INFO_PTR = lltype.Ptr(VARSIZE_TYPE_INFO)
def __init__(self, type_info_group):
assert isinstance(type_info_group, llgroup.group)
self.type_info_group = type_info_group
self.type_info_group_ptr = type_info_group._as_ptr()
def get(self, typeid):
res = llop.get_group_member(GCData.TYPE_INFO_PTR,
self.type_info_group_ptr,
typeid)
_check_valid_type_info(res)
return res
def get_varsize(self, typeid):
res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
self.type_info_group_ptr,
typeid)
_check_valid_type_info_varsize(res)
return res
def q_is_varsize(self, typeid):
infobits = self.get(typeid).infobits
return (infobits & T_IS_VARSIZE) != 0
def q_has_gcptr_in_varsize(self, typeid):
infobits = self.get(typeid).infobits
return (infobits & T_HAS_GCPTR_IN_VARSIZE) != 0
def q_has_gcptr(self, typeid):
infobits = self.get(typeid).infobits
return (infobits & T_HAS_GCPTR) != 0
def q_is_gcarrayofgcptr(self, typeid):
infobits = self.get(typeid).infobits
return (infobits & T_IS_GCARRAY_OF_GCPTR) != 0
def q_cannot_pin(self, typeid):
typeinfo = self.get(typeid)
ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
def q_finalizer_handlers(self):
adr = self.finalizer_handlers # set from framework.py or gcwrapper.py
return llmemory.cast_adr_to_ptr(adr, lltype.Ptr(FIN_HANDLER_ARRAY))
def q_destructor_or_custom_trace(self, typeid):
return self.get(typeid).customfunc
def q_is_old_style_finalizer(self, typeid):
typeinfo = self.get(typeid)
return (typeinfo.infobits & T_HAS_OLDSTYLE_FINALIZER) != 0
def q_offsets_to_gc_pointers(self, typeid):
return self.get(typeid).ofstoptrs
def q_fixed_size(self, typeid):
return self.get(typeid).fixedsize
def q_varsize_item_sizes(self, typeid):
return self.get_varsize(typeid).varitemsize
def q_varsize_offset_to_variable_part(self, typeid):
return self.get_varsize(typeid).ofstovar
def q_varsize_offset_to_length(self, typeid):
return self.get_varsize(typeid).ofstolength
def q_varsize_offsets_to_gcpointers_in_var_part(self, typeid):
return self.get_varsize(typeid).varofstoptrs
def q_weakpointer_offset(self, typeid):
infobits = self.get(typeid).infobits
if infobits & T_IS_WEAKREF:
return weakptr_offset
return -1
def q_member_index(self, typeid):
infobits = self.get(typeid).infobits
return infobits & T_MEMBER_INDEX
def q_is_rpython_class(self, typeid):
infobits = self.get(typeid).infobits
return infobits & T_IS_RPYTHON_INSTANCE != 0
def q_has_custom_trace(self, typeid):
infobits = self.get(typeid).infobits
return infobits & T_HAS_CUSTOM_TRACE != 0
def q_fast_path_tracing(self, typeid):
# return True if none of the flags T_HAS_GCPTR_IN_VARSIZE,
# T_IS_GCARRAY_OF_GCPTR or T_HAS_CUSTOM_TRACE is set
T_ANY_SLOW_FLAG = (T_HAS_GCPTR_IN_VARSIZE |
T_IS_GCARRAY_OF_GCPTR |
T_HAS_CUSTOM_TRACE)
infobits = self.get(typeid).infobits
return infobits & T_ANY_SLOW_FLAG == 0
def set_query_functions(self, gc):
gc.set_query_functions(
self.q_is_varsize,
self.q_has_gcptr_in_varsize,
self.q_is_gcarrayofgcptr,
self.q_finalizer_handlers,
self.q_destructor_or_custom_trace,
self.q_is_old_style_finalizer,
self.q_offsets_to_gc_pointers,
self.q_fixed_size,
self.q_varsize_item_sizes,
self.q_varsize_offset_to_variable_part,
self.q_varsize_offset_to_length,
self.q_varsize_offsets_to_gcpointers_in_var_part,
self.q_weakpointer_offset,
self.q_member_index,
self.q_is_rpython_class,
self.q_has_custom_trace,
self.q_fast_path_tracing,
self.q_has_gcptr,
self.q_cannot_pin)
def _has_got_custom_trace(self, typeid):
type_info = self.get(typeid)
type_info.infobits |= (T_HAS_CUSTOM_TRACE | T_HAS_GCPTR)
# the lowest 16bits are used to store group member index
T_MEMBER_INDEX = 0xffff
T_IS_VARSIZE = 0x010000
T_HAS_GCPTR_IN_VARSIZE = 0x020000
T_IS_GCARRAY_OF_GCPTR = 0x040000
T_IS_WEAKREF = 0x080000
T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT
T_HAS_CUSTOM_TRACE = 0x200000
T_HAS_OLDSTYLE_FINALIZER = 0x400000
T_HAS_GCPTR = 0x1000000
T_KEY_MASK = intmask(0xFE000000) # bug detection only
T_KEY_VALUE = intmask(0x5A000000) # bug detection only
def _check_valid_type_info(p):
ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id")
def _check_valid_type_info_varsize(p):
ll_assert(p.header.infobits & (T_KEY_MASK | T_IS_VARSIZE) ==
(T_KEY_VALUE | T_IS_VARSIZE),
"invalid varsize type_id")
def check_typeid(typeid):
# xxx does not perform a full check of validity, just checks for nonzero
ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid),
"invalid type_id")
def encode_type_shape(builder, info, TYPE, index):
"""Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
offsets = offsets_to_gc_pointers(TYPE)
infobits = index
info.ofstoptrs = builder.offsets2table(offsets, TYPE)
if len(offsets) > 0:
infobits |= T_HAS_GCPTR
#
fptrs = builder.special_funcptr_for_type(TYPE)
if fptrs:
if "destructor" in fptrs:
info.customfunc = fptrs["destructor"]
if "old_style_finalizer" in fptrs:
info.customfunc = fptrs["old_style_finalizer"]
infobits |= T_HAS_OLDSTYLE_FINALIZER
#
if not TYPE._is_varsize():
info.fixedsize = llarena.round_up_for_allocation(
llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
# note about round_up_for_allocation(): in the 'info' table
# we put a rounded-up size only for fixed-size objects. For
# varsize ones, the GC must anyway compute the size at run-time
# and round up that result.
else:
infobits |= T_IS_VARSIZE
varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
info.fixedsize = llmemory.sizeof(TYPE, 0)
if isinstance(TYPE, lltype.Struct):
ARRAY = TYPE._flds[TYPE._arrayfld]
ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
else:
assert isinstance(TYPE, lltype.GcArray)
ARRAY = TYPE
if (isinstance(ARRAY.OF, lltype.Ptr)
and ARRAY.OF.TO._gckind == 'gc'):
infobits |= T_IS_GCARRAY_OF_GCPTR
varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
assert isinstance(ARRAY, lltype.Array)
if ARRAY.OF != lltype.Void:
offsets = offsets_to_gc_pointers(ARRAY.OF)
else:
offsets = ()
if len(offsets) > 0:
infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR
varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
if builder.is_weakref_type(TYPE):
infobits |= T_IS_WEAKREF
if is_subclass_of_object(TYPE):
infobits |= T_IS_RPYTHON_INSTANCE
info.infobits = infobits | T_KEY_VALUE
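# Worked illustration of the encoding above: a GcArray of GC pointers stored
# at group member index 3 ends up with
#     3 | T_IS_VARSIZE | T_IS_GCARRAY_OF_GCPTR | T_HAS_GCPTR_IN_VARSIZE
#       | T_HAS_GCPTR | T_KEY_VALUE
# in its infobits, since each item is a single GC pointer at offset 0.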
# ____________________________________________________________
class TypeLayoutBuilder(object):
can_add_new_types = True
can_encode_type_shape = True # set to False initially by the JIT
size_of_fixed_type_info = llmemory.sizeof(GCData.TYPE_INFO)
def __init__(self, GCClass, lltype2vtable=None):
self.GCClass = GCClass
self.lltype2vtable = lltype2vtable
self.make_type_info_group()
self.id_of_type = {} # {LLTYPE: type_id}
self.iseen_roots = identity_dict()
# the following are lists of addresses of gc pointers living inside the
# prebuilt structures. It should list all the locations that could
# possibly point to a GC heap object.
# this lists contains pointers in GcStructs and GcArrays
self.addresses_of_static_ptrs = []
# this lists contains pointers in raw Structs and Arrays
self.addresses_of_static_ptrs_in_nongc = []
# for debugging, the following list collects all the prebuilt
# GcStructs and GcArrays
self.all_prebuilt_gc = []
self._special_funcptrs = {}
self.offsettable_cache = {}
def make_type_info_group(self):
self.type_info_group = llgroup.group("typeinfo")
# don't use typeid 0, may help debugging
DUMMY = lltype.Struct("dummy", ('x', lltype.Signed))
dummy = lltype.malloc(DUMMY, immortal=True, zero=True)
self.type_info_group.add_member(dummy)
def get_type_id(self, TYPE):
try:
return self.id_of_type[TYPE]
except KeyError:
assert self.can_add_new_types
assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
# Record the new type_id description as a TYPE_INFO structure.
# build the TYPE_INFO structure
if not TYPE._is_varsize():
fullinfo = lltype.malloc(GCData.TYPE_INFO,
immortal=True, zero=True)
info = fullinfo
else:
fullinfo = lltype.malloc(GCData.VARSIZE_TYPE_INFO,
immortal=True, zero=True)
info = fullinfo.header
type_id = self.type_info_group.add_member(fullinfo)
if self.can_encode_type_shape:
encode_type_shape(self, info, TYPE, type_id.index)
else:
self._pending_type_shapes.append((info, TYPE, type_id.index))
# store it
self.id_of_type[TYPE] = type_id
self.add_vtable_after_typeinfo(TYPE)
return type_id
def add_vtable_after_typeinfo(self, TYPE):
# if gcremovetypeptr is False, then lltype2vtable is None and it
# means that we don't have to store the vtables in type_info_group.
if self.lltype2vtable is None:
return
# does the type have a vtable?
vtable = self.lltype2vtable.get(TYPE, None)
if vtable is not None:
# yes. check that in this case, we are not varsize
assert not TYPE._is_varsize()
vtable = lltype.normalizeptr(vtable)
self.type_info_group.add_member(vtable)
else:
# no vtable from lltype2vtable -- double-check to be sure
# that it's not a subclass of OBJECT.
assert not is_subclass_of_object(TYPE)
def get_info(self, type_id):
res = llop.get_group_member(GCData.TYPE_INFO_PTR,
self.type_info_group._as_ptr(),
type_id)
_check_valid_type_info(res)
return res
def get_info_varsize(self, type_id):
res = llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
self.type_info_group._as_ptr(),
type_id)
_check_valid_type_info_varsize(res)
return res
def is_weakref_type(self, TYPE):
return TYPE == WEAKREF
def encode_type_shapes_now(self):
if not self.can_encode_type_shape:
self.can_encode_type_shape = True
for info, TYPE, index in self._pending_type_shapes:
encode_type_shape(self, info, TYPE, index)
del self._pending_type_shapes
def delay_encoding(self):
# used by the JIT
self._pending_type_shapes = []
self.can_encode_type_shape = False
def offsets2table(self, offsets, TYPE):
if len(offsets) == 0:
TYPE = lltype.Void # we can share all zero-length arrays
try:
return self.offsettable_cache[TYPE]
except KeyError:
cachedarray = lltype.malloc(GCData.OFFSETS_TO_GC_PTR,
len(offsets), immortal=True)
for i, value in enumerate(offsets):
cachedarray[i] = value
self.offsettable_cache[TYPE] = cachedarray
return cachedarray
def close_table(self):
# make sure we no longer add members to the type_info_group.
self.can_add_new_types = False
self.offsettable_cache = None
return self.type_info_group
def special_funcptr_for_type(self, TYPE):
if TYPE in self._special_funcptrs:
return self._special_funcptrs[TYPE]
fptr1, is_lightweight = self.make_destructor_funcptr_for_type(TYPE)
fptr2 = self.make_custom_trace_funcptr_for_type(TYPE)
result = {}
if fptr1:
if is_lightweight:
result["destructor"] = fptr1
else:
result["old_style_finalizer"] = fptr1
if fptr2:
result["custom_trace"] = fptr2
self._special_funcptrs[TYPE] = result
return result
def make_destructor_funcptr_for_type(self, TYPE):
# must be overridden for proper destructor support
return None, False
def make_custom_trace_funcptr_for_type(self, TYPE):
# must be overridden for proper custom tracer support
return None
def initialize_gc_query_function(self, gc):
gcdata = GCData(self.type_info_group)
gcdata.set_query_functions(gc)
return gcdata
def consider_constant(self, TYPE, value, gc):
if value is not lltype.top_container(value):
return
if value in self.iseen_roots:
return
self.iseen_roots[value] = True
if isinstance(TYPE, lltype.GcOpaqueType):
self.consider_constant(lltype.typeOf(value.container),
value.container, gc)
return
if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
typeid = self.get_type_id(TYPE)
hdr = gc.gcheaderbuilder.new_header(value)
adr = llmemory.cast_ptr_to_adr(hdr)
gc.init_gc_object_immortal(adr, typeid)
self.all_prebuilt_gc.append(value)
# The following collects the addresses of all the fields that have
# a GC Pointer type, inside the current prebuilt object. All such
# fields are potential roots: unless the structure is immutable,
# they could be changed later to point to GC heap objects.
adr = llmemory.cast_ptr_to_adr(value._as_ptr())
if TYPE._gckind == "gc":
if gc.prebuilt_gc_objects_are_static_roots or gc.DEBUG:
appendto = self.addresses_of_static_ptrs
else:
return
else:
appendto = self.addresses_of_static_ptrs_in_nongc
for a in gc_pointers_inside(value, adr, mutable_only=True):
appendto.append(a)
# ____________________________________________________________
#
# Helpers to discover GC pointers inside structures
def offsets_to_gc_pointers(TYPE):
offsets = []
if isinstance(TYPE, lltype.Struct):
for name in TYPE._names:
FIELD = getattr(TYPE, name)
if isinstance(FIELD, lltype.Array):
continue # skip inlined array
baseofs = llmemory.offsetof(TYPE, name)
suboffsets = offsets_to_gc_pointers(FIELD)
for s in suboffsets:
try:
knownzero = s == 0
except TypeError:
knownzero = False
if knownzero:
offsets.append(baseofs)
else:
offsets.append(baseofs + s)
# sanity check
#ex = lltype.Ptr(TYPE)._example()
#adr = llmemory.cast_ptr_to_adr(ex)
#for off in offsets:
# (adr + off)
elif isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc':
offsets.append(0)
return offsets
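# Example (illustrative): for a GcStruct with a Signed field 'x' followed by
# a gc Ptr field 'y', the result is the single entry [offsetof(TYPE, 'y')];
# nested structs contribute their inner offsets shifted by the field base.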
def gc_pointers_inside(v, adr, mutable_only=False):
t = lltype.typeOf(v)
if isinstance(t, lltype.Struct):
skip = ()
if mutable_only:
if t._hints.get('immutable'):
return
if 'immutable_fields' in t._hints:
skip = t._hints['immutable_fields'].all_immutable_fields()
for n, t2 in t._flds.iteritems():
if isinstance(t2, lltype.Ptr) and t2.TO._gckind == 'gc':
if n not in skip:
yield adr + llmemory.offsetof(t, n)
elif isinstance(t2, (lltype.Array, lltype.Struct)):
for a in gc_pointers_inside(getattr(v, n),
adr + llmemory.offsetof(t, n),
mutable_only):
yield a
elif isinstance(t, lltype.Array):
if mutable_only and t._hints.get('immutable'):
return
if isinstance(t.OF, lltype.Ptr) and t.OF.TO._gckind == 'gc':
for i in range(len(v.items)):
yield adr + llmemory.itemoffsetof(t, i)
elif isinstance(t.OF, lltype.Struct):
for i in range(len(v.items)):
for a in gc_pointers_inside(v.items[i],
adr + llmemory.itemoffsetof(t, i),
mutable_only):
yield a
def zero_gc_pointers(p):
TYPE = lltype.typeOf(p).TO
zero_gc_pointers_inside(p, TYPE)
def zero_gc_pointers_inside(p, TYPE):
if isinstance(TYPE, lltype.Struct):
for name, FIELD in TYPE._flds.items():
if isinstance(FIELD, lltype.Ptr) and FIELD.TO._gckind == 'gc':
setattr(p, name, lltype.nullptr(FIELD.TO))
elif isinstance(FIELD, lltype.ContainerType):
zero_gc_pointers_inside(getattr(p, name), FIELD)
elif isinstance(TYPE, lltype.Array):
ITEM = TYPE.OF
if isinstance(ITEM, lltype.Ptr) and ITEM.TO._gckind == 'gc':
null = lltype.nullptr(ITEM.TO)
for i in range(p._obj.getlength()):
p[i] = null
elif isinstance(ITEM, lltype.ContainerType):
for i in range(p._obj.getlength()):
zero_gc_pointers_inside(p[i], ITEM)
def is_subclass_of_object(TYPE):
while isinstance(TYPE, lltype.GcStruct):
if TYPE is rclass.OBJECT:
return True
_, TYPE = TYPE._first_struct()
return False
########## weakrefs ##########
# framework: weakref objects are small structures containing only an address
WEAKREF = lltype.GcStruct("weakref", ("weakptr", llmemory.Address))
WEAKREFPTR = lltype.Ptr(WEAKREF)
sizeof_weakref = llmemory.sizeof(WEAKREF)
empty_weakref = lltype.malloc(WEAKREF, immortal=True)
empty_weakref.weakptr = llmemory.NULL
weakptr_offset = llmemory.offsetof(WEAKREF, "weakptr")
def ll_weakref_deref(wref):
wref = llmemory.cast_weakrefptr_to_ptr(WEAKREFPTR, wref)
return wref.weakptr
def convert_weakref_to(targetptr):
# Prebuilt weakrefs don't really need to be weak at all,
# but we need to emulate the structure expected by ll_weakref_deref().
if not targetptr:
return empty_weakref
else:
link = lltype.malloc(WEAKREF, immortal=True)
link.weakptr = llmemory.cast_ptr_to_adr(targetptr)
return link
########## finalizers ##########
FIN_TRIGGER_FUNC = lltype.FuncType([], lltype.Void)
FIN_HANDLER_ARRAY = lltype.Array(('deque', llmemory.Address),
('trigger', lltype.Ptr(FIN_TRIGGER_FUNC)))
|
mit
| 4,813,011,452,356,338,000 | 38.919355 | 79 | 0.590797 | false | 3.544717 | false | false | false |
inveniosoftware-contrib/invenio-workflows-ui
|
invenio_workflows_ui/serializers/__init__.py
|
1
|
1527
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record serialization."""
from __future__ import absolute_import, print_function
import json
from .response import (
workflow_responsify,
search_responsify,
action_responsify,
file_responsify
)
from .json import JSONSerializer
json_v1 = JSONSerializer()
json_serializer = workflow_responsify(json_v1, 'application/json')
json_search_serializer = search_responsify(json_v1, 'application/json')
json_action_serializer = action_responsify(json_v1, 'application/json')
json_file_serializer = file_responsify(json_v1, 'application/json')
|
gpl-2.0
| -2,496,835,084,541,541,400 | 34.511628 | 76 | 0.75704 | false | 3.742647 | false | false | false |
DesertBus/txircd
|
txircd/modules/cmd_list.py
|
1
|
1917
|
from twisted.words.protocols import irc
from txircd.modbase import Command
from txircd.utils import irc_lower
from fnmatch import fnmatch
class ListCommand(Command):
def onUse(self, user, data):
chancache = []
for channame, channel in self.ircd.channels.iteritems():
if data["chanfilter"] is not None:
filterMatch = False
for filterEntry in data["chanfilter"]:
if fnmatch(channame, filterEntry):
filterMatch = True
break
if not filterMatch:
continue
chancache.append({
"channel": channel,
"name": channel.name,
"users": len(channel.users),
"topic": channel.topic if channel.topic else ""
})
if "listdata" in self.ircd.actions:
for action in self.ircd.actions["listdata"]:
chancache = action(user, chancache)
if not chancache:
break
for cdata in chancache:
user.sendMessage(irc.RPL_LIST, cdata["name"], str(cdata["users"]), ":[{}] {}".format(cdata["channel"].modeString(user), cdata["topic"]))
user.sendMessage(irc.RPL_LISTEND, ":End of channel list")
def processParams(self, user, params):
if user.registered > 0:
user.sendMessage(irc.ERR_NOTREGISTERED, "LIST", ":You have not registered")
return {}
if params:
chanFilter = irc_lower(params[0]).split(",")
else:
chanFilter = None
return {
"user": user,
"chanfilter": chanFilter
}
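    # Illustrative filter behaviour: "LIST #dev*,#ops" is lowercased and
    # split into ["#dev*", "#ops"], so "#developers" matches via fnmatch
    # while a channel named "#random" is skipped.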
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"commands": {
"LIST": ListCommand()
}
}
|
bsd-3-clause
| -929,418,014,203,899,100 | 33.872727 | 148 | 0.523213 | false | 4.468531 | false | false | false |
akiokio/centralfitestoque
|
src/.pycharm_helpers/test_generator.py
|
1
|
15858
|
# encoding: utf-8
"""
Tests basic things that generator3 consists of.
NOTE: does not work in Jython 2.2 or IronPython 1.x, because pyparsing does not.
"""
import unittest
from generator3 import *
M = ModuleRedeclarator
import sys
IS_CLI = sys.platform == 'cli'
VERSION = sys.version_info[:2] # only (major, minor)
class TestRestoreFuncByDocComment(unittest.TestCase):
"""
Tries to restore function signatures by doc strings.
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testTrivial(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, b, c) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b, c)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTrivialNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b, c), d) ololo", "f", "f", None)
self.assertEquals(result, "f(a, (b, c), d)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithDefault(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, b, c=1) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b, c=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedWithDefault(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b1, b2), c=1) ololo", "f", "f", None)
self.assertEquals(result, "f(a, (b1, b2), c=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testAbstractDefault(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(a, b=obscuredefault) ololo', "f", "f", None)
self.assertEquals(result, "f(a, b=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithReserved(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(class, object, def) ololo", "f", "f", None)
self.assertEquals(result, "f(p_class, p_object, p_def)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithReservedOpt(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo, bar[, def]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, bar, p_def=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testPseudoNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b1, b2, ...)) ololo", "f", "f", None)
self.assertEquals(result, "f(a, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testImportLike(self):
# __import__
result, ret_sig, note = self.m.parseFuncDoc("blah f(name, globals={}, locals={}, fromlist=[], level=-1) ololo",
"f", "f", None)
self.assertEquals(result, "f(name, globals={}, locals={}, fromlist=[], level=-1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testOptionalBracket(self):
# reduce
result, ret_sig, note = self.m.parseFuncDoc("blah f(function, sequence[, initial]) ololo", "f", "f", None)
self.assertEquals(result, "f(function, sequence, initial=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testWithMore(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo [, bar1, bar2, ...]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, *bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedOptionals(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo [, bar1 [, bar2]]) ololo", "f", "f", None)
self.assertEquals(result, "f(foo, bar1=None, bar2=None)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testInnerTuple(self):
result, ret_sig, note = self.m.parseFuncDoc("blah load_module(name, file, filename, (suffix, mode, type)) ololo"
, "load_module", "load_module", None)
self.assertEquals(result, "load_module(name, file, filename, (suffix, mode, type))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testIncorrectInnerTuple(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(a, (b=1, c=2)) ololo", "f", "f", None)
self.assertEquals(result, "f(a, p_b)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNestedOnly(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((foo, bar, baz)) ololo", "f", "f", None)
self.assertEquals(result, "f((foo, bar, baz))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTwoPseudoNested(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((a1, a2, ...), (b1, b2,..)) ololo", "f", "f", None)
self.assertEquals(result, "f(a_tuple, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testTwoPseudoNestedWithLead(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, (a1, a2, ...), (b1, b2,..)) ololo", "f", "f", None)
self.assertEquals(result, "f(x, a_tuple, b_tuple)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testPseudoNestedRange(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f((a1, ..., an), b) ololo", "f", "f", None)
self.assertEquals(result, "f(a_tuple, b)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testIncorrectList(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, y, 3, $) ololo", "f", "f", None)
self.assertEquals(result, "f(x, y, *args, **kwargs)")
self.assertEquals(note, M.SIG_DOC_UNRELIABLY)
def testIncorrectStarredList(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, *y, 3, $) ololo", "f", "f", None)
self.assertEquals(result, "f(x, *y, **kwargs)")
self.assertEquals(note, M.SIG_DOC_UNRELIABLY)
def testClashingNames(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(x, y, (x, y), z) ololo", "f", "f", None)
self.assertEquals(result, "f(x, y, (x_1, y_1), z)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testQuotedParam(self):
# like __delattr__
result, ret_sig, note = self.m.parseFuncDoc("blah getattr('name') ololo", "getattr", "getattr", None)
self.assertEquals(result, "getattr(name)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testQuotedParam2(self):
# like __delattr__, too
result, ret_sig, note = self.m.parseFuncDoc('blah getattr("name") ololo', "getattr", "getattr", None)
self.assertEquals(result, "getattr(name)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testOptionalTripleDot(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(foo, ...) ololo', "f", "f", None)
self.assertEquals(result, "f(foo, *more)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testUnderscoredName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(foo_one, _bar_two) ololo', "f", "f", None)
self.assertEquals(result, "f(foo_one, _bar_two)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testDashedName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(something-else, for-a-change) ololo', "f", "f", None)
self.assertEquals(result, "f(something_else, for_a_change)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpacedDefault(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah f(a, b = 1) ololo', "f", "f", None)
self.assertEquals(result, "f(a, b=1)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpacedName(self):
# like new(S, ...)
result, ret_sig, note = self.m.parseFuncDoc('blah femme(skirt or pants) ololo', "femme", "femme", None)
self.assertEquals(result, "femme(skirt_or_pants)")
self.assertEquals(note, M.SIG_DOC_NOTE)
class TestRestoreMethodByDocComment(unittest.TestCase):
"""
Restoring with a class name set
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testPlainMethod(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(self, foo, bar) ololo", "f", "f", "SomeClass")
self.assertEquals(result, "f(self, foo, bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testInsertSelf(self):
result, ret_sig, note = self.m.parseFuncDoc("blah f(foo, bar) ololo", "f", "f", "SomeClass")
self.assertEquals(result, "f(self, foo, bar)")
self.assertEquals(note, M.SIG_DOC_NOTE)
class TestAnnotatedParameters(unittest.TestCase):
"""
    f(foo: int) and friends; in doc comments, these turn up in the 2.x world, too.
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testMixed(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, foo) ololo', "f", "f", None)
self.assertEquals(result, "f(i, foo)")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testNested(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, (foo: bar, boo: Decimal)) ololo', "f", "f", None)
self.assertEquals(result, "f(i, (foo, boo))")
self.assertEquals(note, M.SIG_DOC_NOTE)
def testSpaced(self):
result, ret_sig, note = self.m.parseFuncDoc('blah f(i: int, j :int, k : int) ololo', "f", "f", None)
self.assertEquals(result, "f(i, j, k)")
self.assertEquals(note, M.SIG_DOC_NOTE)
if not IS_CLI and VERSION < (3, 0):
class TestInspect(unittest.TestCase):
"""
See that inspect actually works if needed
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, '/dev/null')
def testSimple(self):
def target(a, b, c=1, *d, **e):
return a, b, c, d, e
result = self.m.restoreByInspect(target)
self.assertEquals(result, "(a, b, c=1, *d, **e)")
def testNested(self):
# NOTE: Py3k can't handle nested tuple args, thus we compile it conditionally
code = (
"def target(a, (b, c), d, e=1):\n"
" return a, b, c, d, e"
)
namespace = {}
eval(compile(code, "__main__", "single"), namespace)
target = namespace['target']
result = self.m.restoreByInspect(target)
self.assertEquals(result, "(a, (b, c), d, e=1)")
class _DiffPrintingTestCase(unittest.TestCase):
def assertEquals(self, etalon, specimen, msg=None):
if type(etalon) == str and type(specimen) == str and etalon != specimen:
print("%s" % "\n")
# print side by side
ei = iter(etalon.split("\n"))
si = iter(specimen.split("\n"))
if VERSION < (3, 0):
si_next = si.next
else:
si_next = si.__next__
for el in ei:
try: sl = si_next()
except StopIteration: break # I wish the exception would just work as break
if el != sl:
print("!%s" % el)
print("?%s" % sl)
else:
print(">%s" % sl)
# one of the iters might not end yet
for el in ei:
print("!%s" % el)
for sl in si:
print("?%s" % sl)
raise self.failureException(msg)
else:
self.failUnlessEqual(etalon, specimen, msg)
class TestSpecialCases(unittest.TestCase):
"""
    Tests for cases where predefined overrides kick in
"""
def setUp(self):
import sys
if VERSION >= (3, 0):
import builtins as the_builtins
self.builtins_name = the_builtins.__name__
else:
import __builtin__ as the_builtins
self.builtins_name = the_builtins.__name__
self.m = ModuleRedeclarator(the_builtins, None, '/dev/null', doing_builtins=True)
def _testBuiltinFuncName(self, func_name, expected):
class_name = None
self.assertTrue(self.m.isPredefinedBuiltin(self.builtins_name, class_name, func_name))
result, note = self.m.restorePredefinedBuiltin(class_name, func_name)
self.assertEquals(result, func_name + expected)
self.assertEquals(note, "known special case of " + func_name)
def testZip(self):
self._testBuiltinFuncName("zip", "(seq1, seq2, *more_seqs)")
def testRange(self):
self._testBuiltinFuncName("range", "(start=None, stop=None, step=None)")
def testFilter(self):
self._testBuiltinFuncName("filter", "(function_or_none, sequence)")
# we could want to test a class without __dict__, but it takes a C extension to really create one
class TestDataOutput(_DiffPrintingTestCase):
"""
Tests for sanity of output of data members
"""
def setUp(self):
self.m = ModuleRedeclarator(self, None, 4) # Pass anything with __dict__ as module
def checkFmtValue(self, data, expected):
buf = Buf(self.m)
self.m.fmtValue(buf.out, data, 0)
result = "".join(buf.data).strip()
self.assertEquals(expected, result)
def testRecursiveDict(self):
data = {'a': 1}
data['b'] = data
expected = "\n".join((
"{",
" 'a': 1,",
" 'b': '<value is a self-reference, replaced by this string>',",
"}"
))
self.checkFmtValue(data, expected)
def testRecursiveList(self):
data = [1]
data.append(data)
data.append(2)
data.append([10, data, 20])
expected = "\n".join((
"[",
" 1,",
" '<value is a self-reference, replaced by this string>',",
" 2,",
" [",
" 10,",
" '<value is a self-reference, replaced by this string>',",
" 20,",
" ],",
"]"
))
self.checkFmtValue(data, expected)
if not IS_CLI:
class TestReturnTypes(unittest.TestCase):
"""
        Tests for sanity of return type literals restored from doc strings
"""
def setUp(self):
self.m = ModuleRedeclarator(None, None, 4)
def checkRestoreFunction(self, doc, expected):
spec, ret_literal, note = self.m.parseFuncDoc(doc, "foo", "foo", None)
self.assertEqual(expected, ret_literal, "%r != %r; spec=%r, note=%r" % (expected, ret_literal, spec, note))
pass
def testSimpleArrowInt(self):
doc = "This is foo(bar) -> int"
self.checkRestoreFunction(doc, "0")
def testSimpleArrowList(self):
doc = "This is foo(bar) -> list"
self.checkRestoreFunction(doc, "[]")
def testArrowListOf(self):
doc = "This is foo(bar) -> list of int"
self.checkRestoreFunction(doc, "[]")
# def testArrowTupleOf(self):
# doc = "This is foo(bar) -> (a, b,..)"
# self.checkRestoreFunction(doc, "()")
def testSimplePrefixInt(self):
doc = "This is int foo(bar)"
self.checkRestoreFunction(doc, "0")
def testSimplePrefixObject(self):
doc = "Makes an instance: object foo(bar)"
self.checkRestoreFunction(doc, "object()")
if VERSION < (3, 0):
# TODO: we only support it in 2.x; must update when we do it in 3.x, too
def testSimpleArrowFile(self):
doc = "Opens a file: foo(bar) -> file"
self.checkRestoreFunction(doc, "file('/dev/null')")
def testUnrelatedPrefix(self):
doc = """
Consumes a list of int
foo(bar)
"""
self.checkRestoreFunction(doc, None)
###
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
| 9,880,757,391,117,172 | 37.397094 | 120 | 0.570942 | false | 3.35548 | true | false | false |
razco/music_similarity
|
midi/midi_editting.py
|
1
|
5807
|
'''
Created on Aug 26, 2017
@author: Raz
'''
import mido
import numpy as np
def midifile_to_dict(mid):
tracks = []
for track in mid.tracks:
tracks.append([vars(msg).copy() for msg in track])
return {
'ticks_per_beat': mid.ticks_per_beat,
'tracks': tracks,
}
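# Illustrative shape of the dict returned above (added commentary; values are
# hypothetical -- each message entry is the vars() of a mido message):
# {'ticks_per_beat': 480,
#  'tracks': [[{'type': 'note_on', 'note': 60, 'velocity': 64, 'time': 0}, ...], ...]}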
def test():
aaa = mido.MidiFile('AUD_DW0146.mid')
aaa.tracks
print 'buya'
mid_dict = midifile_to_dict(aaa)
track_data = np.array(mid_dict['tracks'][0])
notes_inds = np.flatnonzero(np.array(['note' in mid_dict['tracks'][0][idx] for idx in xrange(len(track_data))]))
notes_data = track_data[notes_inds]
outfile = mido.MidiFile()
track = mido.MidiTrack()
outfile.tracks.append(track)
notes_inds_to_keep = np.array(range(10, 50, 1)) # inds in the levenshtein mat that are similar
orig_notes_inds_to_keep = set(notes_inds[notes_inds_to_keep])
for idx in xrange(len(track_data) - 1, -1, -1):
msg = aaa.tracks[0][idx]
if 'note' in msg.type and idx not in orig_notes_inds_to_keep:
aaa.tracks[0].pop(idx)
aaa.save('part_melody.mid')
def run(midi_file, all_midi_notes_inds, midi_series_len, midi_start_note, track, channel):
# exports a part of the midi file at specified track, channel
# find start idx and end idx
min_idx = np.inf
max_idx = -np.inf
midi_notes_inds = all_midi_notes_inds[track][channel]
print 'num inds:', len(midi_notes_inds)
for note_idx in xrange(midi_start_note, midi_start_note + midi_series_len):
idxs = midi_notes_inds[note_idx]
min_idx = min(min_idx, min(idxs))
max_idx = max(max_idx, min(idxs)) # taking the min because it's the "note_on"
orig_note_ind_to_keep_start = min_idx
orig_note_ind_to_keep_end = max_idx
aaa = mido.MidiFile(midi_file)
notes_off_missed = []
for note_inds in midi_notes_inds[midi_start_note: midi_start_note + midi_series_len]:
curr_note_off = max(note_inds)
if curr_note_off > orig_note_ind_to_keep_end:
# max(note_inds) is the note off message of the note
notes_off_missed.append(curr_note_off)
if len(notes_off_missed) > 0:
        # if there are note-off events that fall outside orig_note_ind_to_keep_end,
# increase their time, so that when all the other messages that
# are not in the valid range are removed, the time remains ok.
time_to_add_to_missed_note_off = 0
max_note_off_missed = max(notes_off_missed)
notes_off_missed = set(notes_off_missed)
for idx in xrange(orig_note_ind_to_keep_end + 1, max_note_off_missed + 1):
msg = aaa.tracks[track][idx]
if idx in notes_off_missed:
msg.time += time_to_add_to_missed_note_off
time_to_add_to_missed_note_off = 0
else:
time_to_add_to_missed_note_off += msg.time
for idx in xrange(len(aaa.tracks[track]) - 1, -1, -1):
msg = aaa.tracks[track][idx]
if idx in notes_off_missed:
continue
if 'note' in msg.type and (
idx < orig_note_ind_to_keep_start or
idx > orig_note_ind_to_keep_end
):
# if 'note' in msg.type and idx not in rel_notes_inds:
aaa.tracks[track].pop(idx)
elif 'note' in msg.type and msg.channel != channel:
for extra_time_idx in xrange(idx + 1, len(aaa.tracks[track])):
if 'note' in msg.type and (
orig_note_ind_to_keep_start <= extra_time_idx
<= orig_note_ind_to_keep_end
):
aaa.tracks[track][extra_time_idx].time += msg.time
break
aaa.tracks[track].pop(idx)
for track_idx in xrange(len(aaa.tracks) - 1, -1, -1):
if track_idx != track:
aaa.tracks.pop(track_idx)
aaa.save('part_melody_%s' % midi_file.split('/')[-1])
# running shift0:
# score: 561.000000
# running shift1:
# score: 719.000000
# running shift2:
# score: 707.000000
# running shift3:
# score: 691.000000
# running shift4:
# score: 749.000000
# running shift5:
# score: 671.000000
# running shift6:
# score: 805.000000
# running shift7:
# score: 731.000000
# running shift8:
# score: 763.000000
# running shift9:
# score: 789.000000
# running shift10:
# score: 789.000000
# running shift11:
# score: 849.000000
# running window...
# best match with window: 38.000000 at music1 index 98, and music2 index 393
def get_instrument_length(notes_inds, track, channel):
return len(notes_inds[track][channel])
def main():
import midifile_to_notes
# -1 "midi_files/Frere Jacques.mid" -2 "midi_files/Mahler Symphony No.1 Mov.3.mid"
midi_file1 = 'midi_files/Frere Jacques.mid'
# midi_file1 = 'midi_files/the carpenters - please mr postman.mid'
# midi_file1 = 'midi_files/chaka_khan_aint_nobody.mid'
# midi_file1 = 'midi_files/sting - shape of my heart.mid'
# midi_file1 = 'midi_files/Feels - pharrel williams.mid'
_, midi_notes_inds1 = midifile_to_notes.extract_notes(midi_file1)
track = 1
channel = 0
midi_start_note1 = 1
midi_series_len1 = 22
run(midi_file1, midi_notes_inds1, midi_series_len1, midi_start_note1, track, channel)
midi_file2 = 'midi_files/Mahler Symphony No.1 Mov.3.mid'
# midi_file2 = 'midi_files/portugal the man - feel it still.mid'
# midi_file2 = 'midi_files/felix_jaehn_aint_nobody.mid'
# midi_file2 = 'midi_files/Sugababes - Shape.mid'
_, midi_notes_inds2 = midifile_to_notes.extract_notes(midi_file2)
track = 5
channel = 4
midi_start_note2 = 0
midi_series_len2 = 27
run(midi_file2, midi_notes_inds2, midi_series_len2, midi_start_note2, track, channel)
if __name__ == '__main__':
main()
|
mit
| -5,851,520,322,637,116,000 | 32.959064 | 116 | 0.61667 | false | 2.947716 | false | false | false |
EpicScriptTime/update-wrapper
|
updatewrapper/utils/display.py
|
1
|
2279
|
import termcolor
def ask_yes_no(question, default=True, spacing=True):
"""
Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
    It must be True (the default), False or None (meaning
    an answer is required of the user).
The "answer" return value is one of True or False.
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default is True:
prompt = ' [Y/n] '
elif default is False:
prompt = ' [y/N] '
else:
raise ValueError('invalid default answer: `%s`' % default)
while True:
choice = input(question + prompt).strip().lower()
if spacing:
print() # Required for spacing
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
print('Please respond with `yes` or `no`.')
def print_banner():
print(" _ _ _ _____ _ ")
print(" _ _ _ __ __| | __ _| |_ ___ __ ___ __ __ _ _ __ _ __ ___ _ __ __ _/ | |___ / / |")
print("| | | | '_ \ / _` |/ _` | __/ _ \____\ \ /\ / / '__/ _` | '_ \| '_ \ / _ \ '__| \ \ / / | |_ \ | |")
print("| |_| | |_) | (_| | (_| | || __/_____\ V V /| | | (_| | |_) | |_) | __/ | \ V /| |_ ___) || |")
print(" \__,_| .__/ \__,_|\__,_|\__\___| \_/\_/ |_| \__,_| .__/| .__/ \___|_| \_/ |_(_)____(_)_|")
print(" |_| |_| |_| ")
print()
def print_info(text):
termcolor.cprint(text, 'cyan', attrs=['bold'])
print()
def print_notice(text):
termcolor.cprint(text, 'magenta', attrs=['bold'])
print()
def print_success(text):
termcolor.cprint(text, 'green', attrs=['bold'])
print()
def print_warning(text):
termcolor.cprint(text, 'yellow', attrs=['bold'])
print()
def print_error(text):
termcolor.cprint(text, 'red', attrs=['bold'])
print()
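# Minimal self-test sketch (added for illustration; not part of the original
# module). It runs only when this file is executed directly; the question text
# and messages are hypothetical.
if __name__ == '__main__':
    print_banner()
    if ask_yes_no('Run the color demo?', default=True):
        print_success('Answered yes.')
    else:
        print_warning('Answered no.')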
|
mit
| 3,996,592,966,298,821,000 | 30.652778 | 113 | 0.425186 | false | 3.33675 | false | false | false |
frosty308/webapps
|
forms.py
|
1
|
8021
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017-2018 Alan Frost, All rights reserved.
Implementation of user forms
"""
from flask_wtf import FlaskForm
from wtforms import (BooleanField, HiddenField, PasswordField, StringField, SubmitField,
FileField, ValidationError)
from wtforms.validators import Length, InputRequired, Email, EqualTo, Regexp
from utils import check_name, check_password, check_username, check_phone
#import phonenumbers
#https://github.com/daviddrysdale/python-phonenumbers
class UserNameValidator(object):
""" User name validator, unicode except for control, punctuation, separator or symbols
"""
def __init__(self, message=None):
if not message:
message = u'Invalid user name'
self.message = message
def __call__(self, form, field):
length = field.data and len(field.data) or 0
if length == 0:
pass
elif check_username(field.data):
pass
else:
raise ValidationError(self.message)
class NameValidator(object):
""" Display name validator, unicode except for control, symbols and non-space separator
"""
def __init__(self, message=None):
if not message:
            message = u'Invalid display name'
self.message = message
def __call__(self, form, field):
length = field.data and len(field.data) or 0
if length == 0:
pass
elif check_name(field.data):
pass
else:
raise ValidationError(self.message)
class PasswordValidator(object):
""" Simple password validator for at least 8 characters with a lower, upper and digit
"""
def __init__(self, message=None):
if not message:
message = u'Password must be at least 8 characters, with UPPER/lowercase and numbers'
self.message = message
def __call__(self, form, field):
length = field.data and len(field.data) or 0
if length == 0:
pass
elif check_password(field.data):
pass
else:
raise ValidationError(self.message)
class PhoneNumberValidator(object):
""" Phone number validator
"""
def __init__(self, message=None):
if not message:
message = u'* Invalid phone number'
self.message = message
def __call__(self, form, field):
length = field.data and len(field.data) or 0
if length == 0:
pass
elif check_phone(field.data):
pass
else:
raise ValidationError(self.message)
#else:
# try:
# input_number = phonenumbers.parse(field.data)
# if not (phonenumbers.is_valid_number(input_number)):
# raise ValidationError(self.message)
# except:
# input_number = phonenumbers.parse("+1"+field.data)
# if not (phonenumbers.is_valid_number(input_number)):
# raise ValidationError(self.message)
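# Illustrative note (added): WTForms invokes each validator instance as
# validator(form, field); raising ValidationError flags the field as invalid.
# A rough standalone check, with a hypothetical field stub:
#
#     class _Field(object):
#         data = 'Secr3tPass'
#     PasswordValidator()(None, _Field())  # silent when the password is valid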
class LoginForm(FlaskForm):
""" Login
"""
email = StringField('Email', validators=[
InputRequired(),
Email()])
password = PasswordField('Password', validators=[
InputRequired(),
Length(8, 64)])
remember = BooleanField('Keep me logged in')
submit = SubmitField('Login')
class InviteForm(FlaskForm):
""" Invite a new user
"""
email = StringField('Email', validators=[
InputRequired(),
Email()])
phone = StringField('Phone', validators=[
PhoneNumberValidator()])
user = StringField('Name', validators=[InputRequired(), NameValidator()])
submit = SubmitField('Invite')
class AcceptForm(FlaskForm):
""" Accept invitation with link token, temporary password and code
"""
action = HiddenField('Action')
email = HiddenField('Email')
token = HiddenField('Token')
user = StringField('Name', validators=[InputRequired(), NameValidator()])
phone = StringField('Phone', validators=[PhoneNumberValidator()])
oldpassword = PasswordField('Password', validators=[
InputRequired(),
PasswordValidator()])
password = PasswordField('New Password', validators=[
InputRequired(),
EqualTo('confirm', message='Passwords must match')
])
code = StringField('Code', validators=[InputRequired(), Regexp(r'^(\d{6,8})$')])
confirm = PasswordField('Confirm password', validators=[InputRequired()])
submit = SubmitField('Accept Invitation')
class ConfirmForm(FlaskForm):
""" Confirm account with token
"""
action = HiddenField('Action')
email = HiddenField('Email')
token = HiddenField('Token')
code = StringField('Code', validators=[InputRequired(), Regexp(r'^(\d{6,8})$')])
submit = SubmitField('Confirm Account')
class VerifyForm(FlaskForm):
""" Verify 2FA code
"""
action = HiddenField('Action')
email = HiddenField('Email')
phone = HiddenField('Phone')
code = StringField('Code', validators=[InputRequired(), Regexp(r'^(\d{6,8})$')])
submit = SubmitField('Verify Code')
class UploadForm(FlaskForm):
""" Upload an artistic work
"""
file = FileField('Filename')
title = StringField('Title', validators=[Length(2, 128)])
artform = StringField('Artform', validators=[Length(0, 128)])
created = StringField('Date', validators=[Length(6, 32)])
dimensions = StringField('Dimensions', validators=[Length(0, 64)])
tags = StringField('Tags', validators=[Length(0, 128)])
submit = SubmitField('Upload Image')
class ResendForm(FlaskForm):
""" Resend a confirmtion or verification token
"""
action = HiddenField('Action')
email = StringField('Email Address', validators=[
InputRequired(),
Email()])
phone = StringField('phone', validators=[PhoneNumberValidator()])
submit = SubmitField('Get New Code')
class RegistrationForm(FlaskForm):
""" Register a new account
"""
user = StringField('Name', validators=[InputRequired(), NameValidator()])
email = StringField('Email Address', validators=[
InputRequired(),
Email()])
phone = StringField('Phone', validators=[PhoneNumberValidator()])
password = PasswordField('New password', validators=[
InputRequired(),
PasswordValidator(),
EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Confirm password', validators=[InputRequired()])
token = StringField('Token', validators=[InputRequired()])
class ChangePasswordForm(FlaskForm):
""" Change password
"""
email = HiddenField('Email')
oldpassword = PasswordField('Password', validators=[
InputRequired(),
Length(8, 64)])
password = PasswordField('New Password', validators=[
InputRequired(),
PasswordValidator(),
EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Confirm password', validators=[InputRequired()])
submit = SubmitField('Change Password')
class ForgotPasswordForm(FlaskForm):
""" Request a password reset
"""
email = StringField('Email Address', validators=[
InputRequired(),
Email()])
submit = SubmitField('Request Password Reset')
class ResetPasswordForm(FlaskForm):
""" Reset a password with link token, temporary password and code
"""
email = HiddenField('Email')
action = HiddenField('Action')
token = HiddenField('Token')
oldpassword = PasswordField('Password', validators=[
InputRequired(),
Length(8, 64)])
password = PasswordField('New Password', validators=[
InputRequired(),
PasswordValidator(),
EqualTo('confirm', message='Passwords must match')
])
code = StringField('Code', validators=[InputRequired(), Regexp(r'^(\d{6,8})$')])
confirm = PasswordField('Confirm password', validators=[InputRequired()])
submit = SubmitField('Reset Password')
|
gpl-3.0
| -5,773,261,079,788,126,000 | 33.424893 | 97 | 0.63234 | false | 4.46355 | false | false | false |
apache/incubator-singa
|
examples/autograd/resnet.py
|
1
|
8531
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# the code is modified from
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
from singa import autograd
from singa import tensor
from singa import device
from singa import opt
import numpy as np
from tqdm import trange
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return autograd.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
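# Note (added): with a 3x3 kernel and padding=1, spatial size is preserved at
# stride 1, so the residual additions in the blocks below line up shape-wise.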
class BasicBlock(autograd.Layer):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = autograd.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = autograd.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def __call__(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = autograd.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = autograd.add(out, residual)
out = autograd.relu(out)
return out
class Bottleneck(autograd.Layer):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = autograd.Conv2d(
inplanes, planes, kernel_size=1, bias=False
)
self.bn1 = autograd.BatchNorm2d(planes)
self.conv2 = autograd.Conv2d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn2 = autograd.BatchNorm2d(planes)
self.conv3 = autograd.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = autograd.BatchNorm2d(planes * self.expansion)
self.downsample = downsample
self.stride = stride
def __call__(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = autograd.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = autograd.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = autograd.add(out, residual)
out = autograd.relu(out)
return out
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
class ResNet(autograd.Layer):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = autograd.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = autograd.BatchNorm2d(64)
self.maxpool = autograd.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = autograd.AvgPool2d(7, stride=1)
self.fc = autograd.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
conv = autograd.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
)
bn = autograd.BatchNorm2d(planes * block.expansion)
def downsample(x):
return bn(conv(x))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
def forward(x):
for layer in layers:
x = layer(x)
return x
return forward
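    # Design note (added): instead of a Sequential container, _make_layer
    # returns a plain closure that chains the blocks; singa's autograd layers
    # are ordinary callables here, so no module registration seems required.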
def __call__(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = autograd.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = autograd.flatten(x)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
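# Usage sketch (illustrative; mirrors the benchmark in __main__ below):
#     model = resnet18()
#     x = tensor.Tensor((8, 3, 224, 224), device.create_cuda_gpu_on(0))
#     out = model(x)  # logits with shape (8, 1000)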
if __name__ == "__main__":
model = resnet50()
print("Start intialization............")
dev = device.create_cuda_gpu_on(0)
niters = 100
batch_size = 32
IMG_SIZE = 224
sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
tx = tensor.Tensor((batch_size, 3, IMG_SIZE, IMG_SIZE), dev)
ty = tensor.Tensor((batch_size,), dev, tensor.int32)
autograd.training = True
x = np.random.randn(batch_size, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
y = np.random.randint(0, 1000, batch_size, dtype=np.int32)
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
import time
dev.Sync()
start = time.time()
fd = 0
softmax = 0
update = 0
with trange(niters) as t:
for _ in t:
dev.Sync()
tick = time.time()
x = model(tx)
dev.Sync()
fd += time.time() - tick
tick = time.time()
loss = autograd.softmax_cross_entropy(x, ty)
dev.Sync()
softmax += time.time() - tick
for p, g in autograd.backward(loss):
                dev.Sync()  # this "for" loop runs many times, so syncing here can slow things down
tick = time.time()
sgd.update(p, g)
                dev.Sync()  # this "for" loop runs many times, so syncing here can slow things down
update += time.time() - tick
dev.Sync()
end = time.time()
throughput = float(niters * batch_size) / (end - start)
print("Throughput = {} per second".format(throughput))
titer = (end - start) / float(niters)
tforward = float(fd) / float(niters)
tsoftmax = float(softmax) / float(niters)
tbackward = titer - tforward - tsoftmax
tsgd = float(update) / float(niters)
print("Total={}, forward={}, softmax={}, backward={}, sgd={}".format(titer, tforward, tsoftmax, tbackward, tsgd))
|
apache-2.0
| 8,992,445,720,500,284,000 | 28.519031 | 117 | 0.593717 | false | 3.423355 | false | false | false |
bxlab/HiFive_Paper
|
Scripts/Figures/fivec_hicpipe_algorithm_comparison.py
|
1
|
14324
|
#!/usr/bin/env python
import sys
import os
import numpy
from pyx import canvas, text, path, graph, color, trafo, unit, attr, deco, style, bitmap
import h5py
import hifive
unit.set(defaultunit="cm")
text.set(mode="latex")
text.preamble(r"\usepackage{times}")
text.preamble(r"\usepackage{sansmath}")
text.preamble(r"\sansmath")
text.preamble(r"\renewcommand*\familydefault{\sfdefault}")
painter = graph.axis.painter.regular( labeldist=0.1, labelattrs=[text.size(-3)], titleattrs=[text.size(-3)] )
methods = ['Raw', 'Prob', 'Exp', 'Bin', 'Exp-KR']
method_colors = {
'Prob':color.cmyk.Black,
'Exp':color.cmyk.CadetBlue,
'Bin':color.cmyk.MidnightBlue,
'Raw':color.cmyk.Dandelion,
'Exp-KR':color.cmyk.Mahogany,
}
def main():
out_fname = sys.argv[1]
basedir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-2])
hic_phillips_fname1 = "%s/Data/HiC/HiCPipe/HM/mm9_ESC_NcoI_Phillips.hch" % basedir
hic_phillips_fname2 = "%s/Data/HiC/HiCPipe/HM/mm9_ESC_HindIII_Phillips.hch" % basedir
hic_nora_fname1 = "%s/Data/HiC/HiCPipe/HM/mm9_ESC_NcoI_Nora.hch" % basedir
hic_nora_fname2 = "%s/Data/HiC/HiCPipe/HM/mm9_ESC_HindIII_Nora.hch" % basedir
hic_phillips1 = h5py.File(hic_phillips_fname1, 'r')
hic_phillips2 = h5py.File(hic_phillips_fname2, 'r')
hic_nora1 = h5py.File(hic_nora_fname1, 'r')
hic_nora2 = h5py.File(hic_nora_fname2, 'r')
hm_phillips = {}
hm_nora = {}
for key in hic_phillips1.keys():
if key.count('unbinned_counts') == 0:
continue
region = int(key.split('.')[0])
hm_phillips[region] = dynamically_bin(hic_phillips1, hic_phillips2, region)
for key in hic_nora1.keys():
if key.count('unbinned_counts') == 0:
continue
region = int(key.split('.')[0])
hm_nora[region] = dynamically_bin(hic_nora1, hic_nora2, region)
fivec_fnames = {
"Prob_Phillips":"%s/Data/FiveC/HiFive/Phillips_ESC_probnodist.fcp" % basedir,
"Prob_Nora":"%s/Data/FiveC/HiFive/Nora_ESC_male_E14_probnodist.fcp" % basedir,
"Bin_Phillips":"%s/Data/FiveC/HiFive/Phillips_ESC_binnodist.fcp" % basedir,
"Bin_Nora":"%s/Data/FiveC/HiFive/Nora_ESC_male_E14_binnodist.fcp" % basedir,
"Exp_Phillips":"%s/Data/FiveC/HiFive/Phillips_ESC_expnodist.fcp" % basedir,
"Exp_Nora":"%s/Data/FiveC/HiFive/Nora_ESC_male_E14_expnodist.fcp" % basedir,
"Exp-KR_Phillips":"%s/Data/FiveC/HiFive/Phillips_ESC_expKRnodist.fcp" % basedir,
"Exp-KR_Nora":"%s/Data/FiveC/HiFive/Nora_ESC_male_E14_expKRnodist.fcp" % basedir,
}
data = {}
imgs = {}
ratio1 = 0
ratio2 = 0
for meth in ['Prob', 'Bin', 'Exp', 'Exp-KR']:
fc = hifive.FiveC(fivec_fnames["%s_Phillips" % meth])
fragments = fc.frags['fragments'][...]
regions = fc.frags['regions'][...]
counts = numpy.zeros(0, dtype=numpy.float64)
expected = numpy.zeros(0, dtype=numpy.float64)
hic_counts = numpy.zeros(0, dtype=numpy.float64)
hic_expected = numpy.zeros(0, dtype=numpy.float64)
skipped = []
for i in range(fc.frags['regions'].shape[0]):
temp = fc.cis_heatmap(i, datatype='fragment', arraytype='compact', binsize=0, skipfiltered=True)
if temp is None:
skipped.append(i)
continue
counts = numpy.hstack((counts, temp[:, :, 0].ravel()))
expected = numpy.hstack((expected, temp[:, :, 1].ravel()))
if i == 6:
ratio1 = temp.shape[1] / float(temp.shape[0])
imgs["%s_Phillips" % meth] = hifive.plotting.plot_full_array(temp, symmetricscaling=False)
if meth == 'Prob':
temp1 = numpy.zeros((temp.shape[0], temp.shape[1]), dtype=numpy.float32)
temp1[numpy.where(temp[:, :, 0] > 0.0)] = 1
if i == 6:
imgs["Raw_Phillips"] = hifive.plotting.plot_full_array(
numpy.dstack((temp[:, :, 0], temp1)), symmetricscaling=False)
binbounds = numpy.hstack((
fragments['start'][regions['start_frag'][i]:regions['stop_frag'][i]].reshape(-1, 1),
fragments['stop'][regions['start_frag'][i]:regions['stop_frag'][i]].reshape(-1, 1)))
valid = numpy.where(fc.filter[regions['start_frag'][i]:regions['stop_frag'][i]])[0]
binbounds = binbounds[valid, :]
temp = hm_phillips[i]
strands = fragments['strand'][regions['start_frag'][i]:regions['stop_frag'][i]][valid]
temp = temp[numpy.where(strands == 0)[0], :, :][:, numpy.where(strands == 1)[0], :]
hic_counts = numpy.hstack((hic_counts, temp[:, :, 0].ravel()))
hic_expected = numpy.hstack((hic_expected, temp[:, :, 1].ravel()))
if i == 6:
imgs["HiC_Phillips"] = hifive.plotting.plot_full_array(temp, symmetricscaling=False)
if meth == 'Prob':
data["Raw_Phillips"] = numpy.copy(counts)
where = numpy.where(hic_expected > 0.0)[0]
hic_counts[where] /= hic_expected[where]
data["HiC_Phillips"] = numpy.copy(hic_counts)
where = numpy.where(expected > 0.0)[0]
counts[where] /= expected[where]
data["%s_Phillips" % meth] = numpy.copy(counts)
fc = hifive.FiveC(fivec_fnames["%s_Nora" % meth])
temp = fc.cis_heatmap(0, datatype='fragment', arraytype='compact', binsize=0, skipfiltered=True)
ratio2 = temp.shape[1] / float(temp.shape[0])
imgs["%s_Nora" % meth] = hifive.plotting.plot_full_array(temp, symmetricscaling=False)
counts = temp[:, :, 0].ravel()
expected = temp[:, :, 1].ravel()
if meth == 'Prob':
temp1 = numpy.zeros((temp.shape[0], temp.shape[1]), dtype=numpy.float32)
temp1[numpy.where(temp[:, :, 0] > 0.0)] = 1
imgs["Raw_Nora"] = hifive.plotting.plot_full_array(
numpy.dstack((temp[:, :, 0], temp1)), symmetricscaling=False)
data["Raw_Nora"] = numpy.copy(counts)
fragments = fc.frags['fragments'][...]
regions = fc.frags['regions'][...]
binbounds = numpy.hstack((
fragments['start'][regions['start_frag'][0]:regions['stop_frag'][0]].reshape(-1, 1),
fragments['stop'][regions['start_frag'][0]:regions['stop_frag'][0]].reshape(-1, 1)))
binbounds = binbounds[numpy.where(fc.filter[regions['start_frag'][0]:regions['stop_frag'][0]])[0], :]
temp = hm_nora[0]
strands = fragments['strand'][regions['start_frag'][0]:regions['stop_frag'][0]]
temp = temp[numpy.where(strands==0)[0], :, :][:, numpy.where(strands == 1)[0], :]
imgs["HiC_Nora"] = hifive.plotting.plot_full_array(temp, symmetricscaling=False)
hic_counts = temp[:, :, 0].ravel()
hic_expected = temp[:, :, 1].ravel()
where = numpy.where(hic_expected > 0.0)[0]
hic_counts[where] /= hic_expected[where]
data["HiC_Nora"] = numpy.copy(hic_counts)
where = numpy.where(expected > 0.0)[0]
counts[where] /= expected[where]
data["%s_Nora" % meth] = numpy.copy(counts)
correlations = {}
output = open(out_fname.replace('pdf', 'txt'), 'w')
print >> output, "Method\tPhillips\tNora"
for meth in methods:
temp = [meth]
for name in ["Phillips", "Nora"]:
valid = numpy.where((data["%s_%s" % (meth, name)] > 0.0) * (data["HiC_%s" % name] > 0.0))
correlations["%s_%s" % (meth, name)] = numpy.corrcoef(numpy.log(data["%s_%s" % (meth, name)][valid]),
numpy.log(data["HiC_%s" % name][valid]))[0, 1]
temp.append(str(correlations["%s_%s" % (meth, name)]))
print >> output, '\t'.join(temp)
output.close()
width = 16.8
spacer = 0.3
c = canvas.canvas()
plot_width = (width - spacer * 3.0 - 0.4) / 4.0
for i, meth in enumerate(["Raw", "Prob", "HiC"]):
meth_names = {"Raw":"Raw", "Prob":"HiFive", "HiC":"HiC"}
c.text(plot_width * (i + 1.5) + spacer * (i + 1), (ratio1 + ratio2) * plot_width + spacer + 0.1,
"%s" % meth_names[meth], [text.halign.center, text.valign.bottom, text.size(-2)])
c.insert(bitmap.bitmap(0, 0, imgs["%s_Phillips" % meth], width=plot_width),
[trafo.translate((i + 1) * (plot_width + spacer), plot_width * ratio2 + spacer)])
c.insert(bitmap.bitmap(0, 0, imgs["%s_Nora" % meth], width=plot_width),
[trafo.translate((i + 1) * (plot_width + spacer), 0)])
g = graph.graphxy(width=plot_width - 0.8, height=plot_width * ratio1,
x=graph.axis.nestedbar(painter=graph.axis.painter.bar(nameattrs=None)),
y=graph.axis.lin(painter=painter),
x2=graph.axis.lin(parter=None, min=0, max=1),
y2=graph.axis.lin(parter=None, min=0, max=1))
for i, meth in enumerate(methods):
Y = numpy.zeros(2, dtype=numpy.float32)
col = method_colors[meth]
for j, name in enumerate(["Phillips", "Nora"]):
Y[j] = correlations["%s_%s" % (meth, name)]
g.plot(graph.data.points(zip(zip(range(Y.shape[0]), [i] * Y.shape[0]), Y), xname=1, y=2),
[graph.style.changebar([col])])
g.text(-0.8, plot_width * ratio1 * 0.5, "Correlation",
[text.halign.center, text.valign.top, text.size(-3), trafo.rotate(90)])
g.text((plot_width - 0.8) * 0.25, -0.1, "Phillips",
[text.halign.center, text.valign.top, text.size(-3)])
g.text((plot_width - 0.8) * 0.75, -0.1, "Nora",
[text.halign.center, text.valign.top, text.size(-3)])
c.insert(g, [trafo.translate(0.8, plot_width * ratio2 + spacer)])
c.text(width, (ratio1 + ratio2 * 0.5) * plot_width + spacer, "Phillips",
[text.halign.center, text.valign.top, trafo.rotate(-90), text.size(-2)])
c.text(width, ratio1 * 0.5 * plot_width, "Nora",
[text.halign.center, text.valign.top, trafo.rotate(-90), text.size(-2)])
meth_names = {"Raw":"Raw", "Prob":"HiFive-Probability", "Exp":"HiFive-Express", "Bin":"HiFive-Binning",
"Exp-KR":"HiFive-ExpressKR", "Exp-KR-dist":"HiFive-ExpressKR-dist"}
for i, meth in enumerate(methods):
c.fill(path.rect(1.0, plot_width * ratio1 - 1.0 - i * 0.5, 0.2, 0.2), [method_colors[meth]])
c.text(1.3, plot_width * ratio1 - 0.9 - i * 0.5, "%s" % meth_names[meth],
[text.halign.left, text.valign.middle, text.size(-3)])
c.writePDFfile(out_fname)
def dynamically_bin(hic1, hic2, region):
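    # Added commentary (best-effort reading): merge counts/expected from the
    # two restriction-enzyme datasets, combine their fragment midpoints into a
    # single coordinate set, then dynamically bin the merged unbinned matrix
    # until each bin reaches the minobservations threshold (25).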
counts = hic1['%i.counts' % region][...] + hic2['%i.counts' % region][...]
expected = hic1['%i.expected' % region][...] + hic2['%i.expected' % region][...]
upper = numpy.zeros((counts.shape[0], 2), dtype=numpy.float32)
upper[:, 0] = counts
upper[:, 1] = expected
mids1 = hic1['%i.mids' % region][...]
mids2 = hic2['%i.mids' % region][...]
indices = numpy.triu_indices(mids1.shape[0], 1)
unbinned_counts1 = numpy.zeros((mids1.shape[0], mids1.shape[0]), dtype=numpy.int32)
unbinned_counts1[indices] = hic1['%i.unbinned_counts' % region][...]
unbinned_counts1[indices[1], indices[0]] = unbinned_counts1[indices]
unbinned_expected1 = numpy.zeros((mids1.shape[0], mids1.shape[0]), dtype=numpy.int32)
unbinned_expected1[indices] = hic1['%i.unbinned_expected' % region][...]
unbinned_expected1[indices[1], indices[0]] = unbinned_expected1[indices]
indices = numpy.triu_indices(mids2.shape[0], 1)
unbinned_counts2 = numpy.zeros((mids2.shape[0], mids2.shape[0]), dtype=numpy.int32)
unbinned_counts2[indices] = hic2['%i.unbinned_counts' % region][...]
unbinned_counts2[indices[1], indices[0]] = unbinned_counts2[indices]
unbinned_expected2 = numpy.zeros((mids2.shape[0], mids2.shape[0]), dtype=numpy.int32)
unbinned_expected2[indices] = hic2['%i.unbinned_expected' % region][...]
unbinned_expected2[indices[1], indices[0]] = unbinned_expected2[indices]
bounds = hic1['%i.bounds' % region][...]
allmids = numpy.zeros((mids1.shape[0] + mids2.shape[0], 2), dtype=numpy.int32)
allmids[:mids1.shape[0], 0] = mids1 - 1
allmids[:mids1.shape[0], 1] = mids1 + 1
allmids[mids1.shape[0]:, 0] = mids2 - 1
allmids[mids1.shape[0]:, 1] = mids2 + 1
allmids = allmids[numpy.argsort(allmids[:, 0]), :]
indices1 = numpy.searchsorted(allmids[:, 1], mids1)
indices1_1 = (indices1.reshape(-1, 1) * allmids.shape[0] + indices1.reshape(1, -1)).ravel()
indices2 = numpy.searchsorted(allmids[:, 1], mids2)
indices2_1 = (indices2.reshape(-1, 1) * allmids.shape[0] + indices2.reshape(1, -1)).ravel()
unbinned = numpy.zeros((allmids.shape[0], allmids.shape[0], 2), dtype=numpy.float32)
unbinned[:, :, 0] += numpy.bincount(indices1_1, minlength=allmids.shape[0] ** 2,
weights=unbinned_counts1.ravel()).reshape(allmids.shape[0], -1)
unbinned[:, :, 1] += numpy.bincount(indices1_1, minlength=allmids.shape[0] ** 2,
weights=unbinned_expected1.ravel()).reshape(allmids.shape[0], -1)
unbinned[:, :, 0] += numpy.bincount(indices2_1, minlength=allmids.shape[0] ** 2,
weights=unbinned_counts2.ravel()).reshape(allmids.shape[0], -1)
unbinned[:, :, 1] += numpy.bincount(indices2_1, minlength=allmids.shape[0] ** 2,
weights=unbinned_expected2.ravel()).reshape(allmids.shape[0], -1)
indices = numpy.triu_indices(unbinned.shape[0], 1)
unbinned = unbinned[indices[0], indices[1], :]
indices = numpy.triu_indices(bounds.shape[0], 1)
hifive.hic_binning.dynamically_bin_cis_array(unbinned, allmids, upper, bounds,
expansion_binsize=0, minobservations=25)
binned = numpy.zeros((bounds.shape[0], bounds.shape[0], 2), dtype=numpy.float32)
binned[indices[0], indices[1], :] = upper
binned[indices[1], indices[0], :] = upper
return binned
if __name__ == "__main__":
main()
|
bsd-3-clause
| 1,219,841,942,067,948,000 | 55.616601 | 113 | 0.578749 | false | 2.922073 | false | false | false |
delete/estofadora
|
estofadora/statement/views.py
|
1
|
4766
|
from datetime import datetime
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.contrib import messages
from estofadora.core.utils import MONTHS, last_day_of, month_before_of
from .forms import CashForm
from .models import Cash, Balance
@login_required
def home(request):
return render(request, 'statement/statement.html')
@login_required
def cash(request):
context = {}
date = datetime.now().date()
content = Cash.objects.filter(date=date)
form = CashForm(initial={'date': date})
if request.method == 'POST':
if 'search_form' in request.POST:
date = request.POST.get('search_date')
            # Parse the date, accepting either 21/12/2015 or 2015-12-21
try:
date = datetime.strptime(date, '%d/%m/%Y').date()
except ValueError:
date = datetime.strptime(date, '%Y-%m-%d').date()
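            # Note (added): the two formats above likely come from an HTML5
            # date input (ISO 'YYYY-MM-DD') versus a localized text field
            # ('DD/MM/YYYY'); the try/except accepts both.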
content = Cash.objects.filter(date=date)
else:
form = CashForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Registrado com sucesso!')
return redirect(reverse('statement:cash'))
total_before = Balance.total_balance_before(date)
content, balance = Cash.create_balance(content, total_before)
context['form'] = form
context['content'] = content
context['total_value'] = balance
context['total_before'] = total_before
context['choose_date'] = date
context['section'] = 'cash'
return render(request, 'statement/cash.html', context)
@login_required
def delete(request, pk):
cash = get_object_or_404(Cash, pk=pk)
cash.delete()
messages.success(request, 'Registro removido com sucesso!')
return redirect(reverse('statement:cash'))
@login_required
def edit(request, pk):
context = {}
cash = get_object_or_404(Cash, pk=pk)
if request.method == 'POST':
form = CashForm(request.POST, instance=cash)
if form.is_valid():
form.save()
return render(
request, 'statement/item_edit_form_success.html',
{'item': cash}
)
else:
context['form_error'] = True
else:
form = CashForm(instance=cash)
context['form'] = form
context['item'] = cash
return render(request, 'statement/item_edit_form.html', context)
@login_required
def cash_month(request):
context = {}
date = datetime.now().date()
year = date.year
month = date.month
    # If a date was not given, filter by the current date.
content = Cash.filter_by_date(month=month, year=year)
total_value = Cash.total_value_by_date(month=month, year=year)
if request.method == 'POST':
month = int(request.POST.get('selectmonth'))
year = int(request.POST.get('selectyear'))
content = Cash.filter_by_date(month=month, year=year)
total_value = Cash.total_value_by_date(month=month, year=year)
y, m = month_before_of(year, month)
last_day_of_month_before = last_day_of(y, m)
total_before = Balance.total_balance_before(last_day_of_month_before)
content, total_value = Cash.create_balance(content, total_before)
context['content'] = content
context['total_value'] = total_value
context['total_before'] = total_before
context['choose_month'] = month
context['choose_year'] = year
context['months'] = MONTHS
context['years'] = Cash.list_years()
context['section'] = 'cash_month'
return render(request, 'statement/cash_month.html', context)
@login_required
def cash_annual(request):
context = {}
    # If a year was not given, use the current year.
year = datetime.now().date().year
if request.method == 'POST':
year = int(request.POST.get('selectyear'))
balances = []
month = 1
while month < 13:
# Get the total balance from January to December.
balance = Balance.balance_from_month(year=year, month=month)
balances.append(float(balance))
month += 1
total_value = Cash.total_value_by_date(year=year)
    # Get the last day of the previous year to carry over its running total.
january = 1
y, m = month_before_of(year, january)
last_day_year_before = last_day_of(y, m)
total_before = Balance.total_balance_before(last_day_year_before)
context['total_value'] = total_value
context['total_before'] = total_before
context['choose_year'] = year
context['balances'] = balances
context['years'] = Cash.list_years()
context['section'] = 'cash_annual'
return render(request, 'statement/cash_annual.html', context)
|
mit
| 1,433,432,891,108,522,200 | 28.239264 | 73 | 0.634914 | false | 3.559373 | false | false | false |
stormi/tsunami
|
src/primaires/salle/commandes/ouvrir/__init__.py
|
1
|
3297
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'ouvrir'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.interpreteur.masque.exceptions.erreur_interpretation import \
ErreurInterpretation
class CmdOuvrir(Commande):
"""Commande 'ouvrir'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "ouvrir", "open")
self.nom_categorie = "bouger"
self.schema = "<nom_sortie>"
self.aide_courte = "ouvre une porte"
self.aide_longue = \
"Cette commande permet d'ouvrir une sortie de la salle où " \
"vous vous trouvez."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
sortie = dic_masques["nom_sortie"].sortie
salle = personnage.salle
nom_complet = sortie.nom_complet.capitalize()
personnage.agir("ouvrir")
if not sortie.porte:
raise ErreurInterpretation(
"|err|Cette sortie n'est pas une porte.|ff|")
if not sortie.porte.fermee:
            raise ErreurInterpretation(
                "|err|Cette porte est déjà ouverte.|ff|")
if sortie.porte.verrouillee:
            raise ErreurInterpretation(
                "Cette porte semble fermée à clef.")
if not personnage.est_immortel() and not sortie.salle_dest.peut_entrer(
personnage):
raise ErreurInterpretation(
"Vous ne pouvez ouvrir cette porte.")
sortie.porte.ouvrir()
personnage << "Vous ouvrez {}.".format(sortie.nom_complet)
salle.envoyer("{{}} ouvre {}.".format(sortie.nom_complet), personnage)
|
bsd-3-clause
| -1,509,933,335,406,380,800 | 42.866667 | 79 | 0.696657 | false | 3.576087 | false | false | false |
Jimdo/ansible-fastly
|
tests/test_fastly_response_object.py
|
1
|
2949
|
#!/usr/bin/env python
import os
import unittest
import sys
from test_common import TestCommon
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'library'))
from fastly_service import FastlyConfiguration
class TestFastlyResponseObject(TestCommon):
@TestCommon.vcr.use_cassette()
def test_fastly_response_object_defaults(self):
response_object_configuration = self.minimal_configuration.copy()
response_object_configuration.update({
'response_objects': [{
'name': 'Set 200 status code',
}]
})
configuration = FastlyConfiguration(response_object_configuration)
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.configuration.response_objects[0].name, 'Set 200 status code')
self.assertEqual(service.active_version.configuration.response_objects[0].status, '200')
self.assertEqual(service.active_version.configuration.response_objects[0].response, 'Ok')
self.assertEqual(service.active_version.configuration, configuration)
active_version_number = service.active_version.number
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.number, active_version_number)
@TestCommon.vcr.use_cassette()
def test_fastly_response_object_content_content_type(self):
response_object_configuration = self.minimal_configuration.copy()
response_object_configuration.update({
'response_objects': [{
'name': 'Set 200 status code',
'status': 200,
'response': 'Ok',
'content': 'Hello from Fastly',
'content_type': 'text/plain',
}]
})
configuration = FastlyConfiguration(response_object_configuration)
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.configuration.response_objects[0].name, 'Set 200 status code')
self.assertEqual(service.active_version.configuration.response_objects[0].status, '200')
self.assertEqual(service.active_version.configuration.response_objects[0].response, 'Ok')
self.assertEqual(service.active_version.configuration.response_objects[0].content, 'Hello from Fastly')
self.assertEqual(service.active_version.configuration.response_objects[0].content_type, 'text/plain')
self.assertEqual(service.active_version.configuration, configuration)
active_version_number = service.active_version.number
service = self.enforcer.apply_configuration(self.FASTLY_TEST_SERVICE, configuration).service
self.assertEqual(service.active_version.number, active_version_number)
if __name__ == '__main__':
unittest.main()
|
mit
| 3,654,560,946,872,806,000 | 45.078125 | 111 | 0.699559 | false | 4.200855 | true | false | false |
ericblau/ipf-xsede
|
ipf/glue2/application.py
|
1
|
10306
|
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.error import StepError
from ipf.step import Step
from ipf.sysinfo import ResourceName
from ipf.ipfinfo import IPFInformation, IPFInformationJson, IPFInformationTxt
from .entity import *
#######################################################################################################################
class ApplicationEnvironment(Entity):
def __init__(self):
Entity.__init__(self)
self.AppName = "unknown" # string
self.SpecifiedName = None # string
self.AppVersion = None # string
self.Repository = None # string (url)
self.State = None # string (AppEnvState_t)
self.RemovalDate = None # datetime
self.License = None # string (License_t)
self.Description = None # string
self.BestBenchmark = [] # string (Benchmark_t)
self.ParallelSupport = None # string (ParallelSupport_t)
self.MaxSlots = None # integer
self.MaxJobs = None # integer
self.MaxUserSeats = None # integer
self.FreeSlots = None # integer
self.FreeJobs = None # integer
self.FreeUserSeats = None # integer
self.ExecutionEnvironmentID = [] # string (ID)
self.ComputingManagerID = None # string (ID)
self.ApplicationHandleID = [] # string (ID)
self.Keywords = [] # string (ID)
self.Extension = {}
self.SupportStatus = None
def __str__(self):
return json.dumps(ApplicationEnvironmentOgfJson(self).toJson(), sort_keys=True, indent=4)
#######################################################################################################################
class ApplicationEnvironmentOgfJson(EntityOgfJson):
data_cls = ApplicationEnvironment
def __init__(self, data):
EntityOgfJson.__init__(self, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
doc = EntityOgfJson.toJson(self)
        # SpecifiedName holds the descriptive Name: field from inside the module file
if self.data.SpecifiedName is not None:
doc["Name"] = self.data.SpecifiedName
if self.data.AppName is not None:
doc["AppName"] = self.data.AppName
if self.data.AppVersion is not None:
doc["AppVersion"] = self.data.AppVersion
if self.data.Repository is not None:
doc["Repository"] = self.data.Repository
if self.data.State is not None:
doc["State"] = self.data.State
if self.data.RemovalDate is not None:
doc["RemovalDate"] = dateTimeToText(self.data.RemovalDate)
if self.data.License is not None:
doc["License"] = self.data.License
if self.data.Description is not None:
doc["Description"] = self.data.Description
if len(self.data.BestBenchmark) > 0:
doc["BestBenchmark"] = self.data.BestBenchmark
if self.data.ParallelSupport is not None:
doc["ParallelSupport"] = self.data.ParallelSupport
if self.data.MaxSlots is not None:
doc["MaxSlots"] = self.data.MaxSlots
if self.data.MaxJobs is not None:
doc["MaxJobs"] = self.data.MaxJobs
if self.data.MaxUserSeats is not None:
doc["MaxUserSeats"] = self.data.MaxUserSeats
if self.data.FreeSlots is not None:
doc["FreeSlots"] = self.data.FreeSlots
if self.data.FreeJobs is not None:
doc["FreeJobs"] = self.data.FreeJobs
if self.data.FreeUserSeats is not None:
doc["FreeUserSeats"] = self.data.FreeUserSeats
if len(self.data.Keywords) > 0:
doc["Keywords"] = self.data.Keywords
if len(self.data.Extension) > 0:
extensions = []
for ext in self.data.Extension:
extensions.append(ext)
#doc["Extensions"] = list(extensions)
if self.data.SupportStatus is not None:
doc["SupportStatus"] = self.data.SupportStatus
associations = {}
associations["ExecutionEnvironmentID"] = self.data.ExecutionEnvironmentID
associations["ComputingManagerID"] = self.data.ComputingManagerID
if len(self.data.ApplicationHandleID) > 0:
associations["ApplicationHandleID"] = self.data.ApplicationHandleID
doc["Associations"] = associations
return doc
#######################################################################################################################
class ApplicationHandle(Entity):
def __init__(self):
Entity.__init__(self)
# string (ApplicationHandle_t)
self.Type = "unknown"
# string
self.Value = "unknown"
# string (ID)
self.ApplicationEnvironmentID = "urn:glue2:ApplicationEnvironment:unknown"
#######################################################################################################################
class ApplicationHandleOgfJson(EntityOgfJson):
data_cls = ApplicationHandle
def __init__(self, data):
EntityOgfJson.__init__(self, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
doc = EntityOgfJson.toJson(self)
doc["Type"] = self.data.Type
doc["Value"] = self.data.Value
associations = {}
associations["ApplicationEnvironmentID"] = self.data.ApplicationEnvironmentID
doc["Associations"] = associations
return doc
#######################################################################################################################
class Applications(Data):
def __init__(self, resource_name, ipfinfo):
Data.__init__(self)
self.id = resource_name
self.environments = []
self.handles = []
self.resource_name = resource_name
self.ipfinfo = ipfinfo
def add(self, env, handles):
if env.AppVersion is None:
app_version = "unknown"
else:
app_version = env.AppVersion
env.Name = "%s-%s" % (env.AppName, app_version)
env.id = "%s.%s.%s" % (app_version, env.AppName, self.resource_name)
env.ID = "urn:glue2:ApplicationEnvironment:%s.%s.%s.%s" % (
app_version, env.AppName, self.resource_name, env.path_hash)
env.ComputingManagerID = "urn:glue2:ComputingManager:%s" % (
self.resource_name)
env.ApplicationHandleID = []
for handle in handles:
handle.ApplicationEnvironmentID = env.ID
handle.Name = "%s-%s" % (env.AppName, app_version)
handle.id = "%s.%s.%s.%s" % (
handle.Type, app_version, env.AppName, self.resource_name)
handle.ID = "urn:glue2:ApplicationHandle:%s:%s.%s.%s.%s" % \
(handle.Type, app_version, env.AppName,
self.resource_name, env.path_hash)
env.ApplicationHandleID.append(handle.ID)
self.environments.append(env)
self.handles.extend(handles)
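        # Illustrative example (not in the original module; all names below
        # are assumed values): with AppName "gcc", AppVersion "4.9" and
        # resource_name "cluster.example.org", add() produces IDs of the form
        #   urn:glue2:ApplicationEnvironment:4.9.gcc.cluster.example.org.<path_hash>
        #   urn:glue2:ApplicationHandle:<type>:4.9.gcc.cluster.example.org.<path_hash>
        # where <path_hash> is expected to be set on the environment elsewhere.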
#######################################################################################################################
class ApplicationsOgfJson(Representation):
data_cls = Applications
def __init__(self, data):
Representation.__init__(
self, Representation.MIME_APPLICATION_JSON, data)
def get(self):
return json.dumps(self.toJson(), sort_keys=True, indent=4)
def toJson(self):
doc = {}
doc["ApplicationEnvironment"] = []
for env in self.data.environments:
doc["ApplicationEnvironment"].append(
ApplicationEnvironmentOgfJson(env).toJson())
doc["ApplicationHandle"] = []
for handle in self.data.handles:
doc["ApplicationHandle"].append(
ApplicationHandleOgfJson(handle).toJson())
doc["PublisherInfo"] = [IPFInformationJson(
ipfinfo).toJson() for ipfinfo in self.data.ipfinfo]
return doc
#######################################################################################################################
class ApplicationsStep(Step):
def __init__(self):
Step.__init__(self)
self.description = "produces a document containing GLUE 2 ApplicationEnvironment and ApplicationHandle"
self.time_out = 30
self.requires = [IPFInformation, ResourceName]
self.produces = [Applications]
self.resource_name = None
def run(self):
self.resource_name = self._getInput(ResourceName).resource_name
self.ipfinfo = [self._getInput(IPFInformation)]
self._output(self._run())
def _run(self):
        raise StepError("ApplicationsStep._run not overridden")
#######################################################################################################################
|
apache-2.0
| -30,491,066,512,194,984 | 39.574803 | 119 | 0.522511 | false | 4.556145 | false | false | false |
edouardpoitras/Eva
|
clients/cli.py
|
1
|
4617
|
"""
This file holds the abstract CLI class used to create command line utilities
that interact with Eva.
Please use the local_cli.py or remote_cli.py to interact with Eva via the
command line.
"""
import time
from multiprocessing import Process
class CLI(object):
"""
Interface object used to create CLI-based Eva clients.
    Will take care of some of the heavy lifting, such as setting up the pubsub
    consumer for Eva messages and responses, and starting the interaction loop.
See the LocalCLI and RemoteCLI objects for examples.
"""
def __init__(self):
self.process = None
def start_consumer(self, queue, response_prefix='Eva Message: '):
"""
Start a pubsub consumer to receive messages from Eva.
:param queue: The channel to receive messages from.
:type queue: string
:param response_prefix: A string that will prefix all messages from the queue.
:type response_prefix: string
"""
self.process = Process(target=self.consume_messages, args=(queue, response_prefix))
self.process.start()
def consume_messages(self, queue, response_prefix):
"""
A method that consumes the messages from the queue specified.
Will automatically print the messages to the CLI.
This is the method used to fire off a separate process in the
``start_consumer`` method.
It will continuously tail the MongoDB collection holding the messages.
:param queue: The channel to receive messages from.
:type queue: string
:param response_prefix: A string that will prefix all messages from the queue.
:type response_prefix: string
"""
# Need to listen for messages and print them to the CLI.
pubsub = self.get_pubsub()
subscriber = pubsub.subscribe(queue)
# Subscriber will continuously tail the mongodb collection queue.
for message in subscriber:
if message is not None:
if isinstance(message, dict):
print('%s%s' %(response_prefix, message['output_text']))
else:
print('%s%s' %(response_prefix, message))
time.sleep(0.1)
def get_pubsub(self):
"""
A method meant to be overriden in order to get a pubsub object depending
on the requirements of the CLI client.
:return: An anypubsub object used to send and receive messages.
:rtype: `anypubsub.interfaces.PubSub <https://github.com/smarzola/anypubsub>`_
"""
# Logic here to get the proper anypubsub object.
pass
def interact(self, command=None):
"""
The main method that interacts with the Eva server.
:param command: An optional command to send Eva. If None, this method
will continuously poll the user for a new command/request after
every response from Eva.
:type command: string
"""
if command is not None:
results = self.get_results(command)
self.handle_results(results)
else:
print('=== Eva CLI ===')
while True:
command = input('You: ')
results = self.get_results(command)
if results is not None:
self.handle_results(results)
def get_results(self, command):
"""
This method is meant to be overridden in order to properly process a
command from the user and return Eva's response.
:param command: The query/command to send Eva.
:type command: string
:return: Eva's response to that query/command.
:rtype: string
"""
pass
def handle_results(self, results): #pylint: disable=R0201
"""
This method performs the necessary actions with the data returned from
Eva after a query/command.
:param results: The response dict from Eva after a query/command.
Will contain typically be a dict with the following structure::
{
'output_text': <text_here>,
'output_audio': {
'audio': <audio_data>,
'content_type': <audio_content_type>
}
}
:type results: dict
"""
if results['output_text'] is None:
print('Eva Response: ')
else:
print('Eva Response: %s' %results['output_text'])
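
# --- Illustrative addition, not part of the original Eva clients. ---
# The real implementations live in local_cli.py / remote_cli.py; this minimal
# concrete subclass only shows how the abstract hooks fit together, by
# echoing input back instead of querying a real Eva server.
class EchoCLI(CLI):
    def get_results(self, command):
        # Wrap the command in the response shape handle_results() expects.
        return {'output_text': 'echo: %s' % command}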
if __name__ == '__main__':
print('Please use local_cli.py or remote_cli.py instead.')
|
epl-1.0
| -474,309,522,082,213,950 | 35.354331 | 91 | 0.601473 | false | 4.589463 | false | false | false |
darth-vader-lg/glcncrpi
|
tools/arm-bcm2708/gcc-linaro-arm-none-eabi-4.8-2014.04/arm-none-eabi/lib/v7ve/fpv4/softfp/libstdc++.a-gdb.py
|
1
|
2451
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/cbuild/slaves/oorts/crosstool-ng/builds/arm-none-eabi-win32/install/share/gcc-4.8.3/python'
libdir = '/cbuild/slaves/oorts/crosstool-ng/builds/arm-none-eabi-win32/install/arm-none-eabi/lib/v7ve/fpv4/softfp'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
gpl-3.0
| -413,694,461,915,479,100 | 39.85 | 114 | 0.720522 | false | 3.572886 | false | false | false |
MKLab-ITI/reveal-user-annotation
|
reveal_user_annotation/rabbitmq/rabbitmq_util.py
|
1
|
3042
|
__author__ = 'Georgios Rizos ([email protected])'
import sys
import subprocess
import urllib
from amqp import Connection, Message
from amqp.exceptions import PreconditionFailed
if sys.version_info > (3,):
import urllib.parse as urlparse
else:
import urlparse
def translate_rabbitmq_url(url):
if url[0:4] == "amqp":
url = "http" + url[4:]
parts = urlparse.urlparse(url)
if parts.scheme == "https":
ssl = True
else:
ssl = False
if parts.hostname is not None:
host_name = parts.hostname
else:
host_name = "localhost"
if parts.port is not None:
port = int(parts.port)
else:
port = 5672
    if parts.username is not None:
        user_name = parts.username
        password = parts.password
    else:
        # No credentials in the URL: fall back to the standard AMQP defaults.
        user_name = "guest"
        password = "guest"
    path_parts = parts.path.split('/')
    if sys.version_info > (3,):
        virtual_host = urllib.parse.unquote(path_parts[1])
    else:
        # urllib.parse does not exist on Python 2.
        virtual_host = urllib.unquote(path_parts[1])
    # The path may be empty if the URL is e.g. "amqp://guest:guest@localhost:5672/"
    if virtual_host == "":
        virtual_host = "/" # Default vhost
return user_name, password, host_name, port, virtual_host, ssl
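# Illustrative example (not in the original module; the broker URL is made up):
#   translate_rabbitmq_url("amqps://user:pw@broker.example.com:5671/jobs")
#   -> ("user", "pw", "broker.example.com", 5671, "jobs", True)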
def establish_rabbitmq_connection(rabbitmq_uri):
"""
What it says on the tin.
Input: - rabbitmq_uri: A RabbitMQ URI.
Output: - connection: A RabbitMQ connection.
"""
userid, password, host, port, virtual_host, ssl = translate_rabbitmq_url(rabbitmq_uri)
connection = Connection(userid=userid,
password=password,
host=host,
port=port,
virtual_host=virtual_host,
                            ssl=ssl)  # honour the scheme parsed from the URL
return connection
def simple_notification(connection, queue_name, exchange_name, routing_key, text_body):
"""
Publishes a simple notification.
Inputs: - connection: A rabbitmq connection object.
- queue_name: The name of the queue to be checked or created.
- exchange_name: The name of the notification exchange.
- routing_key: The routing key for the exchange-queue binding.
- text_body: The text to be published.
"""
channel = connection.channel()
try:
channel.queue_declare(queue_name, durable=True, exclusive=False, auto_delete=False)
except PreconditionFailed:
pass
try:
channel.exchange_declare(exchange_name, type="fanout", durable=True, auto_delete=False)
except PreconditionFailed:
pass
channel.queue_bind(queue_name, exchange_name, routing_key=routing_key)
message = Message(text_body)
channel.basic_publish(message, exchange_name, routing_key)
def simpler_notification(channel, queue_name, exchange_name, routing_key, text_body):
message = Message(text_body)
channel.basic_publish(message, exchange_name, routing_key)
def rabbitmq_server_service(command):
subprocess.call(["service", "rabbitmq-server", command])
|
apache-2.0
| -7,088,256,928,776,268,000 | 27.971429 | 112 | 0.635766 | false | 3.905006 | false | false | false |
bentzinir/Buffe
|
layers/conv_pool.py
|
1
|
3468
|
import theano as t
import numpy as np
import theano.tensor as tt
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
import common
rng = np.random.RandomState(23455)
class CONV_POOL(object):
"""Conv Pool Layer of a convolutional network """
def __init__(self, filter_shape, image_shape, border_mode='valid', poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
        Note: the random number generator used to initialize the weights is
        the module-level `rng`, and the symbolic input tensor (of shape
        image_shape) is passed to step() rather than to the constructor.
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
self.filter_shape = filter_shape
self.image_shape = image_shape
self.border_mode = border_mode
self.poolsize = poolsize
assert image_shape[1] == filter_shape[1]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = t.shared(
np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=t.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=t.config.floatX)
self.b = t.shared(value=b_values, borrow=True)
# store parameters of this layer
self.params = [self.W, self.b]
def step(self, input):
# self.input = input
# convolve input feature maps with filters
# conv_out = t.conv.conv2d(
# input=input,
# filters=self.W,
# filter_shape=filter_shape,
# image_shape=image_shape
# )
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=self.filter_shape,
image_shape=self.image_shape,
border_mode=self.border_mode
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=self.poolsize,
ignore_border=True,
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
output = tt.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
return output
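
# Illustrative sizing example (not part of the original layer; the numbers
# are assumptions): with image_shape=(batch, 1, 28, 28),
# filter_shape=(8, 1, 5, 5) and poolsize=(2, 2), the 'valid' convolution
# yields 24x24 feature maps and max-pooling leaves an output of shape
# (batch, 8, 12, 12).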
|
mit
| 1,760,589,534,825,072,000 | 33.68 | 88 | 0.5891 | false | 3.940909 | false | false | false |
y4ns0l0/collectd-ceph
|
plugins/ceph_latency_plugin.py
|
1
|
3887
|
#!/usr/bin/env python
#
# vim: tabstop=4 shiftwidth=4
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License is applicable.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors:
# Ricardo Rocha <[email protected]>
#
# About this plugin:
# This plugin evaluates current latency to write to the test pool.
#
# collectd:
# http://collectd.org
# collectd-python:
# http://collectd.org/documentation/manpages/collectd-python.5.shtml
# ceph pools:
# https://ceph.com/docs/master/man/8/rados/#pool-specific-commands
#
import collectd
import re
import traceback
import subprocess
import base
class CephLatencyPlugin(base.Base):
def __init__(self):
base.Base.__init__(self)
self.prefix = 'ceph'
def get_stats(self):
"""Retrieves stats regarding latency to write to a test pool"""
ceph_cluster = "%s-%s" % (self.prefix, self.cluster)
data = {
ceph_cluster: {},
}
output = None
try:
command = "timeout 30s rados --cluster %s -p %s bench 10 write -t 1 -b 65536" % (self.cluster, format(self.testpool))
output = subprocess.check_output(command, shell=True)
except Exception as exc:
collectd.error("ceph-latency: failed to run rados bench :: %s :: %s"
% (exc, traceback.format_exc()))
return
        if output is None:
            collectd.error('ceph-latency: failed to run rados bench :: output was None')
            return
regex_match = re.compile('^([a-zA-Z]+) [lL]atency\S*: \s* (\w+.?\w+)\s*', re.MULTILINE)
results = regex_match.findall(output)
if len(results) == 0:
# this is a fast hack, should put regexps into an array and try 'em all
# my format:
## Average Latency: 0.00517643
## Stddev Latency: 0.00179458
regex_match = re.compile('^([a-zA-Z]+) [lL]atency: +(\w+.?\w+)', re.MULTILINE)
results = regex_match.findall(output)
if len(results) == 0:
# hopeless
collectd.error('ceph-latency: failed to run rados bench :: output unrecognized %s' % output)
return
data[ceph_cluster]['cluster'] = {}
for key, value in results:
if key == 'Average':
data[ceph_cluster]['cluster']['avg_latency'] = float(value) * 1000
elif key == 'Stddev':
data[ceph_cluster]['cluster']['stddev_latency'] = float(value) * 1000
elif key == 'Max':
data[ceph_cluster]['cluster']['max_latency'] = float(value) * 1000
elif key == 'Min':
data[ceph_cluster]['cluster']['min_latency'] = float(value) * 1000
return data
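# Shape of the structure returned by get_stats() (illustrative; the cluster
# name "ceph-main" and the numbers are assumptions):
#   {"ceph-main": {"cluster": {"avg_latency": 5.18, "stddev_latency": 1.79}}}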
try:
plugin = CephLatencyPlugin()
except Exception as exc:
collectd.error("ceph-latency: failed to initialize ceph latency plugin :: %s :: %s"
% (exc, traceback.format_exc()))
def configure_callback(conf):
"""Received configuration information"""
plugin.config_callback(conf)
collectd.register_read(read_callback, plugin.interval)
def read_callback():
"""Callback triggerred by collectd on read"""
plugin.read_callback()
collectd.register_init(CephLatencyPlugin.reset_sigchld)
collectd.register_config(configure_callback)
|
gpl-2.0
| 4,239,711,054,730,847,700 | 34.336364 | 129 | 0.624132 | false | 3.795898 | false | false | false |
eepalms/gem5-newcache
|
src/sim/Process.py
|
1
|
3132
|
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class Process(SimObject):
type = 'Process'
abstract = True
cxx_header = "sim/process.hh"
input = Param.String('cin', "filename for stdin")
output = Param.String('cout', 'filename for stdout')
errout = Param.String('cerr', 'filename for stderr')
system = Param.System(Parent.any, "system process will run on")
max_stack_size = Param.MemorySize('64MB', 'maximum size of the stack')
@classmethod
def export_methods(cls, code):
code('bool map(Addr vaddr, Addr paddr, int size);')
class LiveProcess(Process):
type = 'LiveProcess'
cxx_header = "sim/process.hh"
executable = Param.String('', "executable (overrides cmd[0] if set)")
cmd = VectorParam.String("command line (executable plus arguments)")
env = VectorParam.String([], "environment settings")
cwd = Param.String('', "current working directory")
uid = Param.Int(100, 'user id')
euid = Param.Int(100, 'effective user id')
gid = Param.Int(100, 'group id')
egid = Param.Int(100, 'effective group id')
pid = Param.Int(100, 'process id')
ppid = Param.Int(99, 'parent process id')
simpoint = Param.UInt64(0, 'simulation point at which to start simulation')
# set P bit in SE mode
need_protect = Param.Bool(0, 'whether the program needs to be protected')
    protected_start = Param.UInt64(0, 'start address of protected data section')
protected_end = Param.UInt64(0, 'end address of protected data section')
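
# Illustrative SE-mode usage sketch (not part of this file; the paths, the
# argument list and the surrounding system config are assumptions):
#   process = LiveProcess(pid=100, ppid=99)
#   process.executable = '/path/to/binary'
#   process.cmd = [process.executable, 'arg1']
#   cpu.workload = process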
|
bsd-3-clause
| -4,790,954,474,984,413,000 | 47.9375 | 79 | 0.735951 | false | 4.088773 | false | false | false |
simbits/Lumiere
|
cabinet_orig.py
|
1
|
2565
|
#!/usr/bin/env python
import random
import socket
import struct
import sys
import time
from Adafruit_MCP230xx import Adafruit_MCP230XX
CABINET_VERSION='1.0b'
START_MSG='## Cabinet version %s ##' % (CABINET_VERSION)
MCAST_GRP = ('224.19.79.1', 9999)
DRAWERS = 9
USE_PULLUPS = 1
RETRIGGER_DELAY = 10 #seconds
WAIT_DELAY = 3 #seconds
if __name__ == '__main__':
mcp = Adafruit_MCP230XX(address=0x20, num_gpios=16) # MCP23017
c_state = [True] * DRAWERS
p_state = [True] * DRAWERS
trigger_delay = [0] * DRAWERS
for i in range(0, DRAWERS):
mcp.config(i, mcp.INPUT)
mcp.pullup(i, USE_PULLUPS)
p_state[i] = bool((mcp.input(i) >> i) & 0x01)
print 'initial state: %s' % (str(p_state))
print 'setting up mcast group @%s' % (str(MCAST_GRP))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(0.2)
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
try:
sock.sendto(START_MSG, MCAST_GRP)
except Exception as e:
print 'exception during send: %s' % (str(e))
sys.exit(1)
while True:
for i in range(0, DRAWERS):
if trigger_delay[i] > 0:
trigger_delay[i] = trigger_delay[i] - 1
#c_state[i] = bool((mcp.input(i) >> i) & 0x01)
c_state[i] = bool(random.randint(0,1))
triggered = {i for i in range(0, DRAWERS)
if c_state[i] != p_state[i] and
not c_state[i] and
trigger_delay[i] == 0}
closed = {i for i in range(0, DRAWERS)
if c_state[i] != p_state[i] and
c_state[i]}
for i in triggered:
trigger_delay[i] = RETRIGGER_DELAY / WAIT_DELAY
print 'prev: %s' % (p_state)
print 'cur : %s' % (c_state)
print 'd: %s' % (trigger_delay)
print 't: %s' % (triggered)
print 'c: %s' % (closed)
try:
for i in closed:
print 'sending closed drawer: %d' % (i)
sock.sendto('c:%d' % (i), MCAST_GRP)
drawer = random.choice(list(triggered))
print 'sending opened drawer %d' % (drawer)
sock.sendto('o:%d' % (drawer), MCAST_GRP)
except IndexError:
pass
except Exception as e:
print 'exception during send: %s' % (str(e))
p_state = list(c_state)
time.sleep(WAIT_DELAY) # relax a little
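
# Wire format summary (illustrative recap of the code above): messages on the
# multicast group are plain strings -- "o:<drawer>" when a drawer is opened,
# "c:<drawer>" when one is closed, plus the START_MSG banner sent on boot.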
|
mit
| -227,441,266,886,782,400 | 29.176471 | 68 | 0.527875 | false | 3.079232 | false | false | false |
theetcher/fxpt
|
fxpt/fx_refsystem/options_dialog_ui.py
|
1
|
4062
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'options_dialog_ui.ui'
#
# Created: Fri Nov 18 22:58:31 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(576, 351)
self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_2.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.groupBox = QtGui.QGroupBox(Dialog)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setContentsMargins(6, 6, 6, 6)
self.verticalLayout.setObjectName("verticalLayout")
self.uiLST_roots = QtGui.QListWidget(self.groupBox)
self.uiLST_roots.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.uiLST_roots.setAlternatingRowColors(True)
self.uiLST_roots.setObjectName("uiLST_roots")
self.verticalLayout.addWidget(self.uiLST_roots)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.uiBTN_add = QtGui.QPushButton(self.groupBox)
self.uiBTN_add.setObjectName("uiBTN_add")
self.horizontalLayout.addWidget(self.uiBTN_add)
self.uiBTN_remove = QtGui.QPushButton(self.groupBox)
self.uiBTN_remove.setObjectName("uiBTN_remove")
self.horizontalLayout.addWidget(self.uiBTN_remove)
self.uiBTN_setActive = QtGui.QPushButton(self.groupBox)
self.uiBTN_setActive.setObjectName("uiBTN_setActive")
self.horizontalLayout.addWidget(self.uiBTN_setActive)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_2.addWidget(self.groupBox)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setMinimumSize(QtCore.QSize(0, 0))
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout_2.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.uiLST_roots.setCurrentRow(-1)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QObject.connect(Dialog, QtCore.SIGNAL("finished(int)"), Dialog.onDialogFinished)
QtCore.QObject.connect(Dialog, QtCore.SIGNAL("accepted()"), Dialog.onDialogAccepted)
QtCore.QObject.connect(self.uiBTN_add, QtCore.SIGNAL("clicked()"), Dialog.onAddClicked)
QtCore.QObject.connect(self.uiBTN_setActive, QtCore.SIGNAL("clicked()"), Dialog.onSetActiveClicked)
QtCore.QObject.connect(self.uiLST_roots, QtCore.SIGNAL("itemSelectionChanged()"), Dialog.onSelectionChanged)
QtCore.QObject.connect(self.uiBTN_remove, QtCore.SIGNAL("clicked()"), Dialog.onRemoveClicked)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "FX RefSystem Options", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Dialog", "References Location Roots", None, QtGui.QApplication.UnicodeUTF8))
self.uiLST_roots.setSortingEnabled(True)
self.uiBTN_add.setText(QtGui.QApplication.translate("Dialog", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.uiBTN_remove.setText(QtGui.QApplication.translate("Dialog", "Remove", None, QtGui.QApplication.UnicodeUTF8))
self.uiBTN_setActive.setText(QtGui.QApplication.translate("Dialog", "Set Active", None, QtGui.QApplication.UnicodeUTF8))
|
mit
| 1,141,909,398,221,489,200 | 57.869565 | 137 | 0.727228 | false | 3.850237 | false | false | false |
KrisHammerberg/DEMtools
|
demtools.py
|
1
|
4546
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DemTools
A QGIS plugin
A suite of tools for doing neat things with DEMs
-------------------
begin : 2014-05-15
copyright : (C) 2014 by Kris Hammerberg
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
import os.path
import sys
from shaDEM import shaDEM
from svf import svf
from solaraccess import SolarAccess
class DemTools:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# save reference to tool interfaces
self.shaDEM = shaDEM(iface)
self.svf = svf(iface)
self.SolarAccess = SolarAccess(iface)
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'demtools_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
#check necessary libraries
try:
import numpy
import numexpr
except ImportError:
            QMessageBox.critical( self.iface.mainWindow(),"ImportError", "Plugin requires Numpy & Numexpr libraries.\nSee http://www.numpy.org & https://code.google.com/p/numexpr/" )
try:
import Pysolar as solar
except ImportError:
try:
import solar
except ImportError:
                QMessageBox.critical( self.iface.mainWindow(),"ImportError", "Plugin requires Pysolar libraries.\nSee http://pysolar.org/" )
def initGui(self):
# Create action that will start plugin configuration
self.shaDEMact = QAction(
QIcon(":/plugins/demtools/shaDEM.png"),
u"ShaDEM", self.iface.mainWindow())
self.SVFact = QAction(
QIcon(":/plugins/demtools/SVF.png"),
u"SVF", self.iface.mainWindow())
self.solaract = QAction(
QIcon(":/plugins/demtools/solaraccess.png"),
u"SolarAccess", self.iface.mainWindow())
# connect the actions to the run methods
self.shaDEMact.triggered.connect(self.shaDEM.start)
self.SVFact.triggered.connect(self.svf.start)
self.solaract.triggered.connect(self.SolarAccess.start)
# Add toolbar buttons and menu items
self.iface.addToolBarIcon(self.shaDEMact)
self.iface.addPluginToRasterMenu(u"&DEM Tools", self.shaDEMact)
self.iface.addToolBarIcon(self.SVFact)
self.iface.addPluginToRasterMenu(u"&DEM Tools", self.SVFact)
self.iface.addToolBarIcon(self.solaract)
self.iface.addPluginToRasterMenu(u"&DEM Tools", self.solaract)
def unload(self):
# Remove the plugin menu items and icons
self.iface.removePluginRasterMenu(u"&DEM Tools", self.shaDEMact)
self.iface.removeToolBarIcon(self.shaDEMact)
self.iface.removePluginRasterMenu(u"&DEM Tools", self.SVFact)
self.iface.removeToolBarIcon(self.SVFact)
self.iface.removePluginRasterMenu(u"&DEM Tools", self.solaract)
self.iface.removeToolBarIcon(self.solaract)
|
gpl-2.0
| -3,094,973,200,494,068,700 | 37.525424 | 183 | 0.543995 | false | 4.317189 | false | false | false |
WeAreWizards/proppy
|
proppy/proposal.py
|
1
|
6603
|
from collections import defaultdict
from proppy.validators import (
is_currency,
is_date,
is_percentage,
is_present,
are_valid_deliverables,
are_valid_rates
)
from proppy.utils import (
get_work_days_interval,
to_date
)
class Proposal(object):
validation_rules = {
'customer.company': [is_present],
'customer.person': [is_present],
'customer.email': [is_present],
'project.name': [is_present],
'project.description': [is_present],
'project.worker': [is_present],
'project.currency': [is_present, is_currency],
'project.discount': [is_percentage],
'project.start': [is_present, is_date()],
'project.end': [is_present, is_date()],
'project.uat_start': [is_date(optional=True)],
'project.uat_end': [is_date(optional=True)],
'project.rates': [is_present, are_valid_rates],
'project.deliverables': [is_present, are_valid_deliverables],
}
def __init__(self, config):
self._errors = []
# Not using .get below as we have already checked for them
# when loading the toml
self.customer = config['customer']
self.project = config['project']
def _fetch_value(self, field):
"""
Allow dotted path to class objects dict, ie
customer.company is equivalent to self.customer['company']
"""
paths = field.split(".")
base = getattr(self, paths[0])
for key in paths[1:]:
base = base.get(key)
return base
def basic_validation(self):
"""
Only validates using the class validation dict: presence, type etc
Does not check business logic
"""
for field, rules in self.validation_rules.items():
value = self._fetch_value(field)
for rule in rules:
valid = rule(value)
# Only show one error at a time per field
if not valid:
self._errors.append(rule.message % field)
break
def logic_validation(self):
"""
Ensure there's no 'stupid' data, like a UAT period
lasting 1 month while the dev is 5 days or a 100% reduction.
Also saves some of the computed data back on the project object
for use in templates:
- start_date
- end_date
- dev_length
- sum_paid_deliverables
- sum_free_deliverables
"""
        # can't have all the deliverables set to free
if all(d['free'] for d in self.project['deliverables']):
self._errors.append("Can't have all deliverables set to free")
return
deliverables = self.project['deliverables']
        # reject any deliverable that uses a rate we haven't specified
rate_names = [rate['name'] for rate in self.project['rates']]
if any(d['rate'] not in rate_names for d in deliverables):
self._errors.append(
"An unknown rate was used in a deliverable"
)
return
        # start and end dates are accurate given the estimates
self.project['start_date'] = to_date(self.project['start'])
self.project['end_date'] = to_date(self.project['end'])
length_project = get_work_days_interval(
self.project['start_date'], self.project['end_date']
)
dev_length = sum([d['estimate'] for d in deliverables])
self.project['dev_length'] = dev_length
dev_length /= self.project['worker']
# not too short
if dev_length > length_project:
self._errors.append(
"Project take more time than the timeline allows"
)
return
# but not too long either
if length_project > dev_length * 3:
self._errors.append(
"Project take way less time than the timeline shows"
)
return
# UAT validation: needs to be after the end date of project
# and should be shorter than the project
# UAT is not mandatory though
if 'uat_start' in self.project:
self.project['uat_start_date'] = to_date(self.project['uat_start'])
self.project['uat_end_date'] = to_date(self.project['uat_end'])
if self.project['uat_start_date'] < self.project['end_date']:
self._errors.append(
"UAT can't start before the end of the project"
)
return
length_uat = get_work_days_interval(
self.project['uat_start_date'], self.project['uat_end_date']
)
if length_uat > 14:
self._errors.append(
"UAT can't take longer than two weeks"
)
return
# And finally onto the costs validation
day_rates = {r['name']: r['amount'] for r in self.project['rates']}
# the sum we will invoice based on the deliverables itself
sum_deliverables = 0
# the sum we give away as a discount, ie free deliverables
sum_free_deliverables = 0
self.project['costs_by_rate'] = defaultdict(int)
for d in deliverables:
cost = d['estimate'] * day_rates[d['rate']]
sum_deliverables += cost
if d['free']:
sum_free_deliverables += cost
self.project['costs_by_rate'][d['rate']] += cost
self.project['sum_deliverables'] = sum_deliverables
self.project['sum_free_deliverables'] = sum_free_deliverables
# now we need to check that the discount validation is not too high
if self.project['discount'] > 50:
self._errors.append("Discount is set too high")
return
self.project['discount_amount'] = (
(sum_deliverables - sum_free_deliverables) / 100
* self.project['discount']
)
self.project['invoice_amount'] = (
sum_deliverables
- sum_free_deliverables
- self.project['discount_amount']
)
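        # Worked example (illustrative numbers): deliverables worth 10000,
        # of which 1000 are free, with discount=10 give
        # discount_amount = (10000 - 1000) / 100 * 10 = 900 and
        # invoice_amount = 10000 - 1000 - 900 = 8100.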
def is_valid(self):
self.basic_validation()
# If we get errors during basic validation, no need
# to bother doing the business logic one
if len(self._errors) > 0:
return False
# Call business logic before the return
self.logic_validation()
return len(self._errors) == 0
def print_errors(self):
print("ERRORS:")
print('\n'.join(self._errors))
|
mit
| 5,684,873,120,759,950,000 | 34.12234 | 79 | 0.562169 | false | 4.221867 | false | false | false |
dann4520/vigenere_encoder_decoder
|
vigenere_encoder_decoder.py
|
1
|
9778
|
#written using Python 2.7.8
cipher_library = [{"A": "A", "B": "B", "C": "C", "D": "D", "E": "E", "F": "F", "G": "G", "H": "H", "I": "I", "J": "J", "K": "K", "L": "L", "M": "M", "N": "N", "O": "O", "P": "P", "Q": "Q", "R": "R", "S": "S", "T": "T", "U": "U", "V": "V", "W": "W", "X": "X", "Y": "Y", "Z": "Z"},
{"A": "B", "B": "C", "C": "D", "D": "E", "E": "F", "F": "G", "G": "H", "H": "I", "I": "J", "J": "K", "K": "L", "L": "M", "M": "N", "N": "O", "O": "P", "P": "Q", "Q": "R", "R": "S", "S": "T", "T": "U", "U": "V", "V": "W", "W": "X", "X": "Y", "Y": "Z", "Z": "A"},
{"A": "C", "B": "D", "C": "E", "D": "F", "E": "G", "F": "H", "G": "I", "H": "J", "I": "K", "J": "L", "K": "M", "L": "N", "M": "O", "N": "P", "O": "Q", "P": "R", "Q": "S", "R": "T", "S": "U", "T": "V", "U": "W", "V": "X", "W": "Y", "X": "Z", "Y": "A", "Z": "B"},
{"A": "D", "B": "E", "C": "F", "D": "G", "E": "H", "F": "I", "G": "J", "H": "K", "I": "L", "J": "M", "K": "N", "L": "O", "M": "P", "N": "Q", "O": "R", "P": "S", "Q": "T", "R": "U", "S": "V", "T": "W", "U": "X", "V": "Y", "W": "Z", "X": "A", "Y": "B", "Z": "C"},
{"A": "E", "B": "F", "C": "G", "D": "H", "E": "I", "F": "J", "G": "K", "H": "L", "I": "M", "J": "N", "K": "O", "L": "P", "M": "Q", "N": "R", "O": "S", "P": "T", "Q": "U", "R": "V", "S": "W", "T": "X", "U": "Y", "V": "Z", "W": "A", "X": "B", "Y": "C", "Z": "D"},
{"A": "F", "B": "G", "C": "H", "D": "I", "E": "J", "F": "K", "G": "L", "H": "M", "I": "N", "J": "O", "K": "P", "L": "Q", "M": "R", "N": "S", "O": "T", "P": "U", "Q": "V", "R": "W", "S": "X", "T": "Y", "U": "Z", "V": "A", "W": "B", "X": "C", "Y": "D", "Z": "E"},
{"A": "G", "B": "H", "C": "I", "D": "J", "E": "K", "F": "L", "G": "M", "H": "N", "I": "O", "J": "P", "K": "Q", "L": "R", "M": "S", "N": "T", "O": "U", "P": "V", "Q": "W", "R": "X", "S": "Y", "T": "Z", "U": "A", "V": "B", "W": "C", "X": "D", "Y": "E", "Z": "F"},
{"A": "H", "B": "I", "C": "J", "D": "K", "E": "L", "F": "M", "G": "N", "H": "O", "I": "P", "J": "Q", "K": "R", "L": "S", "M": "T", "N": "U", "O": "V", "P": "W", "Q": "X", "R": "Y", "S": "Z", "T": "A", "U": "B", "V": "C", "W": "D", "X": "E", "Y": "F", "Z": "G"},
{"A": "I", "B": "J", "C": "K", "D": "L", "E": "M", "F": "N", "G": "O", "H": "P", "I": "Q", "J": "R", "K": "S", "L": "T", "M": "U", "N": "V", "O": "W", "P": "X", "Q": "Y", "R": "Z", "S": "A", "T": "B", "U": "C", "V": "D", "W": "E", "X": "F", "Y": "G", "Z": "H"},
{"A": "J", "B": "K", "C": "L", "D": "M", "E": "N", "F": "O", "G": "P", "H": "Q", "I": "R", "J": "S", "K": "T", "L": "U", "M": "V", "N": "W", "O": "X", "P": "Y", "Q": "Z", "R": "A", "S": "B", "T": "C", "U": "D", "V": "E", "W": "F", "X": "G", "Y": "H", "Z": "I"},
{"A": "K", "B": "L", "C": "M", "D": "N", "E": "O", "F": "P", "G": "Q", "H": "R", "I": "S", "J": "T", "K": "U", "L": "V", "M": "W", "N": "X", "O": "Y", "P": "Z", "Q": "A", "R": "B", "S": "C", "T": "D", "U": "E", "V": "F", "W": "G", "X": "H", "Y": "I", "Z": "J"},
{"A": "L", "B": "M", "C": "N", "D": "O", "E": "P", "F": "Q", "G": "R", "H": "S", "I": "T", "J": "U", "K": "V", "L": "W", "M": "X", "N": "Y", "O": "Z", "P": "A", "Q": "B", "R": "C", "S": "D", "T": "E", "U": "F", "V": "G", "W": "H", "X": "I", "Y": "J", "Z": "K"},
{"A": "M", "B": "N", "C": "O", "D": "P", "E": "Q", "F": "R", "G": "S", "H": "T", "I": "U", "J": "V", "K": "W", "L": "X", "M": "Y", "N": "Z", "O": "A", "P": "B", "Q": "C", "R": "D", "S": "E", "T": "F", "U": "G", "V": "H", "W": "I", "X": "J", "Y": "K", "Z": "L"},
{"A": "N", "B": "O", "C": "P", "D": "Q", "E": "R", "F": "S", "G": "T", "H": "U", "I": "V", "J": "W", "K": "X", "L": "Y", "M": "Z", "N": "A", "O": "B", "P": "C", "Q": "D", "R": "E", "S": "F", "T": "G", "U": "H", "V": "I", "W": "J", "X": "K", "Y": "L", "Z": "M"},
{"A": "O", "B": "P", "C": "Q", "D": "R", "E": "S", "F": "T", "G": "U", "H": "V", "I": "W", "J": "X", "K": "Y", "L": "Z", "M": "A", "N": "B", "O": "C", "P": "D", "Q": "E", "R": "F", "S": "G", "T": "H", "U": "I", "V": "J", "W": "K", "X": "L", "Y": "M", "Z": "N"},
{"A": "P", "B": "Q", "C": "R", "D": "S", "E": "T", "F": "U", "G": "V", "H": "W", "I": "X", "J": "Y", "K": "Z", "L": "A", "M": "B", "N": "C", "O": "D", "P": "E", "Q": "F", "R": "G", "S": "H", "T": "I", "U": "J", "V": "K", "W": "L", "X": "M", "Y": "N", "Z": "O"},
{"A": "Q", "B": "R", "C": "S", "D": "T", "E": "U", "F": "V", "G": "W", "H": "X", "I": "Y", "J": "Z", "K": "A", "L": "B", "M": "C", "N": "D", "O": "E", "P": "F", "Q": "G", "R": "H", "S": "I", "T": "J", "U": "K", "V": "L", "W": "M", "X": "N", "Y": "O", "Z": "P"},
{"A": "R", "B": "S", "C": "T", "D": "U", "E": "V", "F": "W", "G": "X", "H": "Y", "I": "Z", "J": "A", "K": "B", "L": "C", "M": "D", "N": "E", "O": "F", "P": "G", "Q": "H", "R": "I", "S": "J", "T": "K", "U": "L", "V": "M", "W": "N", "X": "O", "Y": "P", "Z": "Q"},
{"A": "S", "B": "T", "C": "U", "D": "V", "E": "W", "F": "X", "G": "Y", "H": "Z", "I": "A", "J": "B", "K": "C", "L": "D", "M": "E", "N": "F", "O": "G", "P": "H", "Q": "I", "R": "J", "S": "K", "T": "L", "U": "M", "V": "N", "W": "O", "X": "P", "Y": "Q", "Z": "R"},
{"A": "T", "B": "U", "C": "V", "D": "W", "E": "X", "F": "Y", "G": "Z", "H": "A", "I": "B", "J": "C", "K": "D", "L": "E", "M": "F", "N": "G", "O": "H", "P": "I", "Q": "J", "R": "K", "S": "L", "T": "M", "U": "N", "V": "O", "W": "P", "X": "Q", "Y": "R", "Z": "S"},
{"A": "U", "B": "V", "C": "W", "D": "X", "E": "Y", "F": "Z", "G": "A", "H": "B", "I": "C", "J": "D", "K": "E", "L": "F", "M": "G", "N": "H", "O": "I", "P": "J", "Q": "K", "R": "L", "S": "M", "T": "N", "U": "O", "V": "P", "W": "Q", "X": "R", "Y": "S", "Z": "T"},
{"A": "V", "B": "W", "C": "X", "D": "Y", "E": "Z", "F": "A", "G": "B", "H": "C", "I": "D", "J": "E", "K": "F", "L": "G", "M": "H", "N": "I", "O": "J", "P": "K", "Q": "L", "R": "M", "S": "N", "T": "O", "U": "P", "V": "Q", "W": "R", "X": "S", "Y": "T", "Z": "U"},
{"A": "W", "B": "X", "C": "Y", "D": "Z", "E": "A", "F": "B", "G": "C", "H": "D", "I": "E", "J": "F", "K": "G", "L": "H", "M": "I", "N": "J", "O": "K", "P": "L", "Q": "M", "R": "N", "S": "O", "T": "P", "U": "Q", "V": "R", "W": "S", "X": "T", "Y": "U", "Z": "V"},
{"A": "X", "B": "Y", "C": "Z", "D": "A", "E": "B", "F": "C", "G": "D", "H": "E", "I": "F", "J": "G", "K": "H", "L": "I", "M": "J", "N": "K", "O": "L", "P": "M", "Q": "N", "R": "O", "S": "P", "T": "Q", "U": "R", "V": "S", "W": "T", "X": "U", "Y": "V", "Z": "W"},
{"A": "Y", "B": "Z", "C": "A", "D": "B", "E": "C", "F": "D", "G": "E", "H": "F", "I": "G", "J": "H", "K": "I", "L": "J", "M": "K", "N": "L", "O": "M", "P": "N", "Q": "O", "R": "P", "S": "Q", "T": "R", "U": "S", "V": "T", "W": "U", "X": "V", "Y": "W", "Z": "X"},
{"A": "Z", "B": "A", "C": "B", "D": "C", "E": "D", "F": "E", "G": "F", "H": "G", "I": "H", "J": "I", "K": "J", "L": "K", "M": "L", "N": "M", "O": "N", "P": "O", "Q": "P", "R": "Q", "S": "R", "T": "S", "U": "T", "V": "U", "W": "V", "X": "W", "Y": "X", "Z": "Y"}
]
key_library = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6, "H": 7, "I": 8, "J": 9, "K": 10, "L": 11, "M": 12, "N": 13, "O": 14, "P": 15, "Q": 16, "R": 17, "S": 18, "T": 19, "U": 20, "V": 21, "W": 22, "X": 23, "Y": 24, "Z": 25}
def encode_message(message, passkey):
passencoder = passkey
encoded_message = ""
count = 0
while len(passencoder) < len(message): #ensures we have a long enough passencoder
passencoder += passkey
for char in message:
if char == " ":
encoded_message += " " #passes blank spaces through
elif char not in cipher_library[0] and char != " ": #if not in cipher library
            continue #will omit char.
else:
encoded_letter = cipher_library[key_library[passencoder[count]]][char] #looks up encoded letter using dictionary
            encoded_message += encoded_letter #passencoder[count] selects which cipher alphabet to use
count += 1
print encoded_message
raw_input("Press Enter to continue...")
def decode_message(encoded_message, passkey):
#provided with an encoded message and the proper passkey will return decoded message
passencoder = passkey
while len(passencoder) < len(encoded_message):
passencoder += passkey
count = 0
decoded_message = ""
for c in encoded_message:
if c == " ":
decoded_message += " "
for key, char in cipher_library[key_library[passencoder[count]]].items():
if char == c:
decoded_message += key
count += 1
print decoded_message
raw_input("Press Enter to continue...")
user_option = ""
while user_option != "0":
user_option = raw_input("Enter '1' to encode a message." '\n'
"Enter '2' to decode a message." '\n'
"Enter '0' to quit: ")
if user_option == "0":
print "Quitting is not cool"
elif user_option == "1":
encode_message(raw_input("Input message to encode: ").upper(), passkey = raw_input("Input keyword to encode: ").upper())
elif user_option == "2":
decode_message(raw_input("Input message to decode: ").upper(), passkey = raw_input("Input keyword to decode: ").upper())
else:
print "Invalid selection. Please try again."
|
mit
| -7,248,983,136,332,615,000 | 99.804124 | 279 | 0.275312 | false | 2.059393 | false | false | false |
jeroanan/GameCollection
|
UI/Handlers/HandlerFactory.py
|
1
|
2201
|
# copyright (c) David Wilson 2015
# This file is part of Icarus.
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>
import json
from UI.Cookies.Cookies import Cookies
from UI.Handlers.Exceptions.UnrecognisedHandlerException import UnrecognisedHandlerException
from UI.Handlers.IndexHandler import IndexHandler
from UI.Handlers.Session.Session import Session
class HandlerFactory(object):
def __init__(self, interactor_factory, renderer, config):
self.__interactor_factory = interactor_factory
self.__renderer = renderer
self.__config = config
self.__handlers = self.__load_handlers()
def __load_handlers(self):
with open("UI/Handlers/handlers.json") as f:
return json.load(f)["handlers"][0]
def create(self, handler_type):
handler = None
def renew_cookies():
if handler is None:
raise ValueError("handler not set")
handler.renew_cookies()
def string_to_handler():
ht = self.__handlers[handler_type]
module = __import__("UI.Handlers." + ht, fromlist=ht)
class_ = getattr(module, ht)
return class_(self.__interactor_factory, self.__renderer)
if handler_type == "index":
handler = IndexHandler(self.__interactor_factory, self.__renderer, self.__config)
elif handler_type in self.__handlers:
handler = string_to_handler()
else:
raise UnrecognisedHandlerException
handler.session = Session()
handler.cookies = Cookies()
renew_cookies()
return handler
|
gpl-3.0
| 6,035,370,178,296,243,000 | 34.5 | 93 | 0.665607 | false | 4.240848 | false | false | false |
alex-pardo/ANLP-PROJECT
|
findDemonyms.py
|
1
|
1884
|
import re
from string import *
import sys
from nltk import *
import locale
from wikitools import wiki
from wikitools import api
from wikitools import page
from wikitools import category
wikiAPI = {
'en': "http://en.wikipedia.org/w/api.php"}
site = wiki.Wiki(wikiAPI['en'])
def generateDemonym(place, add, replace):
candidates = []
for rule in replace:
if len(rule[0]) > 0 and place.endswith(rule[0]):
candidates.append(place[:-len(rule[0])]+rule[1])
for rule in add:
if len(rule[0]) == 0 or place.endswith(rule[0]):
candidates.append(place+rule[1])
return candidates
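
# Illustrative example (the rule files' contents are assumptions): for the
# place "Italy", a replace rule ("y", "ian") yields the candidate "Italian",
# while an add rule ("", "ese") yields "Italyese"; every candidate is later
# scored against the text of the place's Wikipedia article.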
def matchCandidates(link, candidates):
text = page.Page(site, link).getWikiText()
#if 'demonym' in text.lower():
score = 0
rules = [0]*len(candidates)
pos = 0
for candidate in candidates:
if findWholeWord(candidate.lower())(text.lower()):
score += 1
rules[pos] += 1
pos += 1
return score, rules
# else:
# raise NameError('No demonym')
def findWholeWord(w):
return re.compile(r'\b({0})\b'.format(w), flags=re.IGNORECASE).search
add = []
replace = []
with open('add_rules.csv', 'r') as f:
for line in f.readlines():
line = line.replace('\n','')
tmp = line.split(',')
add.append((tmp[0],tmp[1]))
with open('replace_rules.csv', 'r') as f:
for line in f.readlines():
line = line.replace('\n','')
tmp = line.split(',')
replace.append((tmp[0],tmp[1]))
matchings = 0
test_len = 0
f = open('countries.csv', 'r')
for line in f.readlines():
line = line.replace('\n','')
try:
candidates = generateDemonym(line, add, replace)
score, rules = matchCandidates(line, candidates)
if score > 0:
matching_rules = []
for r in range(0, len(candidates)):
if rules[r]:
matching_rules.append(candidates[r])
print line, ',' ,matching_rules
if score > 0:
matchings += 1
test_len += 1
except:
pass
f.close()
print matchings, test_len
|
apache-2.0
| 5,177,841,268,757,151,000 | 20.409091 | 73 | 0.654989 | false | 2.863222 | false | false | false |
wzong/TinyCloud
|
virtualbox/apis.py
|
1
|
3618
|
import json
import os
from django.http import JsonResponse, StreamingHttpResponse
from django.views.decorators.http import require_http_methods
from authentication import authenticator
from machine import machine_controller
from virtualbox import virtualbox_controller
GUEST_ADDITIONS_DIR = '/var/tinymakecloud/additions/'
GUEST_TEMPLATES_DIR = '/var/tinymakecloud/templates/'
GUEST_OS_DIR = '/var/tinymakecloud/images/'
def _ListFiles(dir_path):
return [
name for name in os.listdir(dir_path)
if os.path.isfile(os.path.join(dir_path, name))
]
def StreamJson(iterator):
yield '['
seq = 0
for line in iterator:
line['seq'] = seq
line['done'] = False
seq += 1
yield json.dumps(line) + ','
yield json.dumps({'seq': seq, 'msg': '', 'done': True, 's': 'OK'})
yield ']'
def StreamError(error):
json_str = json.dumps({'s': 'FAIL', 'msg': error, 'done': True, 'seq': 0})
yield '[' + json_str + ']'
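
# Illustrative streamed payload (values assumed): one progress line followed
# by the terminator element appended by StreamJson:
#   [{"msg": "cloning disk", "s": "OK", "seq": 0, "done": false},
#    {"seq": 1, "msg": "", "done": true, "s": "OK"}]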
@require_http_methods(['POST'])
@authenticator.RequireAuth
def StartVms(request, vm_names):
vm_names = vm_names.split(',')
return StreamingHttpResponse(StreamJson(
virtualbox_controller.StartVms(vm_names)))
@require_http_methods(['POST'])
@authenticator.RequireAuth
def PowerOffVms(request, vm_names):
vm_names = vm_names.split(',')
return StreamingHttpResponse(StreamJson(
virtualbox_controller.PowerOffVms(vm_names)))
@require_http_methods(['POST'])
@authenticator.RequireAuth
def DeleteVms(request, vm_names):
vm_names = vm_names.split(',')
return StreamingHttpResponse(StreamJson(
virtualbox_controller.DeleteVms(vm_names)))
@require_http_methods(['POST'])
@authenticator.RequireAuth
def CreateVm(request):
reqJson = json.loads(request.body)
vm_name = reqJson['name']
machine_name = reqJson['machine']
memory = reqJson['memory']
if memory not in virtualbox_controller.OPTIONS_MEMORY:
return StreamingHttpResponse(StreamError('Invalid memory option.'))
if reqJson['guest_creation'] == 'CLONE':
guest_template = reqJson['guest_template']
guest_template_from_machine = reqJson['guest_template_from_machine']
return StreamingHttpResponse(StreamJson(
virtualbox_controller.CreateVmFromSnapshot(
vm_name,
machine_name=machine_name,
memory=str(memory),
snapshot_path=guest_template,
snapshot_from_machine=guest_template_from_machine)))
elif reqJson['guest_creation'] == 'INSTALL':
guest_image = reqJson['guest_image']
guest_image_from_machine = reqJson['guest_image_from_machine']
guest_addition = reqJson['guest_addition']
guest_addition_from_machine = reqJson['guest_addition_from_machine']
return StreamingHttpResponse(StreamJson(
virtualbox_controller.CreateVm(
vm_name,
machine_name=machine_name,
password=virtualbox_controller.GetDefaultPassword(),
memory=str(memory),
image=guest_image,
image_from_machine=guest_image_from_machine,
additions_iso=guest_addition,
additions_iso_from_machine=guest_addition_from_machine)))
else:
return StreamingHttpResponse(
StreamError('Invalid option: ' + reqJson['guest_creation']))
@require_http_methods(['POST'])
@authenticator.RequireAuth
def GetAllVmInfo(request):
return JsonResponse({'vms': virtualbox_controller.GetAllVmInfo()})
@require_http_methods(['POST'])
@authenticator.RequireAuth
def GetOptions(request):
try:
return JsonResponse(virtualbox_controller.GetDefaultVm())
except Exception as e:
return JsonResponse({'error': str(e)})
|
apache-2.0
| 4,134,940,897,749,442,000 | 30.189655 | 76 | 0.698729 | false | 3.726056 | false | false | false |
madmath/sous-chef
|
src/order/management/commands/generateorders.py
|
1
|
1511
|
from django.core.management.base import BaseCommand
from order.models import Order
from member.models import Client
from datetime import datetime
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE
class Command(BaseCommand):
    help = 'Generate orders for all clients using their preferences'
def add_arguments(self, parser):
parser.add_argument(
'--creation_date',
help='The date must be in the format YYYY-MM-DD',
)
parser.add_argument(
'delivery_date',
help='The date must be in the format YYYY-MM-DD',
)
def handle(self, *args, **options):
if options['creation_date']:
creation_date = datetime.strptime(
options['creation_date'], '%Y-%m-%d'
).date()
else:
creation_date = datetime.now().date()
delivery_date = datetime.strptime(
options['delivery_date'], '%Y-%m-%d'
).date()
clients = Client.active.all()
numorders = Order.create_orders_on_defaults(
creation_date, delivery_date, clients)
LogEntry.objects.log_action(
user_id=1, content_type_id=1,
object_id="", object_repr="Generation of order for "+str(
datetime.now().strftime('%Y-%m-%d %H:%M')),
action_flag=ADDITION,
)
print("On", creation_date,
"created", numorders,
"orders to be delivered on", delivery_date, ".")
|
agpl-3.0
| 185,264,954,829,694,800 | 34.97619 | 69 | 0.581072 | false | 4.197222 | false | false | false |
saihttam/kaggle-axa
|
RobustRegressionDriver.py
|
1
|
5500
|
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from random import sample, seed
from sklearn.decomposition import TruncatedSVD
from math import floor
from sklearn import cross_validation
from numpy.linalg import norm, svd
def inexact_augmented_lagrange_multiplier(X, lmbda=.01, tol=1e-3,
maxiter=100, verbose=True):
"""
Inexact Augmented Lagrange Multiplier
"""
Y = X
norm_two = norm(Y.ravel(), 2)
norm_inf = norm(Y.ravel(), np.inf) / lmbda
dual_norm = np.max([norm_two, norm_inf])
Y = Y / dual_norm
A = np.zeros(Y.shape)
E = np.zeros(Y.shape)
dnorm = norm(X, 'fro')
mu = 1.25 / norm_two
rho = 1.5
sv = 10.
n = Y.shape[0]
itr = 0
while True:
Eraw = X - A + (1/mu) * Y
Eupdate = np.maximum(Eraw - lmbda / mu, 0) + np.minimum(Eraw + lmbda / mu, 0)
U, S, V = svd(X - Eupdate + (1 / mu) * Y, full_matrices=False)
        svp = int(np.sum(S > 1 / mu))  # number of singular values above 1/mu
if svp < sv:
sv = np.min([svp + 1, n])
else:
sv = np.min([svp + round(.05 * n), n])
Aupdate = np.dot(np.dot(U[:, :svp], np.diag(S[:svp] - 1 / mu)), V[:svp, :])
A = Aupdate
E = Eupdate
Z = X - A - E
Y = Y + mu * Z
mu = np.min([mu * rho, mu * 1e7])
itr += 1
if ((norm(Z, 'fro') / dnorm) < tol) or (itr >= maxiter):
break
if verbose:
print "Finished at iteration %d" % (itr)
return A, E
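
# Illustrative use (not part of the original script): for a data matrix X,
#   A, E = inexact_augmented_lagrange_multiplier(X)
# returns a low-rank component A and a sparse outlier component E with
# X ~= A + E (robust PCA).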
class RegressionDriver(object):
"""Class for Regression-based analysis of Driver traces"""
def __init__(self, driver, datadict, numberofrows=40): #, numfeatures = 200):
"""Initialize by providing a (positive) driver example and a dictionary of (negative) driver references."""
seed(42)
self.driver = driver
self.numfeatures = self.driver.num_features
featurelist = []
self.__clf = GradientBoostingRegressor(n_estimators=300, max_depth=4, min_samples_leaf=2)
# gbr = GradientBoostingRegressor(n_estimators=500, max_depth=10, max_features=numfeatures, random_state=42)
# pca = PCA(whiten=True, n_components=numfeatures)
# estimators = [('polyf', PolynomialFeatures()), ('scale', MinMaxScaler()), ('pca', PCA()), ('gbr', gbr)]
# self.__clf = Pipeline(estimators)
self.__indexlist = []
for trace in self.driver.traces:
self.__indexlist.append(trace.identifier)
featurelist.append(trace.features)
# Initialize train and test np arrays
self.__traindata = np.asarray(featurelist)
self.__testdata = np.asarray(featurelist)
self.__trainlabels = np.ones((self.__traindata.shape[0],))
data = np.empty((0, self.numfeatures), float)
setkeys = datadict.keys()
if driver.identifier in setkeys:
setkeys.remove(driver.identifier)
else:
setkeys = sample(setkeys, len(setkeys) - 1)
for key in setkeys:
if key != driver.identifier:
rand_smpl = [datadict[key][i] for i in sorted(sample(xrange(len(datadict[key])), numberofrows)) ]
data = np.append(data, np.asarray(rand_smpl), axis=0)
self.__traindata = np.append(self.__traindata, data, axis=0)
self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)
self.__y = np.zeros((self.__testdata.shape[0],))
def classify(self, nfolds=4):
"""Perform classification"""
components = self.__traindata.shape[1]
_, train_rpca_X_np = inexact_augmented_lagrange_multiplier(np.nan_to_num(self.__traindata))
_, test_rpca_X_np = inexact_augmented_lagrange_multiplier(np.nan_to_num(self.__testdata))
skf = cross_validation.StratifiedKFold(self.__trainlabels, n_folds=nfolds)
for train_index, _ in skf:
X_train = train_rpca_X_np[train_index]
y_train = self.__trainlabels[train_index]
self.__clf.fit(X_train, y_train)
self.__y += self.__clf.predict(test_rpca_X_np)
self.__y /= float(nfolds)
# feature_importance = self.__clf.feature_importances_
# feature_importance = 100.0 * (feature_importance / feature_importance.max())
# print feature_importance
def toKaggle(self):
"""Return string in Kaggle submission format"""
returnstring = ""
for i in xrange(len(self.__indexlist) - 1):
returnstring += "%d_%d,%.6f\n" % (self.driver.identifier, self.__indexlist[i], self.__y[i])
returnstring += "%d_%d,%.6f" % (self.driver.identifier, self.__indexlist[len(self.__indexlist)-1], self.__y[len(self.__indexlist)-1])
return returnstring
def validate(self, datadict):
from sklearn.metrics import roc_auc_score
testdata = np.empty((0, self.numfeatures), float)
y_true = np.empty((0,), float)
for key in datadict.keys():
currenttestdata = np.asarray(datadict[key])
testdata = np.append(testdata, currenttestdata, axis=0)
if key != self.driver.identifier:
y_true = np.append(y_true, np.zeros((currenttestdata.shape[0],)), axis=0)
else:
y_true = np.append(y_true, np.ones((currenttestdata.shape[0],)), axis=0)
y_score = self.__clf.predict(testdata)
result = roc_auc_score(y_true, y_score)
return result
|
bsd-2-clause
| 4,683,779,648,376,033,000 | 42.65873 | 141 | 0.586909 | false | 3.424658 | true | false | false |
raymondanthony/youtube-dl
|
youtube_dl/extractor/xnxx.py
|
1
|
1491
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
)
class XNXXIE(InfoExtractor):
_VALID_URL = r'^https?://(?:video|www)\.xnxx\.com/video(?P<id>[0-9]+)/(.*)'
_TEST = {
'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_',
'md5': '0831677e2b4761795f68d417e0b7b445',
'info_dict': {
'id': '1135332',
'ext': 'flv',
'title': 'lida » Naked Funny Actress (5)',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# Get webpage content
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(r'flv_url=(.*?)&',
webpage, 'video URL')
video_url = compat_urllib_parse.unquote(video_url)
video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM',
webpage, 'title')
video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'thumbnail': video_thumbnail,
'age_limit': 18,
}
|
unlicense
| 5,536,822,106,596,122,000 | 29.408163 | 80 | 0.495973 | false | 3.489461 | false | false | false |
Fuchida/Archive
|
albme-py/albme.py
|
1
|
3547
|
"""Perform automated searches against http://www.albme.org/.
The script will get license information such as the licensee name,
license number and expiration date information of a medical professional.
This information is then saved to a json file
"""
import json
import requests
import grequests
from BeautifulSoup import BeautifulSoup
LICENSE_TYPE = "TA"
LAST_NAME = "c"
LICENSE_NUMBER= ""
FIRST_NAME = ""
CITY = ""
SEARCH_URL = "http://www.albme.org/AlbmeSearchWeb/search"
OUTPUT_FILE = 'data.json'
def save_to_file(userDetails):
"""Save dictionary to local json file
Args:
userDetails: A dictionary of user information
"""
with open(OUTPUT_FILE, 'w') as writeFile:
writeFile.write(json.dumps(userDetails))
def perform_async_requests(urls):
"""Perform asynchronous get requests given multiple links
Args:
urls: An array of URLs
Returns:
An array of requests response objects
"""
unsentRequests = ((grequests.get(resource) for resource in urls))
#Size param specifies the number of requests to be made at a time
return grequests.map(unsentRequests, size=10)
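# Illustrative usage (URLs are hypothetical): fetch several detail pages
# concurrently; grequests.map returns responses in request order, with None
# for requests that failed.
#
#   responses = perform_async_requests([
#       'http://www.albme.org/AlbmeSearchWeb/detail?id=1',
#       'http://www.albme.org/AlbmeSearchWeb/detail?id=2',
#   ])
#   pages = [r.text for r in responses if r is not None]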
def parse_detail_page(detailPage):
"""Fetch licensee name, number and expiration date from detail page
Args:
detailPage: A html text of the results page
Returns:
A dictionary of licensee name, number and expiration date
"""
soup = BeautifulSoup(detailPage)
details = []
for tableRow in soup.findAll('table')[0].findAll('tr'):
#Information we care about comes back in arrays with two elements
#(key:value for table data).
tableData = tableRow.findAll('td', text=True)
        #if there is no value for the table data, it won't be included
if len(tableData) == 2:
details.append(tableData)
else:
continue
    #from the list of items, construct the dictionary to return
    parsedDetails = {}
for detail in details:
if detail[0] =="Licensee name:":
parsedDetails["Licensee name"] = detail[1]
elif detail[0] =="License number:":
parsedDetails["License number:"] = detail[1]
elif detail[0] =="Expiration date:":
parsedDetails["Expiration date:"] = detail[1]
else:
continue
return parsedDetails
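# Example of the dictionary returned above (values are hypothetical; note
# that the code keys "Licensee name" without a colon but keeps the colon on
# the other two keys):
#
#   {'Licensee name': 'Jane Doe',
#    'License number:': 'TA.1234',
#    'Expiration date:': '12/31/2016'}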
def parse_results_page(resultsPage):
"""Fetch the detail links from the results page
Args:
resultsPage: A html text of the results page
Returns:
An array of links to the details page
"""
soup = BeautifulSoup(resultsPage)
links = []
for link in soup.findAll('a'):
if link.get('href') != 'search':
#Detail links are relative, appending to make them absolute
#and dropping first period from link and "/search" from SEARCH_URL.
links.append(SEARCH_URL[:-7]+link.get('href')[1:])
else:
continue
return links
def get_user_details():
"""Make a request to the search page then crawl each detail page of result
user information will be saved to a local file
"""
#Empty strings need to be submitted for empty data,
#otherwise the server assumes none are filled
postData = {
'licenseType': LICENSE_TYPE,
'licenseNumber': LICENSE_NUMBER,
'lastName': LAST_NAME,
'firstName': FIRST_NAME,
'city':CITY}
searchResponse = requests.post(
SEARCH_URL,
data=postData
)
detailLinks = parse_results_page(searchResponse.text)
detailResponses = perform_async_requests(detailLinks)
#for each reponse object of the detail page, parse the detail page
userDetails = {}
for detail in detailResponses:
userInformation = parse_detail_page(detail.text)
userDetails[userInformation["License number:"]] = userInformation
save_to_file(userDetails)
if __name__ == "__main__":
get_user_details()
|
mit
| -9,057,134,204,100,410,000 | 26.076336 | 75 | 0.728503 | false | 3.420444 | false | false | false |
smartinov/fennec
|
fennec/libs/importer/importer.py
|
1
|
3594
|
from rest_framework.renderers import JSONRenderer
from fennec.apps.metamodel.serializers import ColumnSerializer, BasicSchemaSerializer, BasicTableSerializer, BasicIndexSerializer, \
ForeignKeyBasicSerializer
from fennec.apps.repository.models import BranchRevisionChange
from fennec.apps.metamodel.models import Change
__author__ = 'Darko'
class FennecImporter():
def __init__(self, model=None, user=None, branch_rev=None):
self.model = model if model else []
self.user = user
self.branch_rev = branch_rev
def import_model(self):
for schema in self.model:
self.__save_schema_change(schema)
for table in schema.tables:
self.__save_table_change(table)
for column in table.columns:
self.__save_column_change(column)
for index in table.indexes:
self.__save_index_change(index)
for fk in table.foreign_keys:
self.__save_foreign_key_change(fk)
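    # Illustrative outcome (not part of the original): for a model containing
    # one schema with one table that has two columns and no indexes or
    # foreign keys,
    #
    #   FennecImporter(model=[schema], user=user, branch_rev=rev).import_model()
    #
    # persists four Change rows (Schema, Table, Column, Column), each linked
    # to `rev` via a BranchRevisionChange created in
    # __save_branch_revision_change().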
def __save_schema_change(self, schema):
serializer = BasicSchemaSerializer(schema)
json = JSONRenderer().render(serializer.data)
change = Change()
change.change_type = 0
change.is_ui_change = False
change.made_by = self.user
change.object_type = 'Schema'
change.object_code = schema.id
change.content = json
change.save()
self.__save_branch_revision_change(change)
def __save_table_change(self, table):
serializer = BasicTableSerializer(table)
json = JSONRenderer().render(serializer.data)
change = Change()
change.change_type = 0
change.is_ui_change = False
change.made_by = self.user
change.object_type = 'Table'
change.object_code = table.id
change.content = json
change.save()
self.__save_branch_revision_change(change)
def __save_column_change(self, column):
serializer = ColumnSerializer(column)
json = JSONRenderer().render(serializer.data)
change = Change()
change.change_type = 0
change.is_ui_change = False
change.made_by = self.user
change.object_type = 'Column'
change.object_code = column.id
change.content = json
change.save()
self.__save_branch_revision_change(change)
def __save_index_change(self, index):
serializer = BasicIndexSerializer(index)
json = JSONRenderer().render(serializer.data)
change = Change()
change.change_type = 0
change.is_ui_change = False
change.made_by = self.user
change.object_type = 'Index'
change.object_code = index.id
change.content = json
change.save()
self.__save_branch_revision_change(change)
def __save_foreign_key_change(self, foreign_key):
serializer = ForeignKeyBasicSerializer(foreign_key)
json = JSONRenderer().render(serializer.data)
change = Change()
change.change_type = 0
change.is_ui_change = False
change.made_by = self.user
change.object_type = 'ForeignKey'
change.object_code = foreign_key.id
change.content = json
change.save()
self.__save_branch_revision_change(change)
def __save_branch_revision_change(self, change):
br_change = BranchRevisionChange()
br_change.branch_revision_ref = self.branch_rev
br_change.change_ref = change
# br_change.id = ordinal
br_change.save()
|
gpl-3.0
| 3,583,280,857,935,306,000 | 32.287037 | 132 | 0.618809 | false | 4.084091 | false | false | false |
co-ment/comt
|
src/cm/utils/comment_positioning.py
|
1
|
7762
|
# -*- coding: utf-8 -*-
from difflib import SequenceMatcher
#from cm.utils.spannifier import Spannifier
import sys, operator
from cm.utils.spannifier import spannify
from cm.converters.pandoc_converters import pandoc_convert
import logging
from cm.utils.spannifier import get_the_soup
import re
import html5lib
from html5lib import treebuilders
def compute_new_comment_positions(old_content, old_format, new_content, new_format, commentList):
# cf. TextVersion.get_content
previousVersionContent = pandoc_convert(old_content, old_format, 'html')
newVersionContent = pandoc_convert(new_content, new_format, 'html')
_, previous_char_list, span_starts_previous = spannify(previousVersionContent, False)
_, new_char_list, span_starts_new = spannify(newVersionContent, False)
sm = SequenceMatcher(None, previous_char_list, new_char_list)
opcodes = sm.get_opcodes()
to_remove_comments_ids = set()
# limit to real comments (not replies) and those that have scope
commentList = [c for c in commentList if not c.is_reply() and not c.is_scope_removed()]
for comment in commentList:
try:
comment.initial_start_offset = span_starts_previous[comment.start_wrapper] + comment.start_offset
comment.initial_end_offset = span_starts_previous[comment.end_wrapper] + comment.end_offset
except KeyError:
logging.error('Key error (wrapper out of bounds of span_starts_previous)')
continue
comment.computed_start_offset = comment.initial_start_offset
comment.computed_end_offset = comment.initial_end_offset
# comment.computed_start_wrapper = None
# comment.computed_end_wrapper = None
comment.valid = True
for tag, i1, i2, j1, j2 in opcodes:
#print tag, i1, i2, j1, j2
for i in xrange(len(commentList)) :
if tag != 'equal' :
comment = commentList[i]
if not comment.valid:
continue
if comment.initial_start_offset >= i2 :
# if offset
delta = ((j2 - j1) - (i2 - i1))
comment.computed_start_offset += delta
comment.computed_end_offset += delta
elif comment.initial_end_offset > i1:
comment.valid = False
# id, initial_start, initial_end, computed_start, computed_end, valid = self.computationResults[i]
for cc in commentList:
if cc.valid:
for id in xrange(len(span_starts_new.keys())):
start = span_starts_new.get(id, 0)
end = span_starts_new.get(id+1, sys.maxint)
# adjust start
if cc.computed_start_offset >= start and cc.computed_start_offset < end:
cc.start_wrapper = id
cc.start_offset = cc.computed_start_offset - start
# adjust end
if cc.computed_end_offset >= start and cc.computed_end_offset < end:
cc.end_wrapper = id
cc.end_offset = cc.computed_end_offset - start
# returns to_modify, to_remove
return [c for c in commentList if c.valid], \
[c for c in commentList if not c.valid]
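# Sketch (not in the original) of the difflib opcodes consumed above, shown
# on plain strings; the real call diffs per-character lists of the two HTML
# versions:
#
#   >>> from difflib import SequenceMatcher
#   >>> SequenceMatcher(None, "abcdef", "abXdef").get_opcodes()
#   [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 3), ('equal', 3, 6, 3, 6)]
#
# A comment lying entirely after a non-'equal' block is shifted by
# (j2 - j1) - (i2 - i1); one overlapping the block is marked invalid.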
def add_marker(text, color, start_ids, end_ids, with_markers, with_colors):
# TODO
# THESE 3 LINES ARE REALLY JUST FOR TESTING THIS IS COPIED FROM C-TEXT.CSS AND SHOULD BE DONE DIFFERENTLY
BCKCOLORS = ['#ffffff', '#ffffa8', '#fff6a1', '#ffeb99', '#ffde91', '#ffd08a', '#ffc182', '#ffaf7a', '#ff9d73', '#ff896b', '#ff7363', '#ff5c5c']
for i in range(14) :
BCKCOLORS.append('#ff5c5c')
ret = text
if with_markers:
end_ids.reverse()
ret = "%s%s%s"%(''.join(["[%s>"%start_id for start_id in start_ids]), ret, ''.join(["<%s]"%end_id for end_id in end_ids]))
if with_colors and color != 0 :
# For some reasons, abiwords can read background style attribute but not background-color
from cm.cm_settings import USE_ABI
if USE_ABI:
ret = "<span style='background:%s;'>%s</span>"%(BCKCOLORS[color], ret)
else:
ret = "<span style='background-color:%s;'>%s</span>"%(BCKCOLORS[color], ret)
return ret
# comments are comments and replies :
def insert_comment_markers(htmlcontent, comments, with_markers, with_colors) :
    html = get_the_soup(htmlcontent)
if comments :
max_wrapper = max([comment.end_wrapper for comment in comments])
min_wrapper = min([comment.start_wrapper for comment in comments])
datas = {} # { wrapper_id : {'start_color':nb_of_comments_unterminated_at_wrapper_start, 'offsets':{offset: [[ids of wrappers starting at offset], [ids of wrappers ending at offset]]}}
# datas['offsets'][someoffset][0] and idem[1] will be ordered the way comments are (should be ('start_wrapper', 'start_offset', 'end_wrapper', 'end_offset') important)
cpt = 1 # starting numbered comment
for comment in comments :
            if comment.is_reply():
                continue
# start
wrapper_data = datas.get(comment.start_wrapper, {'start_color':0, 'offsets':{}})
offset = wrapper_data.get('offsets').get(comment.start_offset, [[],[]])
offset[0].append(cpt)
wrapper_data['offsets'][comment.start_offset] = offset
datas[comment.start_wrapper] = wrapper_data
# end
wrapper_data = datas.get(comment.end_wrapper, {'start_color':0, 'offsets':{}})
offset = wrapper_data.get('offsets').get(comment.end_offset, [[],[]])
offset[1].append(cpt)
wrapper_data['offsets'][comment.end_offset] = offset
datas[comment.end_wrapper] = wrapper_data
for cc in range(comment.start_wrapper + 1, comment.end_wrapper + 1) :
wrapper_data = datas.get(cc, {'start_color':0, 'offsets':{}})
wrapper_data['start_color'] += 1
datas[cc] = wrapper_data
cpt = cpt + 1
# order ee values
for (wrapper_id, wrapper_data) in datas.items() :
start_color = wrapper_data['start_color']
offsets = sorted(wrapper_data['offsets'].items(), key=operator.itemgetter(0))
d = html.find(id = "sv-%d"%wrapper_id)
if not d: # comment detached
continue
content = d.contents[0]
spans = ""
if offsets :
color = start_color
start = 0
start_ids = []
end_ids = []
for offset, ids in offsets :
end_ids = ids[1]
end = offset
spans += add_marker(content[start:end], color, start_ids, end_ids, with_markers, with_colors)
start_ids = ids[0]
start = end
color += (len(ids[0]) - len(ids[1]))
end_ids = []
spans += add_marker(content[end:], color,start_ids, end_ids, with_markers, with_colors)
else : # the whole content is to be colored with start_color
spans += add_marker(content, start_color, [], [], with_markers, with_colors)
content.replaceWith(spans)
output = unicode(html)
# Soup has introduced HTML entities, which should be expanded
output =re.sub(r""", '"', output)
output =re.sub(r"&", '&', output)
output =re.sub(r">", '>', output)
output =re.sub(r"<", '<', output)
return unicode(output)
|
agpl-3.0
| 467,451,743,071,097,000 | 39.831579 | 188 | 0.577597 | false | 3.810413 | false | false | false |
esquire-/weather
|
weather.py
|
1
|
1797
|
#!/usr/bin/python
'''
Weather.py
John Heenan
14 February 2014
A simple utility to send notifications to your phone when it starts raining outside of your windowless CS lab.
Run as a login item/launchd process/drop it in .bashrc and it will only call you when you're in the lab.
'''
import urllib2
import json
import time
import pynma
'''
Some configuration variables
'''
WU_KEY = '' # Weather Underground API Key - The free developer tier should suffice
NMA_KEY = '' # Notify My Android API Key
LOC = 'UK/London' # Weather Underground Area ID/Location Name
DELAY = 300 # Refresh interval
'''
You shouldn't need to modify anything after this point.
'''
notifier = pynma.PyNMA(NMA_KEY)
def sendMessage(message):
notifier.push("Weather Checker", message, "The weather outside the CS lab has changed. It is currently " + message + " .\nData by Weather Underground\nImplementation by J. Heenan.")
def main():
print("Weather monitor started.")
last_observation = ''
while True:
notify = False
data = urllib2.urlopen('http://api.wunderground.com/api/' + WU_KEY + '/geolookup/conditions/q/' + LOC + '.json')
json_string = data.read()
parsed_json = json.loads(json_string)
observation = parsed_json['current_observation']['weather']
if "Rain" in observation or "Snow" in observation:
if observation != last_observation:
notify = True # Send message if it has started raining/rain conditions change
if "Rain" in last_observation or "Snow" in last_observation:
if observation != last_observation:
notify = True # Send message if it was raining and it isn't. If rain conditions change this will have no effect, notify is already True
if notify:
sendMessage(observation)
last_observation = observation
time.sleep(DELAY)
if __name__ == '__main__':
main()
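# Abridged sketch (not part of the original) of the Weather Underground
# conditions response consumed in main(); only the field actually read is
# shown, with a hypothetical value:
#
#   {
#       "current_observation": {
#           "weather": "Light Rain"
#       }
#   }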
|
mpl-2.0
| 5,054,473,239,764,113,000 | 28 | 182 | 0.722315 | false | 3.435946 | false | false | false |
sharoonthomas/trytond-report-html-stock
|
tests/test_views_depends.py
|
1
|
1195
|
# -*- coding: utf-8 -*-
import sys
import os
DIR = os.path.abspath(os.path.normpath(os.path.join(
__file__, '..', '..', '..', '..', '..', 'trytond'
)))
if os.path.isdir(DIR):
sys.path.insert(0, os.path.dirname(DIR))
import unittest
import trytond.tests.test_tryton
from trytond.tests.test_tryton import test_view, test_depends
class TestViewsDepends(unittest.TestCase):
'''
Test views and depends
'''
def setUp(self):
"""
Set up data used in the tests.
this method is called before each test function execution.
"""
trytond.tests.test_tryton.install_module('report_html_stock')
@unittest.skip("No views")
def test0005views(self):
'''
Test views.
'''
test_view('report_html_stock')
def test0006depends(self):
'''
Test depends.
'''
test_depends()
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(TestViewsDepends)
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
bsd-3-clause
| 2,409,269,749,330,876,400 | 21.980769 | 69 | 0.59749 | false | 3.676923 | true | false | false |
Ayrx/cryptography
|
src/_cffi_src/openssl/x509v3.py
|
1
|
9359
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/x509v3.h>
/*
* This is part of a work-around for the difficulty cffi has in dealing with
* `LHASH_OF(foo)` as the name of a type. We invent a new, simpler name that
* will be an alias for this type and use the alias throughout. This works
* together with another opaque typedef for the same name in the TYPES section.
* Note that the result is an opaque type.
*/
typedef LHASH_OF(CONF_VALUE) Cryptography_LHASH_OF_CONF_VALUE;
typedef STACK_OF(ACCESS_DESCRIPTION) Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef STACK_OF(DIST_POINT) Cryptography_STACK_OF_DIST_POINT;
typedef STACK_OF(POLICYQUALINFO) Cryptography_STACK_OF_POLICYQUALINFO;
typedef STACK_OF(POLICYINFO) Cryptography_STACK_OF_POLICYINFO;
typedef STACK_OF(ASN1_INTEGER) Cryptography_STACK_OF_ASN1_INTEGER;
typedef STACK_OF(GENERAL_SUBTREE) Cryptography_STACK_OF_GENERAL_SUBTREE;
"""
TYPES = """
typedef ... Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef ... Cryptography_STACK_OF_POLICYQUALINFO;
typedef ... Cryptography_STACK_OF_POLICYINFO;
typedef ... Cryptography_STACK_OF_ASN1_INTEGER;
typedef ... Cryptography_STACK_OF_GENERAL_SUBTREE;
typedef ... EXTENDED_KEY_USAGE;
typedef ... CONF;
typedef struct {
X509 *issuer_cert;
X509 *subject_cert;
...;
} X509V3_CTX;
typedef void * (*X509V3_EXT_D2I)(void *, const unsigned char **, long);
typedef struct {
ASN1_ITEM_EXP *it;
X509V3_EXT_D2I d2i;
...;
} X509V3_EXT_METHOD;
static const int GEN_OTHERNAME;
static const int GEN_EMAIL;
static const int GEN_X400;
static const int GEN_DNS;
static const int GEN_URI;
static const int GEN_DIRNAME;
static const int GEN_EDIPARTY;
static const int GEN_IPADD;
static const int GEN_RID;
typedef struct {
ASN1_OBJECT *type_id;
ASN1_TYPE *value;
} OTHERNAME;
typedef struct {
...;
} EDIPARTYNAME;
typedef struct {
int ca;
ASN1_INTEGER *pathlen;
} BASIC_CONSTRAINTS;
typedef struct {
Cryptography_STACK_OF_GENERAL_SUBTREE *permittedSubtrees;
Cryptography_STACK_OF_GENERAL_SUBTREE *excludedSubtrees;
} NAME_CONSTRAINTS;
typedef struct {
ASN1_INTEGER *requireExplicitPolicy;
ASN1_INTEGER *inhibitPolicyMapping;
} POLICY_CONSTRAINTS;
typedef struct {
int type;
union {
char *ptr;
OTHERNAME *otherName; /* otherName */
ASN1_IA5STRING *rfc822Name;
ASN1_IA5STRING *dNSName;
ASN1_TYPE *x400Address;
X509_NAME *directoryName;
EDIPARTYNAME *ediPartyName;
ASN1_IA5STRING *uniformResourceIdentifier;
ASN1_OCTET_STRING *iPAddress;
ASN1_OBJECT *registeredID;
/* Old names */
ASN1_OCTET_STRING *ip; /* iPAddress */
X509_NAME *dirn; /* dirn */
ASN1_IA5STRING *ia5; /* rfc822Name, dNSName, */
/* uniformResourceIdentifier */
ASN1_OBJECT *rid; /* registeredID */
ASN1_TYPE *other; /* x400Address */
} d;
...;
} GENERAL_NAME;
typedef struct {
GENERAL_NAME *base;
ASN1_INTEGER *minimum;
ASN1_INTEGER *maximum;
} GENERAL_SUBTREE;
typedef struct stack_st_GENERAL_NAME GENERAL_NAMES;
typedef struct {
ASN1_OCTET_STRING *keyid;
GENERAL_NAMES *issuer;
ASN1_INTEGER *serial;
} AUTHORITY_KEYID;
typedef struct {
ASN1_OBJECT *method;
GENERAL_NAME *location;
} ACCESS_DESCRIPTION;
typedef ... Cryptography_LHASH_OF_CONF_VALUE;
typedef ... Cryptography_STACK_OF_DIST_POINT;
typedef struct {
int type;
union {
GENERAL_NAMES *fullname;
Cryptography_STACK_OF_X509_NAME_ENTRY *relativename;
} name;
...;
} DIST_POINT_NAME;
typedef struct {
DIST_POINT_NAME *distpoint;
ASN1_BIT_STRING *reasons;
GENERAL_NAMES *CRLissuer;
...;
} DIST_POINT;
typedef struct {
ASN1_STRING *organization;
Cryptography_STACK_OF_ASN1_INTEGER *noticenos;
} NOTICEREF;
typedef struct {
NOTICEREF *noticeref;
ASN1_STRING *exptext;
} USERNOTICE;
typedef struct {
ASN1_OBJECT *pqualid;
union {
ASN1_IA5STRING *cpsuri;
USERNOTICE *usernotice;
ASN1_TYPE *other;
} d;
} POLICYQUALINFO;
typedef struct {
ASN1_OBJECT *policyid;
Cryptography_STACK_OF_POLICYQUALINFO *qualifiers;
} POLICYINFO;
"""
FUNCTIONS = """
int X509V3_EXT_add_alias(int, int);
void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);
X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);
GENERAL_NAME *GENERAL_NAME_new(void);
int GENERAL_NAME_print(BIO *, GENERAL_NAME *);
GENERAL_NAMES *GENERAL_NAMES_new(void);
void GENERAL_NAMES_free(GENERAL_NAMES *);
void *X509V3_EXT_d2i(X509_EXTENSION *);
"""
MACROS = """
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
x509v3.h header. */
BASIC_CONSTRAINTS *BASIC_CONSTRAINTS_new(void);
void BASIC_CONSTRAINTS_free(BASIC_CONSTRAINTS *);
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
x509v3.h header. */
AUTHORITY_KEYID *AUTHORITY_KEYID_new(void);
void AUTHORITY_KEYID_free(AUTHORITY_KEYID *);
NAME_CONSTRAINTS *NAME_CONSTRAINTS_new(void);
void NAME_CONSTRAINTS_free(NAME_CONSTRAINTS *);
OTHERNAME *OTHERNAME_new(void);
void OTHERNAME_free(OTHERNAME *);
POLICY_CONSTRAINTS *POLICY_CONSTRAINTS_new(void);
void POLICY_CONSTRAINTS_free(POLICY_CONSTRAINTS *);
void *X509V3_set_ctx_nodb(X509V3_CTX *);
int i2d_GENERAL_NAMES(GENERAL_NAMES *, unsigned char **);
GENERAL_NAMES *d2i_GENERAL_NAMES(GENERAL_NAMES **, const unsigned char **,
long);
int sk_GENERAL_NAME_num(struct stack_st_GENERAL_NAME *);
int sk_GENERAL_NAME_push(struct stack_st_GENERAL_NAME *, GENERAL_NAME *);
GENERAL_NAME *sk_GENERAL_NAME_value(struct stack_st_GENERAL_NAME *, int);
Cryptography_STACK_OF_ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_new_null(void);
int sk_ACCESS_DESCRIPTION_num(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_value(
Cryptography_STACK_OF_ACCESS_DESCRIPTION *, int
);
void sk_ACCESS_DESCRIPTION_free(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
int sk_ACCESS_DESCRIPTION_push(Cryptography_STACK_OF_ACCESS_DESCRIPTION *,
ACCESS_DESCRIPTION *);
ACCESS_DESCRIPTION *ACCESS_DESCRIPTION_new(void);
void ACCESS_DESCRIPTION_free(ACCESS_DESCRIPTION *);
X509_EXTENSION *X509V3_EXT_conf_nid(Cryptography_LHASH_OF_CONF_VALUE *,
X509V3_CTX *, int, char *);
/* These aren't macros these functions are all const X on openssl > 1.0.x */
const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *);
const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int);
Cryptography_STACK_OF_DIST_POINT *sk_DIST_POINT_new_null(void);
void sk_DIST_POINT_free(Cryptography_STACK_OF_DIST_POINT *);
int sk_DIST_POINT_num(Cryptography_STACK_OF_DIST_POINT *);
DIST_POINT *sk_DIST_POINT_value(Cryptography_STACK_OF_DIST_POINT *, int);
int sk_DIST_POINT_push(Cryptography_STACK_OF_DIST_POINT *, DIST_POINT *);
void sk_POLICYINFO_free(Cryptography_STACK_OF_POLICYINFO *);
int sk_POLICYINFO_num(Cryptography_STACK_OF_POLICYINFO *);
POLICYINFO *sk_POLICYINFO_value(Cryptography_STACK_OF_POLICYINFO *, int);
int sk_POLICYINFO_push(Cryptography_STACK_OF_POLICYINFO *, POLICYINFO *);
Cryptography_STACK_OF_POLICYINFO *sk_POLICYINFO_new_null(void);
POLICYINFO *POLICYINFO_new(void);
void POLICYINFO_free(POLICYINFO *);
POLICYQUALINFO *POLICYQUALINFO_new(void);
void POLICYQUALINFO_free(POLICYQUALINFO *);
NOTICEREF *NOTICEREF_new(void);
void NOTICEREF_free(NOTICEREF *);
USERNOTICE *USERNOTICE_new(void);
void USERNOTICE_free(USERNOTICE *);
void sk_POLICYQUALINFO_free(Cryptography_STACK_OF_POLICYQUALINFO *);
int sk_POLICYQUALINFO_num(Cryptography_STACK_OF_POLICYQUALINFO *);
POLICYQUALINFO *sk_POLICYQUALINFO_value(Cryptography_STACK_OF_POLICYQUALINFO *,
int);
int sk_POLICYQUALINFO_push(Cryptography_STACK_OF_POLICYQUALINFO *,
POLICYQUALINFO *);
Cryptography_STACK_OF_POLICYQUALINFO *sk_POLICYQUALINFO_new_null(void);
Cryptography_STACK_OF_GENERAL_SUBTREE *sk_GENERAL_SUBTREE_new_null(void);
void sk_GENERAL_SUBTREE_free(Cryptography_STACK_OF_GENERAL_SUBTREE *);
int sk_GENERAL_SUBTREE_num(Cryptography_STACK_OF_GENERAL_SUBTREE *);
GENERAL_SUBTREE *sk_GENERAL_SUBTREE_value(
Cryptography_STACK_OF_GENERAL_SUBTREE *, int
);
int sk_GENERAL_SUBTREE_push(Cryptography_STACK_OF_GENERAL_SUBTREE *,
GENERAL_SUBTREE *);
GENERAL_SUBTREE *GENERAL_SUBTREE_new(void);
void sk_ASN1_INTEGER_free(Cryptography_STACK_OF_ASN1_INTEGER *);
int sk_ASN1_INTEGER_num(Cryptography_STACK_OF_ASN1_INTEGER *);
ASN1_INTEGER *sk_ASN1_INTEGER_value(Cryptography_STACK_OF_ASN1_INTEGER *, int);
int sk_ASN1_INTEGER_push(Cryptography_STACK_OF_ASN1_INTEGER *, ASN1_INTEGER *);
Cryptography_STACK_OF_ASN1_INTEGER *sk_ASN1_INTEGER_new_null(void);
X509_EXTENSION *X509V3_EXT_i2d(int, int, void *);
DIST_POINT *DIST_POINT_new(void);
void DIST_POINT_free(DIST_POINT *);
DIST_POINT_NAME *DIST_POINT_NAME_new(void);
void DIST_POINT_NAME_free(DIST_POINT_NAME *);
"""
CUSTOMIZATIONS = """
"""
|
bsd-3-clause
| -559,673,534,380,414,200 | 30.725424 | 79 | 0.711508 | false | 3.064506 | false | false | false |
NLeSC/PattyAnalytics
|
scripts/registration.py
|
1
|
3140
|
#!/usr/bin/env python2.7
"""Registration script.
Usage:
registration.py [-h] [-d <sample>] [-U] [-u <upfile>] [-c <camfile>] <source> <drivemap> <footprint> <output>
Positional arguments:
source Source LAS file
drivemap Target LAS file to map source to
footprint Footprint for the source LAS file
output file to write output LAS to
Options:
-d <sample> Downsample source pointcloud to a percentage of number of points
[default: 0.1].
-v <voxel> Downsample source pointcloud using voxel filter to speedup ICP
[default: 0.05]
-s <scale> User override for initial scale factor
  -U           Don't trust the upvector completely and estimate it in
this script, too
-u <upfile> Json file containing the up vector relative to the pointcloud.
  -c <camfile> CSV file containing all the camera positions. [UNIMPLEMENTED]
"""
from __future__ import print_function
from docopt import docopt
import numpy as np
import os
import json
from patty.utils import (load, save, log)
from patty.srs import (set_srs, force_srs)
from patty.registration import (
coarse_registration,
fine_registration,
initial_registration,
)
if __name__ == '__main__':
####
    # Parse command line arguments
args = docopt(__doc__)
sourcefile = args['<source>']
drivemapfile = args['<drivemap>']
footprintcsv = args['<footprint>']
foutLas = args['<output>']
up_file = args['-u']
if args['-U']:
Trust_up = False
else:
Trust_up = True
    # docopt returns None for absent options, so float() can raise TypeError
    # rather than KeyError; catch ValueError too for malformed input.
    try:
        Downsample = float(args['-d'])
    except (KeyError, TypeError, ValueError):
        Downsample = 0.1
    try:
        Voxel = float(args['-v'])
    except (KeyError, TypeError, ValueError):
        Voxel = 0.05
    try:
        Initial_scale = float(args['-s'])
    except (KeyError, TypeError, ValueError):
        Initial_scale = None
assert os.path.exists(sourcefile), sourcefile + ' does not exist'
assert os.path.exists(drivemapfile), drivemapfile + ' does not exist'
assert os.path.exists(footprintcsv), footprintcsv + ' does not exist'
#####
# Setup * the low-res drivemap
# * footprint
# * pointcloud
# * up-vector
log("Reading drivemap", drivemapfile)
drivemap = load(drivemapfile)
force_srs(drivemap, srs="EPSG:32633")
log("Reading footprint", footprintcsv)
footprint = load(footprintcsv)
force_srs(footprint, srs="EPSG:32633")
set_srs(footprint, same_as=drivemap)
log("Reading object", sourcefile)
pointcloud = load(sourcefile)
Up = None
try:
with open(up_file) as f:
dic = json.load(f)
Up = np.array(dic['estimatedUpDirection'])
log("Reading up_file", up_file)
    except (IOError, TypeError, ValueError, KeyError):
        log("Cannot parse upfile, skipping")
initial_registration(pointcloud, Up, drivemap,
trust_up=Trust_up, initial_scale=Initial_scale)
save(pointcloud, "initial.las")
center = coarse_registration(pointcloud, drivemap, footprint, Downsample)
save(pointcloud, "coarse.las")
fine_registration(pointcloud, drivemap, center, voxelsize=Voxel)
save(pointcloud, foutLas)
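# Example invocation (a sketch; all file names are hypothetical):
#
#   python registration.py -d 0.2 -u up.json \
#       pointcloud.las drivemap.las footprint.csv registered.las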
|
apache-2.0
| -5,553,458,156,020,115,000 | 27.288288 | 111 | 0.634076 | false | 3.548023 | false | false | false |
ghetzel/webfriend
|
webfriend/utils/__init__.py
|
1
|
1517
|
import os.path
import random
import inspect
import importlib
import string
PACKAGE_ROOT = os.path.abspath(
os.path.dirname(
os.path.dirname(__file__)
)
)
PACKAGE_NAME = os.path.basename(PACKAGE_ROOT)
def random_string(count, charset=string.lowercase + string.digits):
    # random.sample draws without replacement, so characters never repeat
    # and count must not exceed len(charset)
    return ''.join(random.sample(charset, count))
def autotype(value):
if isinstance(value, basestring):
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
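# Illustrative behaviour (not in the original file):
#
#   autotype('true')   # -> True
#   autotype('False')  # -> False
#   autotype('42')     # -> 42
#   autotype('3.14')   # -> 3.14
#   autotype('hello')  # -> 'hello'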
def get_module_from_string(string, package=None):
parts = string.split('.')
remainder = []
while len(parts):
try:
return importlib.import_module('.'.join(parts), package=package), remainder
except ImportError:
remainder = [parts.pop()] + remainder
return None, string.split('.')
def resolve_object(parts, parent=None):
if not parent:
parent = globals()
while len(parts):
proceed = False
for member in inspect.getmembers(parent):
if member[0] == parts[0]:
parent = member[1]
parts = parts[1:]
proceed = True
break
if not proceed:
return None
return parent
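# Sketch (not in the original) of the module/attribute split performed by
# the two helpers above, using the standard library:
#
#   mod, rest = get_module_from_string('os.path.join')
#   # mod is the os.path module, rest == ['join']
#   resolve_object(rest, parent=mod)  # -> the os.path.join function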
|
bsd-2-clause
| 6,987,214,187,512,982,000 | 20.671429 | 87 | 0.552406 | false | 4.501484 | false | false | false |
nylas/sync-engine
|
inbox/mailsync/backends/imap/generic.py
|
1
|
38201
|
# deal with unicode literals: http://www.python.org/dev/peps/pep-0263/
# vim: set fileencoding=utf-8 :
"""
----------------
IMAP SYNC ENGINE
----------------
Okay, here's the deal.
The IMAP sync engine runs per-folder on each account.
Only one initial sync can be running per-account at a time, to avoid
hammering the IMAP backend too hard (Gmail shards per-user, so parallelizing
folder download won't actually increase our throughput anyway).
Any time we reconnect, we have to make sure the folder's uidvalidity hasn't
changed, and if it has, we need to update the UIDs for any messages we've
already downloaded. A folder's uidvalidity cannot change during a session
(SELECT during an IMAP session starts a session on a folder) (see
http://tools.ietf.org/html/rfc3501#section-2.3.1.1).
Note that despite a session giving you a HIGHESTMODSEQ at the start of a
SELECT, that session will still always give you the latest message list
including adds, deletes, and flag changes that have happened since that
highestmodseq. (In Gmail, there is a small delay between changes happening on
the web client and those changes registering on a connected IMAP session,
though bizarrely the HIGHESTMODSEQ is updated immediately.) So we have to keep
in mind that the data may be changing behind our backs as we're syncing.
Fetching info about UIDs that no longer exist is not an error but gives us
empty data.
Folder sync state is stored in the ImapFolderSyncStatus table to allow for
restarts.
Here's the state machine:
-----
| ---------------- ----------------------
∨ | initial sync | <-----> | initial uidinvalid |
---------- ---------------- ----------------------
| finish | | ^
---------- | |_________________________
^ ∨ |
| ---------------- ----------------------
|---| poll | <-----> | poll uidinvalid |
---------------- ----------------------
| ∧
----
We encapsulate sync engine instances in greenlets for cooperative coroutine
scheduling around network I/O.
--------------
SESSION SCOPES
--------------
Database sessions are held for as short a duration as possible---just to
query for needed information or update the local state. Long-held database
sessions reduce scalability.
"""
from __future__ import division
from datetime import datetime, timedelta
from gevent import Greenlet
import gevent
import imaplib
from sqlalchemy import func
from sqlalchemy.orm import load_only
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from inbox.basicauth import ValidationError
from inbox.util.concurrency import retry_with_logging
from inbox.util.debug import bind_context
from inbox.util.itert import chunk
from inbox.util.misc import or_none
from inbox.util.threading import fetch_corresponding_thread, MAX_THREAD_LENGTH
from inbox.util.stats import statsd_client
from nylas.logging import get_logger
log = get_logger()
from inbox.crispin import connection_pool, retry_crispin, FolderMissingError
from inbox.models import Folder, Account, Message
from inbox.models.backends.imap import (ImapFolderSyncStatus, ImapThread,
ImapUid, ImapFolderInfo)
from inbox.models.session import session_scope
from inbox.mailsync.backends.imap import common
from inbox.mailsync.backends.base import (MailsyncDone, MailsyncError,
THROTTLE_COUNT, THROTTLE_WAIT)
from inbox.heartbeat.store import HeartbeatStatusProxy
from inbox.events.ical import import_attached_events
# Idle doesn't necessarily pick up flag changes, so we don't want to
# idle for very long, or we won't detect things like messages being
# marked as read.
IDLE_WAIT = 30
DEFAULT_POLL_FREQUENCY = 30
# Poll on the Inbox folder more often.
INBOX_POLL_FREQUENCY = 10
FAST_FLAGS_REFRESH_LIMIT = 100
SLOW_FLAGS_REFRESH_LIMIT = 2000
SLOW_REFRESH_INTERVAL = timedelta(seconds=3600)
FAST_REFRESH_INTERVAL = timedelta(seconds=30)
# Maximum number of uidinvalidity errors in a row.
MAX_UIDINVALID_RESYNCS = 5
CONDSTORE_FLAGS_REFRESH_BATCH_SIZE = 200
class FolderSyncEngine(Greenlet):
"""Base class for a per-folder IMAP sync engine."""
def __init__(self, account_id, namespace_id, folder_name,
email_address, provider_name, syncmanager_lock):
with session_scope(namespace_id) as db_session:
try:
folder = db_session.query(Folder). \
filter(Folder.name == folder_name,
Folder.account_id == account_id).one()
except NoResultFound:
raise MailsyncError(u"Missing Folder '{}' on account {}"
.format(folder_name, account_id))
self.folder_id = folder.id
self.folder_role = folder.canonical_name
# Metric flags for sync performance
self.is_initial_sync = folder.initial_sync_end is None
self.is_first_sync = folder.initial_sync_start is None
self.is_first_message = self.is_first_sync
bind_context(self, 'foldersyncengine', account_id, self.folder_id)
self.account_id = account_id
self.namespace_id = namespace_id
self.folder_name = folder_name
self.email_address = email_address
if self.folder_name.lower() == 'inbox':
self.poll_frequency = INBOX_POLL_FREQUENCY
else:
self.poll_frequency = DEFAULT_POLL_FREQUENCY
self.syncmanager_lock = syncmanager_lock
self.state = None
self.provider_name = provider_name
self.last_fast_refresh = None
self.flags_fetch_results = {}
self.conn_pool = connection_pool(self.account_id)
self.state_handlers = {
'initial': self.initial_sync,
'initial uidinvalid': self.resync_uids,
'poll': self.poll,
'poll uidinvalid': self.resync_uids,
}
self.setup_heartbeats()
Greenlet.__init__(self)
# Some generic IMAP servers are throwing UIDVALIDITY
# errors forever. Instead of resyncing those servers
# ad vitam, we keep track of the number of consecutive
# times we got such an error and bail out if it's higher than
# MAX_UIDINVALID_RESYNCS.
self.uidinvalid_count = 0
def setup_heartbeats(self):
self.heartbeat_status = HeartbeatStatusProxy(self.account_id,
self.folder_id,
self.folder_name,
self.email_address,
self.provider_name)
def _run(self):
# Bind greenlet-local logging context.
self.log = log.new(account_id=self.account_id, folder=self.folder_name,
provider=self.provider_name)
# eagerly signal the sync status
self.heartbeat_status.publish()
try:
self.update_folder_sync_status(lambda s: s.start_sync())
except IntegrityError:
# The state insert failed because the folder ID ForeignKey
# was no longer valid, ie. the folder for this engine was deleted
# while we were starting up.
# Exit the sync and let the monitor sort things out.
log.info("Folder state loading failed due to IntegrityError",
folder_id=self.folder_id, account_id=self.account_id)
raise MailsyncDone()
# NOTE: The parent ImapSyncMonitor handler could kill us at any
# time if it receives a shutdown command. The shutdown command is
# equivalent to ctrl-c.
while True:
retry_with_logging(self._run_impl, account_id=self.account_id,
provider=self.provider_name, logger=log)
def _run_impl(self):
old_state = self.state
try:
self.state = self.state_handlers[old_state]()
self.heartbeat_status.publish(state=self.state)
except UidInvalid:
self.state = self.state + ' uidinvalid'
self.uidinvalid_count += 1
self.heartbeat_status.publish(state=self.state)
# Check that we're not stuck in an endless uidinvalidity resync loop.
if self.uidinvalid_count > MAX_UIDINVALID_RESYNCS:
log.error('Resynced more than MAX_UIDINVALID_RESYNCS in a'
' row. Stopping sync.')
with session_scope(self.namespace_id) as db_session:
account = db_session.query(Account).get(self.account_id)
account.disable_sync('Detected endless uidvalidity '
'resync loop')
account.sync_state = 'stopped'
db_session.commit()
raise MailsyncDone()
except FolderMissingError:
# Folder was deleted by monitor while its sync was running.
# TODO: Monitor should handle shutting down the folder engine.
log.info('Folder disappeared. Stopping sync.',
account_id=self.account_id, folder_id=self.folder_id)
raise MailsyncDone()
except ValidationError as exc:
log.error('Error authenticating; stopping sync', exc_info=True,
account_id=self.account_id, folder_id=self.folder_id,
logstash_tag='mark_invalid')
with session_scope(self.namespace_id) as db_session:
account = db_session.query(Account).get(self.account_id)
account.mark_invalid()
account.update_sync_error(exc)
raise MailsyncDone()
# State handlers are idempotent, so it's okay if we're
# killed between the end of the handler and the commit.
if self.state != old_state:
def update(status):
status.state = self.state
self.update_folder_sync_status(update)
if self.state == old_state and self.state in ['initial', 'poll']:
# We've been through a normal state transition without raising any
# error. It's safe to reset the uidvalidity counter.
self.uidinvalid_count = 0
def update_folder_sync_status(self, cb):
# Loads the folder sync status and invokes the provided callback to
# modify it. Commits any changes and updates `self.state` to ensure
# they are never out of sync.
with session_scope(self.namespace_id) as db_session:
try:
state = ImapFolderSyncStatus.state
saved_folder_status = db_session.query(ImapFolderSyncStatus)\
.filter_by(account_id=self.account_id, folder_id=self.folder_id)\
.options(load_only(state)).one()
except NoResultFound:
saved_folder_status = ImapFolderSyncStatus(
account_id=self.account_id, folder_id=self.folder_id)
db_session.add(saved_folder_status)
cb(saved_folder_status)
db_session.commit()
self.state = saved_folder_status.state
def set_stopped(self, db_session):
self.update_folder_sync_status(lambda s: s.stop_sync())
def _report_initial_sync_start(self):
with session_scope(self.namespace_id) as db_session:
q = db_session.query(Folder).get(self.folder_id)
q.initial_sync_start = datetime.utcnow()
def _report_initial_sync_end(self):
with session_scope(self.namespace_id) as db_session:
q = db_session.query(Folder).get(self.folder_id)
q.initial_sync_end = datetime.utcnow()
@retry_crispin
def initial_sync(self):
log.bind(state='initial')
log.info('starting initial sync')
if self.is_first_sync:
self._report_initial_sync_start()
self.is_first_sync = False
with self.conn_pool.get() as crispin_client:
crispin_client.select_folder(self.folder_name, uidvalidity_cb)
# Ensure we have an ImapFolderInfo row created prior to sync start.
with session_scope(self.namespace_id) as db_session:
try:
db_session.query(ImapFolderInfo). \
filter(ImapFolderInfo.account_id == self.account_id,
ImapFolderInfo.folder_id == self.folder_id). \
one()
except NoResultFound:
imapfolderinfo = ImapFolderInfo(
account_id=self.account_id, folder_id=self.folder_id,
uidvalidity=crispin_client.selected_uidvalidity,
uidnext=crispin_client.selected_uidnext)
db_session.add(imapfolderinfo)
db_session.commit()
self.initial_sync_impl(crispin_client)
if self.is_initial_sync:
self._report_initial_sync_end()
self.is_initial_sync = False
return 'poll'
@retry_crispin
def poll(self):
log.bind(state='poll')
log.debug('polling')
self.poll_impl()
return 'poll'
@retry_crispin
def resync_uids(self):
log.bind(state=self.state)
log.warning('UIDVALIDITY changed; initiating resync')
self.resync_uids_impl()
return 'initial'
def initial_sync_impl(self, crispin_client):
# We wrap the block in a try/finally because the change_poller greenlet
# needs to be killed when this greenlet is interrupted
change_poller = None
try:
assert crispin_client.selected_folder_name == self.folder_name
remote_uids = crispin_client.all_uids()
with self.syncmanager_lock:
with session_scope(self.namespace_id) as db_session:
local_uids = common.local_uids(self.account_id, db_session,
self.folder_id)
common.remove_deleted_uids(
self.account_id, self.folder_id,
set(local_uids).difference(remote_uids))
new_uids = set(remote_uids).difference(local_uids)
with session_scope(self.namespace_id) as db_session:
account = db_session.query(Account).get(self.account_id)
throttled = account.throttled
self.update_uid_counts(
db_session,
remote_uid_count=len(remote_uids),
# This is the initial size of our download_queue
download_uid_count=len(new_uids))
change_poller = gevent.spawn(self.poll_for_changes)
bind_context(change_poller, 'changepoller', self.account_id,
self.folder_id)
uids = sorted(new_uids, reverse=True)
count = 0
for uid in uids:
# The speedup from batching appears to be less clear for
# non-Gmail accounts, so for now just download one-at-a-time.
self.download_and_commit_uids(crispin_client, [uid])
self.heartbeat_status.publish()
count += 1
if throttled and count >= THROTTLE_COUNT:
# Throttled accounts' folders sync at a rate of
# 1 message/ minute, after the first approx. THROTTLE_COUNT
# messages per folder are synced.
# Note this is an approx. limit since we use the #(uids),
# not the #(messages).
gevent.sleep(THROTTLE_WAIT)
finally:
if change_poller is not None:
# schedule change_poller to die
gevent.kill(change_poller)
def should_idle(self, crispin_client):
if not hasattr(self, '_should_idle'):
self._should_idle = (
crispin_client.idle_supported() and self.folder_name in
crispin_client.folder_names()['inbox']
)
return self._should_idle
def poll_impl(self):
with self.conn_pool.get() as crispin_client:
self.check_uid_changes(crispin_client)
if self.should_idle(crispin_client):
crispin_client.select_folder(self.folder_name,
self.uidvalidity_cb)
idling = True
try:
crispin_client.idle(IDLE_WAIT)
except Exception as exc:
# With some servers we get e.g.
# 'Unexpected IDLE response: * FLAGS (...)'
if isinstance(exc, imaplib.IMAP4.error) and \
exc.message.startswith('Unexpected IDLE response'):
log.info('Error initiating IDLE, not idling',
error=exc)
try:
# Still have to take the connection out of IDLE
# mode to reuse it though.
crispin_client.conn.idle_done()
except AttributeError:
pass
idling = False
else:
raise
else:
idling = False
# Close IMAP connection before sleeping
if not idling:
gevent.sleep(self.poll_frequency)
def resync_uids_impl(self):
# First, let's check if the UIVDALIDITY change was spurious, if
# it is, just discard it and go on.
with self.conn_pool.get() as crispin_client:
crispin_client.select_folder(self.folder_name, lambda *args: True)
remote_uidvalidity = crispin_client.selected_uidvalidity
remote_uidnext = crispin_client.selected_uidnext
if remote_uidvalidity <= self.uidvalidity:
log.debug('UIDVALIDITY unchanged')
return
# Otherwise, if the UIDVALIDITY really has changed, discard all saved
# UIDs for the folder, mark associated messages for garbage-collection,
# and return to the 'initial' state to resync.
# This will cause message and threads to be deleted and recreated, but
# uidinvalidity is sufficiently rare that this tradeoff is acceptable.
with session_scope(self.namespace_id) as db_session:
invalid_uids = {
uid for uid, in db_session.query(ImapUid.msg_uid).
filter_by(account_id=self.account_id,
folder_id=self.folder_id)
}
common.remove_deleted_uids(self.account_id, self.folder_id,
invalid_uids)
self.uidvalidity = remote_uidvalidity
self.highestmodseq = None
self.uidnext = remote_uidnext
@retry_crispin
def poll_for_changes(self):
log.new(account_id=self.account_id, folder=self.folder_name)
while True:
log.debug('polling for changes')
self.poll_impl()
def create_message(self, db_session, acct, folder, msg):
assert acct is not None and acct.namespace is not None
# Check if we somehow already saved the imapuid (shouldn't happen, but
# possible due to race condition). If so, don't commit changes.
existing_imapuid = db_session.query(ImapUid).filter(
ImapUid.account_id == acct.id, ImapUid.folder_id == folder.id,
ImapUid.msg_uid == msg.uid).first()
if existing_imapuid is not None:
log.error('Expected to create imapuid, but existing row found',
remote_msg_uid=msg.uid,
existing_imapuid=existing_imapuid.id)
return None
# Check if the message is valid.
# https://sentry.nylas.com/sentry/sync-prod/group/3387/
if msg.body is None:
log.warning('Server returned a message with an empty body.')
return None
new_uid = common.create_imap_message(db_session, acct, folder, msg)
self.add_message_to_thread(db_session, new_uid.message, msg)
db_session.flush()
# We're calling import_attached_events here instead of some more
# obvious place (like Message.create_from_synced) because the function
# requires new_uid.message to have been flushed.
# This is necessary because the import_attached_events does db lookups.
if new_uid.message.has_attached_events:
with db_session.no_autoflush:
import_attached_events(db_session, acct, new_uid.message)
# If we're in the polling state, then we want to report the metric
# for latency when the message was received vs created
if self.state == 'poll':
latency_millis = (
datetime.utcnow() - new_uid.message.received_date) \
.total_seconds() * 1000
metrics = [
'.'.join(['mailsync', 'providers', 'overall', 'message_latency']),
'.'.join(['mailsync', 'providers', self.provider_name, 'message_latency']),
]
for metric in metrics:
statsd_client.timing(metric, latency_millis)
return new_uid
def _count_thread_messages(self, thread_id, db_session):
count, = db_session.query(func.count(Message.id)). \
filter(Message.thread_id == thread_id).one()
return count
def add_message_to_thread(self, db_session, message_obj, raw_message):
"""Associate message_obj to the right Thread object, creating a new
thread if necessary."""
with db_session.no_autoflush:
# Disable autoflush so we don't try to flush a message with null
# thread_id.
parent_thread = fetch_corresponding_thread(
db_session, self.namespace_id, message_obj)
construct_new_thread = True
if parent_thread:
# If there's a parent thread that isn't too long already,
# add to it. Otherwise create a new thread.
parent_message_count = self._count_thread_messages(
parent_thread.id, db_session)
if parent_message_count < MAX_THREAD_LENGTH:
construct_new_thread = False
if construct_new_thread:
message_obj.thread = ImapThread.from_imap_message(
db_session, self.namespace_id, message_obj)
else:
parent_thread.messages.append(message_obj)
def download_and_commit_uids(self, crispin_client, uids):
start = datetime.utcnow()
raw_messages = crispin_client.uids(uids)
if not raw_messages:
return 0
new_uids = set()
with self.syncmanager_lock:
with session_scope(self.namespace_id) as db_session:
account = Account.get(self.account_id, db_session)
folder = Folder.get(self.folder_id, db_session)
for msg in raw_messages:
uid = self.create_message(db_session, account,
folder, msg)
if uid is not None:
db_session.add(uid)
db_session.flush()
new_uids.add(uid)
db_session.commit()
log.debug('Committed new UIDs', new_committed_message_count=len(new_uids))
# If we downloaded uids, record message velocity (#uid / latency)
if self.state == 'initial' and len(new_uids):
self._report_message_velocity(datetime.utcnow() - start,
len(new_uids))
if self.is_first_message:
self._report_first_message()
self.is_first_message = False
return len(new_uids)
def _report_first_message(self):
# Only record the "time to first message" in the inbox. Because users
# can add more folders at any time, "initial sync"-style metrics for
# other folders don't mean much.
if self.folder_role not in ['inbox', 'all']:
return
now = datetime.utcnow()
with session_scope(self.namespace_id) as db_session:
account = db_session.query(Account).get(self.account_id)
account_created = account.created_at
latency = (now - account_created).total_seconds() * 1000
metrics = [
'.'.join(['mailsync', 'providers', self.provider_name, 'first_message']),
'.'.join(['mailsync', 'providers', 'overall', 'first_message'])
]
for metric in metrics:
statsd_client.timing(metric, latency)
def _report_message_velocity(self, timedelta, num_uids):
latency = (timedelta).total_seconds() * 1000
latency_per_uid = float(latency) / num_uids
metrics = [
'.'.join(['mailsync', 'providers', self.provider_name,
'message_velocity']),
'.'.join(['mailsync', 'providers', 'overall', 'message_velocity'])
]
for metric in metrics:
statsd_client.timing(metric, latency_per_uid)
def update_uid_counts(self, db_session, **kwargs):
saved_status = db_session.query(ImapFolderSyncStatus).join(Folder). \
filter(ImapFolderSyncStatus.account_id == self.account_id,
Folder.name == self.folder_name).one()
# We're not updating the current_remote_count metric
# so don't update uid_checked_timestamp.
if kwargs.get('remote_uid_count') is None:
saved_status.update_metrics(kwargs)
else:
metrics = dict(uid_checked_timestamp=datetime.utcnow())
metrics.update(kwargs)
saved_status.update_metrics(metrics)
def get_new_uids(self, crispin_client):
try:
remote_uidnext = crispin_client.conn.folder_status(
self.folder_name, ['UIDNEXT']).get('UIDNEXT')
except ValueError:
# Work around issue where ValueError is raised on parsing STATUS
# response.
log.warning('Error getting UIDNEXT', exc_info=True)
remote_uidnext = None
except imaplib.IMAP4.error as e:
if '[NONEXISTENT]' in e.message:
raise FolderMissingError()
else:
raise e
if remote_uidnext is not None and remote_uidnext == self.uidnext:
return
log.debug('UIDNEXT changed, checking for new UIDs',
remote_uidnext=remote_uidnext, saved_uidnext=self.uidnext)
crispin_client.select_folder(self.folder_name, self.uidvalidity_cb)
with session_scope(self.namespace_id) as db_session:
lastseenuid = common.lastseenuid(self.account_id, db_session,
self.folder_id)
latest_uids = crispin_client.conn.fetch('{}:*'.format(lastseenuid + 1),
['UID']).keys()
new_uids = set(latest_uids) - {lastseenuid}
if new_uids:
for uid in sorted(new_uids):
self.download_and_commit_uids(crispin_client, [uid])
self.uidnext = remote_uidnext
def condstore_refresh_flags(self, crispin_client):
new_highestmodseq = crispin_client.conn.folder_status(
self.folder_name, ['HIGHESTMODSEQ'])['HIGHESTMODSEQ']
# Ensure that we have an initial highestmodseq value stored before we
# begin polling for changes.
if self.highestmodseq is None:
self.highestmodseq = new_highestmodseq
if new_highestmodseq == self.highestmodseq:
# Don't need to do anything if the highestmodseq hasn't
# changed.
return
elif new_highestmodseq < self.highestmodseq:
# This should really never happen, but if it does, handle it.
log.warning('got server highestmodseq less than saved '
'highestmodseq',
new_highestmodseq=new_highestmodseq,
saved_highestmodseq=self.highestmodseq)
return
log.debug('HIGHESTMODSEQ has changed, getting changed UIDs',
new_highestmodseq=new_highestmodseq,
saved_highestmodseq=self.highestmodseq)
crispin_client.select_folder(self.folder_name, self.uidvalidity_cb)
changed_flags = crispin_client.condstore_changed_flags(
self.highestmodseq)
remote_uids = crispin_client.all_uids()
# In order to be able to sync changes to tens of thousands of flags at
# once, we commit updates in batches. We do this in ascending order by
# modseq and periodically "checkpoint" our saved highestmodseq. (It's
# safe to checkpoint *because* we go in ascending order by modseq.)
# That way if the process gets restarted halfway through this refresh,
# we don't have to completely start over. It's also slow to load many
# objects into the SQLAlchemy session and then issue lots of commits;
# we avoid that by batching.
flag_batches = chunk(
sorted(changed_flags.items(), key=lambda (k, v): v.modseq),
CONDSTORE_FLAGS_REFRESH_BATCH_SIZE)
for flag_batch in flag_batches:
with session_scope(self.namespace_id) as db_session:
common.update_metadata(self.account_id, self.folder_id,
self.folder_role, dict(flag_batch),
db_session)
if len(flag_batch) == CONDSTORE_FLAGS_REFRESH_BATCH_SIZE:
interim_highestmodseq = max(v.modseq for k, v in flag_batch)
self.highestmodseq = interim_highestmodseq
with session_scope(self.namespace_id) as db_session:
local_uids = common.local_uids(self.account_id, db_session,
self.folder_id)
expunged_uids = set(local_uids).difference(remote_uids)
if expunged_uids:
# If new UIDs have appeared since we last checked in
# get_new_uids, save them first. We want to always have the
# latest UIDs before expunging anything, in order to properly
# capture draft revisions.
with session_scope(self.namespace_id) as db_session:
lastseenuid = common.lastseenuid(self.account_id, db_session,
self.folder_id)
if remote_uids and lastseenuid < max(remote_uids):
log.info('Downloading new UIDs before expunging')
self.get_new_uids(crispin_client)
common.remove_deleted_uids(self.account_id, self.folder_id,
expunged_uids)
self.highestmodseq = new_highestmodseq
def generic_refresh_flags(self, crispin_client):
now = datetime.utcnow()
slow_refresh_due = (
self.last_slow_refresh is None or
now > self.last_slow_refresh + SLOW_REFRESH_INTERVAL
)
fast_refresh_due = (
self.last_fast_refresh is None or
now > self.last_fast_refresh + FAST_REFRESH_INTERVAL
)
if slow_refresh_due:
self.refresh_flags_impl(crispin_client, SLOW_FLAGS_REFRESH_LIMIT)
self.last_slow_refresh = datetime.utcnow()
elif fast_refresh_due:
self.refresh_flags_impl(crispin_client, FAST_FLAGS_REFRESH_LIMIT)
self.last_fast_refresh = datetime.utcnow()
def refresh_flags_impl(self, crispin_client, max_uids):
crispin_client.select_folder(self.folder_name, self.uidvalidity_cb)
with session_scope(self.namespace_id) as db_session:
local_uids = common.local_uids(account_id=self.account_id,
session=db_session,
folder_id=self.folder_id,
limit=max_uids)
flags = crispin_client.flags(local_uids)
if (max_uids in self.flags_fetch_results and
self.flags_fetch_results[max_uids] == (local_uids, flags)):
# If the flags fetch response is exactly the same as the last one
# we got, then we don't need to persist any changes.
log.debug('Unchanged flags refresh response, '
'not persisting changes', max_uids=max_uids)
return
log.debug('Changed flags refresh response, persisting changes',
max_uids=max_uids)
expunged_uids = set(local_uids).difference(flags.keys())
common.remove_deleted_uids(self.account_id, self.folder_id,
expunged_uids)
with session_scope(self.namespace_id) as db_session:
common.update_metadata(self.account_id, self.folder_id,
self.folder_role, flags, db_session)
self.flags_fetch_results[max_uids] = (local_uids, flags)
def check_uid_changes(self, crispin_client):
self.get_new_uids(crispin_client)
if crispin_client.condstore_supported():
self.condstore_refresh_flags(crispin_client)
else:
self.generic_refresh_flags(crispin_client)
@property
def uidvalidity(self):
if not hasattr(self, '_uidvalidity'):
self._uidvalidity = self._load_imap_folder_info().uidvalidity
return self._uidvalidity
@uidvalidity.setter
def uidvalidity(self, value):
self._update_imap_folder_info('uidvalidity', value)
self._uidvalidity = value
@property
def uidnext(self):
if not hasattr(self, '_uidnext'):
self._uidnext = self._load_imap_folder_info().uidnext
return self._uidnext
@uidnext.setter
def uidnext(self, value):
self._update_imap_folder_info('uidnext', value)
self._uidnext = value
@property
def last_slow_refresh(self):
# We persist the last_slow_refresh timestamp so that we don't end up
# doing a (potentially expensive) full flags refresh for every account
# on every process restart.
if not hasattr(self, '_last_slow_refresh'):
self._last_slow_refresh = self._load_imap_folder_info(). \
last_slow_refresh
return self._last_slow_refresh
@last_slow_refresh.setter
def last_slow_refresh(self, value):
self._update_imap_folder_info('last_slow_refresh', value)
self._last_slow_refresh = value
@property
def highestmodseq(self):
if not hasattr(self, '_highestmodseq'):
self._highestmodseq = self._load_imap_folder_info().highestmodseq
return self._highestmodseq
@highestmodseq.setter
def highestmodseq(self, value):
self._highestmodseq = value
self._update_imap_folder_info('highestmodseq', value)
def _load_imap_folder_info(self):
with session_scope(self.namespace_id) as db_session:
imapfolderinfo = db_session.query(ImapFolderInfo). \
filter(ImapFolderInfo.account_id == self.account_id,
ImapFolderInfo.folder_id == self.folder_id). \
one()
db_session.expunge(imapfolderinfo)
return imapfolderinfo
def _update_imap_folder_info(self, attrname, value):
with session_scope(self.namespace_id) as db_session:
imapfolderinfo = db_session.query(ImapFolderInfo). \
filter(ImapFolderInfo.account_id == self.account_id,
ImapFolderInfo.folder_id == self.folder_id). \
one()
setattr(imapfolderinfo, attrname, value)
db_session.commit()
def uidvalidity_cb(self, account_id, folder_name, select_info):
assert folder_name == self.folder_name
assert account_id == self.account_id
selected_uidvalidity = select_info['UIDVALIDITY']
is_valid = (self.uidvalidity is None or
selected_uidvalidity <= self.uidvalidity)
if not is_valid:
raise UidInvalid(
'folder: {}, remote uidvalidity: {}, '
'cached uidvalidity: {}'.format(folder_name.encode('utf-8'),
selected_uidvalidity,
self.uidvalidity))
return select_info
class UidInvalid(Exception):
"""Raised when a folder's UIDVALIDITY changes, requiring a resync."""
pass
# This version is elsewhere in the codebase, so keep it for now
# TODO(emfree): clean this up.
def uidvalidity_cb(account_id, folder_name, select_info):
assert folder_name is not None and select_info is not None, \
"must start IMAP session before verifying UIDVALIDITY"
with session_scope(account_id) as db_session:
saved_folder_info = common.get_folder_info(account_id, db_session,
folder_name)
saved_uidvalidity = or_none(saved_folder_info, lambda i:
i.uidvalidity)
selected_uidvalidity = select_info['UIDVALIDITY']
if saved_folder_info:
is_valid = (saved_uidvalidity is None or
selected_uidvalidity <= saved_uidvalidity)
if not is_valid:
raise UidInvalid(
'folder: {}, remote uidvalidity: {}, '
'cached uidvalidity: {}'.format(folder_name.encode('utf-8'),
selected_uidvalidity,
saved_uidvalidity))
return select_info
|
agpl-3.0
| -1,116,494,582,475,538,600 | 43.207176 | 91 | 0.586333 | false | 4.237298 | false | false | false |
ros2/rosidl
|
rosidl_cli/rosidl_cli/command/generate/api.py
|
1
|
3504
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from .extensions import load_type_extensions
from .extensions import load_typesupport_extensions
def generate(
*,
package_name,
interface_files,
include_paths=None,
output_path=None,
types=None,
typesupports=None
):
"""
Generate source code from interface definition files.
To do so, this function leverages type representation and type
support generation support as provided by third-party package
extensions.
Each path to an interface definition file is a relative path optionally
prefixed by another path followed by a colon ':', against which the first
relative path is to be resolved.
The directory structure that these relative paths exhibit will be replicated
on output (as opposed to the prefix path, which will be ignored).
If no type representation nor type support is specified, all available ones
will be generated.
If more than one type representation or type support is generated, the
name of each will be appended to the given `output_path` to preclude
name clashes upon writing source code files.
:param package_name: name of the package to generate source code for
:param interface_files: list of paths to interface definition files
:param include_paths: optional list of paths to include dependency
interface definition files from
:param output_path: optional path to directory to hold generated
source code files, defaults to the current working directory
:param types: optional list of type representations to generate
:param typesupports: optional list of type supports to generate
:returns: list of lists of paths to generated source code files,
one group per type or type support extension invoked
"""
extensions = []
unspecific_generation = not types and not typesupports
if types or unspecific_generation:
extensions.extend(load_type_extensions(
specs=types,
strict=not unspecific_generation))
if typesupports or unspecific_generation:
extensions.extend(load_typesupport_extensions(
specs=typesupports,
strict=not unspecific_generation))
if unspecific_generation and not extensions:
raise RuntimeError('No type nor typesupport extensions were found')
if include_paths is None:
include_paths = []
if output_path is None:
output_path = pathlib.Path.cwd()
else:
os.makedirs(output_path, exist_ok=True)
if len(extensions) > 1:
return [
extension.generate(
package_name, interface_files, include_paths,
output_path=output_path / extension.name)
for extension in extensions
]
return [extensions[0].generate(
package_name, interface_files,
include_paths, output_path
)]
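# Hypothetical usage sketch (the package, paths and extension names below are
# illustrative assumptions, not part of this module):
#
#   generate(
#       package_name='example_msgs',
#       interface_files=['/opt/ros/share/example_msgs:msg/Foo.idl'],
#       output_path='gen',
#       types=['c'],
#   )
#
# Here 'msg/Foo.idl' is resolved against the prefix before the colon, and only
# the 'msg/' directory structure is replicated under 'gen/'.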
|
apache-2.0
| 1,224,383,898,062,600,000 | 34.393939 | 80 | 0.710616 | false | 4.672 | false | false | false |
jtaghiyar/kronos
|
kronos/pipelineui.py
|
1
|
3217
|
'''
Created on May 9, 2014
@author: jtaghiyar
'''
import argparse
import os
parser = argparse.ArgumentParser(description='Pipeline user interface')
parser.add_argument('-b', '--job_scheduler',
default='drmaa',
choices=['sge','drmaa'],
help="job scheduler used to manage jobs on the cluster")
parser.add_argument('-c', '--components_dir',
default=os.getcwd(),
required=True,
help="path to components_dir")
parser.add_argument('-d', '--drmaa_library_path',
default='lib/lx24-amd64/libdrmaa.so',
type=str,
help="path of drmaa library")
parser.add_argument('-e', '--pipeline_name',
default=None,
type=str,
help="pipeline name")
parser.add_argument('-j', '--num_jobs',
default=1,
type=int,
help='maximum number of simultaneous jobs per pipeline')
parser.add_argument('-l', '--log_file',
default=None,
type=str,
help="name of the log file")
parser.add_argument('-n', '--num_pipelines',
default=1,
type=int,
help='maximum number of simultaneous running pipelines')
parser.add_argument('--no_prefix',
default=False,
action='store_true',
help="""Switch off the prefix that is added to all the
output files.""")
parser.add_argument('-p','--python_installation',
default='python',
type=str,
help="python executable")
parser.add_argument('-q', '--qsub_options',
default=None,
type=str,
help="""native qsub specifications for the cluster
in a single string""")
parser.add_argument('-r', '--run_id',
default=None,
type=str,
help="pipeline run id used for re-running")
parser.add_argument('-w', '--working_dir',
default=os.getcwd(),
help="path to the working_dir")
## these options should be moved to a 'print' subcommand
parser.add_argument('--draw_vertically',
default=False,
action='store_true',
help="specify whether to draw the plot vertically")
parser.add_argument('--extension',
default="png",
type=str,
help="specify the desired extension of the resultant file")
parser.add_argument('--no_key_legend',
default=False,
action='store_true',
help="if True, hide the legend.")
parser.add_argument('--print_only',
default=False,
action='store_true',
help="""if True, print the workflow graph only without
running the pipeline.""")
args, unknown= parser.parse_known_args()
|
mit
| -5,165,773,639,326,006,000 | 32.863158 | 79 | 0.484613 | false | 4.787202 | false | false | false |
APSL/django-kaio
|
kaio/mixins/email.py
|
1
|
2693
|
# -*- coding: utf-8 -*-
import logging
import os
from kaio import Options
from functools import partial
logger = logging.getLogger(__name__)
opts = Options()
get = partial(opts.get, section='Email')
class EmailMixin(object):
"""Settings para enviar emails"""
# Django settings: https://docs.djangoproject.com/en/1.11/ref/settings/#email-backend
@property
def DEFAULT_FROM_EMAIL(self):
return get('DEFAULT_FROM_EMAIL', 'Example <[email protected]>')
@property
def EMAIL_BACKEND(self):
backend = get('EMAIL_BACKEND')
if backend:
return backend
backend = 'django.core.mail.backends.smtp.EmailBackend'
if 'django_yubin' in self.INSTALLED_APPS:
try:
import django_yubin # noqa: F401
backend = 'django_yubin.smtp_queue.EmailBackend'
except ImportError:
logger.warn('WARNING: django_yubin in INSTALLED_APPS but not pip installed.')
return backend
@property
def EMAIL_FILE_PATH(self):
return get('EMAIL_FILE_PATH', None)
@property
def EMAIL_HOST(self):
return get('EMAIL_HOST', 'localhost')
@property
def EMAIL_HOST_PASSWORD(self):
return get('EMAIL_HOST_PASSWORD', '')
@property
def EMAIL_HOST_USER(self):
return get('EMAIL_HOST_USER', '')
@property
def EMAIL_PORT(self):
return get('EMAIL_PORT', 25)
@property
def EMAIL_SUBJECT_PREFIX(self):
return get('EMAIL_SUBJECT_PREFIX', '[Django] ')
@property
def EMAIL_USE_TLS(self):
return get('EMAIL_USE_TLS', False)
# django-yubin settings: http://django-yubin.readthedocs.org/en/latest/settings.html
@property
def MAILER_PAUSE_SEND(self):
return get('MAILER_PAUSE_SEND', False)
@property
def MAILER_USE_BACKEND(self):
return get('MAILER_USE_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
@property
def MAILER_MAIL_ADMINS_PRIORITY(self):
try:
from django_yubin import constants
priority = constants.PRIORITY_HIGH
except Exception:
priority = 1
return get('MAILER_MAIL_ADMINS_PRIORITY', priority)
@property
def MAILER_MAIL_MANAGERS_PRIORITY(self):
return get('MAILER_MAIL_MANAGERS_PRIORITY', None)
@property
def MAILER_EMPTY_QUEUE_SLEEP(self):
return get('MAILER_EMPTY_QUEUE_SLEEP', 30)
@property
def MAILER_LOCK_WAIT_TIMEOUT(self):
return get('MAILER_LOCK_WAIT_TIMEOUT', 0)
@property
def MAILER_LOCK_PATH(self):
return get("MAILER_LOCK_PATH", os.path.join(self.APP_ROOT, "send_mail"))
|
bsd-3-clause
| -6,720,387,585,946,271,000 | 26.20202 | 93 | 0.629781 | false | 3.704264 | false | false | false |
ozgurgunes/django-cmskit
|
cmskit/slideshow/migrations/0001_initial.py
|
1
|
5691
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Slideshow'
db.create_table('cmsplugin_slideshow', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('slideshow', ['Slideshow'])
# Adding model 'Slide'
db.create_table('slideshow_slide', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ordering', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('slideshow', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['slideshow.Slideshow'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=216, null=True, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('picture', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('picture_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('picture_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
('alt', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('url', self.gf('django.db.models.fields.CharField')(max_length=216)),
('publish', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('slideshow', ['Slide'])
def backwards(self, orm):
# Deleting model 'Slideshow'
db.delete_table('cmsplugin_slideshow')
# Deleting model 'Slide'
db.delete_table('slideshow_slide')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'slideshow.slide': {
'Meta': {'ordering': "('ordering',)", 'object_name': 'Slide'},
'alt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'picture_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'picture_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slideshow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['slideshow.Slideshow']"}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '216', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '216'})
},
'slideshow.slideshow': {
'Meta': {'object_name': 'Slideshow', 'db_table': "'cmsplugin_slideshow'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['slideshow']
|
mit
| 5,875,987,426,555,961,000 | 64.425287 | 155 | 0.583377 | false | 3.693056 | false | false | false |
OCM-Lab-PUC/switch-chile
|
python_utility_scripts/create_transmission_csv.py
|
1
|
5810
|
# -*- coding: utf-8 -*-
# Copyright 2016 The Switch-Chile Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
# Operations, Control and Markets laboratory at Pontificia Universidad
# Católica de Chile.
import pandas, os, re, datetime, sys
from unidecode import unidecode
if sys.getdefaultencoding() != 'utf-8':
# Character encoding may raise errors if set in ascii or other simple
# encodings which do not support spanish characters.
reload(sys)
sys.setdefaultencoding('utf-8')
def limpiar(a):
    # Return a string cleaned of anomalous characters, spaces and commas
limpio = unidecode(a.replace(' ','_').replace('ó','o')).lower().replace(',','_')
while limpio[0] == '_':
limpio = limpio[1:]
while limpio[-1] == '_':
limpio = limpio[:-1]
return limpio
def SepararLineaSIC(a):
    #Some names write '220 KV' instead of '220KV'; normalize so they match
a = a.replace('k','K').replace('v','V').replace(' KV','KV')
try:
        #Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        #Get e.g. '220KV' by dropping the last space-separated token from the string, then split on the hyphen
se2 = limpiar(a.replace(a.split(' ')[-1],'').split('-')[1])
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
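# Hypothetical example (made-up line name):
#   SepararLineaSIC('Alto Jahuel - Chena 220KV') returns ['alto_jahuel', 'chena']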
def SepararLineaSIC2(a):
a = a.replace('k','K').replace('v','V').replace(' KV','KV')
try:
        #Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        #Take the part after the hyphen and strip the voltage rating before cleaning
se2 = limpiar(' '.join(a.split('-')[1].split('KV')[0].split(' ')[:-1]))
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
def SepararLineaSING(a):
try:
a = a.split('kV ')[1]
        #Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        #Take the part after the hyphen
se2 = limpiar(a.split('-')[1])
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
###############################
# Getting the SIC data        #
###############################
#Input spreadsheet to open
transmision = pandas.read_excel('capacidad_instalada_de_transmision.xlsx', sheetname= 'SIC', parse_cols = 'E:K', skiprows=6)
transmision.columns = ['SE','Tramo','dsa','Tension (KV)', 'N','Longitud (km)','Capacidad (MVA)']
#Get the columns
#for i,j in enumerate(transmision.columns.values):
# print(limpiar(j),'=',i)
linea = 0
tramo = 1
tension = 3
numerocircuitos = 4
longitud = 5
capacidad = 6
#Build a data frame of the substations for each transmission line
SE = pandas.DataFrame({'SE1' : [],'SE2' : [], 'SEalt1' : [],'SEalt2' : []})
for i in transmision.index:
    #Stop at the first empty row
if pandas.isnull(transmision.ix[i,linea]):
break
subs = SepararLineaSIC2(transmision.ix[i,tramo])
subs2 = SepararLineaSIC(transmision.ix[i,linea])
#print(subs,subs2)
fila = pandas.DataFrame([[subs[0],subs[1], subs2[0], subs2[1]]], columns=['SE1','SE2','SEalt1','SEalt2'])
SE = SE.append(fila, ignore_index = True)
#Build the new matrix with the substations, voltage, etc.
neotransmision = pandas.concat([pandas.Series(['sic' for i in range(i)], name = 'Sistema'), SE.ix[:i,0], SE.ix[:i,1], SE.ix[:i,2], SE.ix[:i,3], transmision.ix[:i-1,3], transmision.iloc[:i,4], transmision.iloc[:i,5], transmision.iloc[:i,6]], names = None, axis = 1)
################################
# Getting the SING data        #
################################
#Read, dropping the first two header rows (merged cells do not parse well...)
transmision = pandas.read_excel('capacidad_instalada_de_transmision.xlsx', sheetname= 'SING', parse_cols = 'E:J', skiprows=6,header = None)
transmision = transmision[2:].reset_index(drop = True)
linea = 0
tension = 1
numerocircuitos = 2
longitud = 3
capacidad = 5
#Build a data frame of the substations for each transmission line
SE = pandas.DataFrame({'SE1' : [],'SE2' : [], 'SEalt1' : [],'SEalt2' : []})
for i in transmision.index:
    #Stop at the first empty row
if pandas.isnull(transmision.ix[i,linea]):
break
subs = SepararLineaSING(transmision.ix[i,linea])
fila = pandas.DataFrame([[subs[0],subs[1],subs[0],subs[1]]], columns=['SE1','SE2','SEalt1','SEalt2'])
SE = SE.append(fila, ignore_index = True)
    #If no limit is reported, assign the installed capacity instead
if transmision.ix[i,capacidad] == 'N/I' or pandas.isnull(transmision.ix[i,capacidad]):
transmision.ix[i,capacidad] = transmision.ix[i,4]
#Build the new matrix with the substations, voltage, etc.
neotransmision2 = pandas.concat([pandas.Series(['sing' for i in range(i)], name = 'Sistema'), SE.ix[:i,0], SE.ix[:i,1], SE.ix[:i,0], SE.ix[:i,1], transmision.ix[:i,tension], transmision.ix[:i,numerocircuitos], transmision.iloc[:i,longitud], transmision.iloc[:i,capacidad]], names = None, axis = 1)
neotransmision2 = neotransmision2[:-1]
#Rename columns
neotransmision2.columns = ['Sistema','SE1','SE2','SEalt1','SEalt2','Tension (KV)', 'N','Longitud (km)','Capacidad (MVA)']
#Concatenate both systems
transmisionfinal = pandas.concat([neotransmision, neotransmision2])
#Convert the voltage and circuit-count columns to int
transmisionfinal[['Tension (KV)', 'N']] = transmisionfinal[['Tension (KV)', 'N']].astype(int)
#Write the data out
transmisionfinal.to_csv('transmision.csv', index = None , float_format = '%.2f')
|
apache-2.0
| 7,827,939,213,781,622,000 | 38.496599 | 297 | 0.643472 | false | 2.77932 | false | false | false |
Jackojc/Asciify
|
example/matrix.py
|
1
|
1374
|
from asciify import asciify
import random
import time
import pygame
width, height = (60, 30)
add_per_frame = 5
render = asciify(width, height, "font", fontsize=16)
random.seed(time.time())
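# Each particle is [x, y, fall speed in rows per frame].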
Particles = [
[
random.randint(0, width),
random.randint(-1, height),
random.uniform(0.1, 1.5)
] for x in range(add_per_frame)
]
chars = list("abcdefghijklmnopqrstuvwxyz0123456789!\"$%^&*()_+-=[]{}:;@'~#|\<>?,./")
while True:
render.checkexit()
keys = render.listkeys()
for num, part in enumerate(Particles):
part[1] += part[2]
if part[1] > height+1:
del Particles[num]
colorrand = (random.randint(60, 255), random.randint(150, 255), random.randint(1, 100))
render.setString(part[0], part[1], random.choice(chars), color=colorrand)
Particles.extend([
[
random.randint(0, width),
random.randint(-1, height),
random.uniform(0.1, 1.5)
] for x in range(add_per_frame)
])
render.text(0, 0, "THE MATRIX EXAMPLE", center=(1, 1))
    render.update(30) # Peasant FPS. I could add frame delays or something for 60fps, but it's beyond the scope of this example.
|
mit
| -4,344,498,825,365,630,500 | 31.714286 | 134 | 0.534207 | false | 3.673797 | false | false | false |
chrisjdavie/shares
|
machine_learning/sklearn_dataset_format.py
|
1
|
1160
|
'''
Created on 2 Sep 2014
@author: chris
'''
'''File format:
- data: list (length = number of samples) containing unicode text
- target: list (same length as data) of int references into target_names
- target_names: type names relative to target
- filenames: names of files storing data (probably target too)
'''
def main():
    '''Taken from the tutorials; a look at how they store datasets.'''
from sklearn.datasets import fetch_20newsgroups
# import numpy as np
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
twenty_train = fetch_20newsgroups(subset='train',
categories=categories,
shuffle=True,
random_state=42)
print dir(twenty_train)
print twenty_train.keys()
# print twenty_train.data[0]
print twenty_train.target[0]
print len(twenty_train.filenames)
print twenty_train.filenames[0]
print twenty_train.target_names
if __name__ == '__main__':
main()
|
mit
| 4,733,438,516,823,196,000 | 30.378378 | 86 | 0.552586 | false | 4.157706 | false | false | false |
jiarong/SSUsearch
|
scripts/count-taxon.py
|
1
|
3226
|
#! /usr/bin/env python
# count occurrences of each taxon from a mothur taxonomy file
# by gjr; 080614
"""
Count the occurrences of each taxon in a mothur taxonomy file
% python <thisFile> <sample.gg.taxonomy> <outfile.table>
"""
import sys
import os
import collections
#EXCLUDE = ['Archaea', 'Eukaryota', 'unknown']
EXCLUDE = []
LEVELS = 7
NA='Unclassified'
def read_mothur_taxonomy(f):
"""
Parse mothur classify.seqs output
Parameters:
-----------
f : str
file name of .taxonomy file from classify.seqs
Returns:
--------
dictionary
an dictionary of read name and tuples (each level of taxonomy)
"""
na_lis = ['', 'unknown', 'Unclassified',
'unclassified', 'other', 'unassigned']
d = {}
for n, line in enumerate(open(f)):
if line.startswith('#'):
continue
line = line.rstrip()
name, taxa = line.rstrip().split('\t')
skip = False
for word in EXCLUDE:
if word in taxa:
skip = True
break
if skip:
continue
        # this taxon-string parsing works for both mothur output and greengenes-style taxonomy
        taxa = taxa.rstrip(';') # for mothur classify.seqs output
lis = taxa.split(';')
lis2 = []
for item in lis:
            item = item.strip() # for copyrighter copy table ' ;' separator
if item.endswith(')'):
item = item.rsplit('(', 1)[0].strip()
# remove taxon level prefix, e.g. 'p__Firmicutes'
if '__' in item:
item = item.split('__', 1)[1]
#item = item.strip('"')
# green gene taxonomy has sapce
item = item.replace(' ', '_')
item = item.lower()
if item in na_lis:
item = NA
item = item.capitalize()
lis2.append(item)
t = tuple(lis2)
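        # For paired reads (names ending in /1 and /2), keep the mate whose
        # taxonomy has fewer 'Unclassified' levels.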
if name.endswith('/1'):
other = '{}/2'.format(name[:-2])
if other in d:
other_taxon = d[other]
if other_taxon.count(NA) > lis2.count(NA):
_ = d.pop(other)
d[name] = t
else:
d[name] = t
elif name.endswith('/2'):
other = '{}/1'.format(name[:-2])
if other in d:
other_taxon = d[other]
if other_taxon.count(NA) > lis2.count(NA):
_ = d.pop(other)
d[name] = t
else:
d[name] = t
else:
d[name] = t
return d
def main():
if len(sys.argv) != 3:
mes = ('Usage: python {} <sample.gg.taxonomy> <outfile.table>')
print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
sys.exit(1)
taxonfile = sys.argv[1]
outfile = sys.argv[2]
d = read_mothur_taxonomy(taxonfile)
g_taxonomy = d.values()
d_count = collections.Counter(g_taxonomy)
with open(outfile, 'wb') as fw:
for key, cnt in sorted(d_count.items()):
taxon_string = ';'.join(key)
print >> fw, '{}\t{}'.format(taxon_string, cnt)
if __name__ == '__main__':
main()
|
bsd-3-clause
| -8,817,099,791,117,188,000 | 23.815385 | 77 | 0.490081 | false | 3.708046 | false | false | false |
giubil/trackit
|
api/files/api/app/s3_billing_transfer.py
|
1
|
8777
|
from object_storage import S3BucketObjectStore
from config import BILLING_FILE_REGEX, CLIENT_BILLING_BUCKET, IMPORT_BILLING_AWS_KEY, IMPORT_BILLING_AWS_SECRET, LOCAL_BILLS_DIR
from es import client
from es.awsdetailedlineitem import AWSDetailedLineitem
from contextlib import contextmanager
from zipfile import ZipFile
from tempfile import mkdtemp, TemporaryFile
from shutil import rmtree
from datetime import date, datetime
import os
import io
import csv
import calendar
import elasticsearch.helpers
import itertools
import traceback
import boto3
def _is_line_item(line):
return line['RecordType'] == 'LineItem'
def _str_to_date(s):
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
@contextmanager
def extract_zipped_csvs(zip_stream, conversion=csv.reader):
with TemporaryFile() as zip_file_stream:
while True:
chunk = zip_stream.read(2 ** 15)
if not chunk:
break
zip_file_stream.write(chunk)
zip_file = ZipFile(zip_file_stream)
def files():
for name in zip_file.namelist():
if not name.lower().endswith('.csv'):
continue
file = zip_file.open(name)
yield name, conversion(file)
try:
yield files()
except:
pass
_index_es = 'awsdetailedlineitem'
_type_es = 'a_ws_detailed_lineitem'
session = boto3.Session(aws_access_key_id=IMPORT_BILLING_AWS_KEY,
aws_secret_access_key=IMPORT_BILLING_AWS_SECRET)
_converted_fields = {
'PayerAccountId': str,
'LinkedAccountId': str,
# 'RecordId': int,
'RateId': int,
'SubscriptionId': int,
'PricingPlanId': int,
'UsageQuantity': float,
'Rate': float,
'BlendedRate': float,
'UnBlendedRate': float,
'Cost': float,
'BlendedCost': float,
'UnBlendedCost': float,
'ReservedInstance': (lambda s: s == 'Y'),
'UsageStartDate': _str_to_date,
'UsageEndDate': _str_to_date,
'UsageType': str,
}
_converted_name = {
'PayerAccountId': 'payer_account_id',
'LinkedAccountId': 'linked_account_id',
'RecordId': 'record_id',
'ProductName': 'product_name',
'RateId': 'rate_id',
'SubscriptionId': 'subscription_id',
'PricingPlanId': 'pricing_plan_id',
'UsageType': 'usage_type',
'Operation': 'operation',
'AvailabilityZone': 'availability_zone',
'ReservedInstance': 'reserved_instance',
'ItemDescription': 'item_description',
'UsageStartDate': 'usage_start_date',
'UsageEndDate': 'usage_end_date',
'UsageQuantity': 'usage_quantity',
'Rate': 'rate',
'BlendedRate': 'rate',
'UnBlendedRate': 'un_blended_rate',
'Cost': 'cost',
'BlendedCost': 'cost',
'UnBlendedCost': 'un_blended_cost',
'ResourceId': 'resource_id',
'Tags': 'tag',
}
_csv_path = lambda x: '{}{}'.format(LOCAL_BILLS_DIR, x)
def _line_to_document(line):
try:
line['Tags'] = []
deleted_fields = set(('InvoiceID', 'RecordType'))
for k, v in line.iteritems():
if k.startswith('aws:') or k.startswith('user:'):
if v:
line['Tags'].append({
'key': k,
'value': v,
})
deleted_fields.add(k)
elif not v and k != 'Tags':
deleted_fields.add(k)
if not line['Tags']:
deleted_fields.add('Tags')
for k in deleted_fields:
del line[k]
for k, v in line.iteritems():
if k in _converted_fields:
line[k] = _converted_fields[k](v)
res = {}
for k, v in line.iteritems():
if k in _converted_name:
res[_converted_name[k]] = v
return res
except:
print("------")
print(line)
traceback.print_exc()
return None
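# Hypothetical example of the conversion above: a DictReader row such as
#   {'RecordType': 'LineItem', 'LinkedAccountId': '1234', 'Cost': '0.10',
#    'user:env': 'prod', 'InvoiceID': 'abc'}
# becomes roughly
#   {'linked_account_id': '1234', 'cost': 0.1,
#    'tag': [{'key': 'user:env', 'value': 'prod'}]}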
def _document_to_index(index):
def do_document_to_index(document):
try:
return {
'_index': index,
'_id': document['record_id'],
'_type': _type_es,
'_source': document,
} if 'record_id' in document else None
except:
print("------")
print(document)
traceback.print_exc()
return None
return do_document_to_index
def _clean_aws_discounts_in_es(month, es, account_id):
month = datetime.combine(month, datetime.min.time())
date_from = month.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
date_to = month.replace(day=calendar.monthrange(month.year, month.month)[1], hour=0, minute=59, second=59, microsecond=999999)
response = es.search(
index=_index_es,
filter_path=["hits.hits._id"],
body={"size": 10000, "query": {"bool": {"filter": [
{"term": {"item_description": "PAR_APN_ProgramFee_2500"}},
{"term": {"linked_account_id": account_id}},
{"range": {"usage_start_date": {"from": date_from, "to": date_to}}}
]}}})
if 'hits' not in response or 'hits' not in response['hits']:
return
ids = [
line['_id']
for line in response['hits']['hits']
]
    if not ids:
return
bulk_body = [
'{{"delete": {{"_index": "{}", "_type": "{}", "_id": "{}"}}}}'.format(_index_es, _type_es, id)
for id in ids
]
es.bulk('\n'.join(bulk_body), timeout='120s', request_timeout=120)
def _import_bill_to_es(bill, es, name):
tmp_file = bill.get_file() if CLIENT_BILLING_BUCKET else bill
account_id = BILLING_FILE_REGEX.search(name).group('account_id')
print name
print ' Cleaning'
_clean_aws_discounts_in_es(date(int(name[:-11][-4:]), int(name[:-8][-2:]), 1), es, account_id)
print ' Extracting'
with extract_zipped_csvs(tmp_file, lambda x: x) as files:
for fi, csvfile in files:
reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
line_items = itertools.ifilter(_is_line_item, reader)
documents = itertools.ifilter(bool, itertools.imap(_line_to_document, line_items))
actions = itertools.ifilter(bool, itertools.imap(_document_to_index(_index_es), documents))
print ' Importing'
elasticsearch.helpers.bulk(es, actions, timeout='120s', request_timeout=120, chunk_size=200)
tmp_file.close()
print ' Ok'
def _upload_bill_to_s3(bill, session, force_yield=False):
if not CLIENT_BILLING_BUCKET:
if not os.path.exists(_csv_path(bill.key())):
print bill.key()
print ' Downloading'
if not os.path.exists(_csv_path('')):
os.mkdir(_csv_path(''))
bill.get_file(f=io.open(_csv_path(bill.key()), 'w+b'))
print ' Ok'
return bill.key()
return
s3 = session.resource('s3')
up_bill = S3BucketObjectStore(s3.Bucket(CLIENT_BILLING_BUCKET)).object(bill.key())
if not up_bill.exists() or up_bill.size() != bill.size():
print bill.key()
print ' Downloading'
f = bill.get_file()
print ' Uploading'
up_bill.put_file(f)
print ' Ok'
return bill.key()
elif force_yield:
return bill.key()
def prepare_bill_for_s3(key, force_yield=False):
'''
    - Download bills (cf. BILLING_FILE_REGEX) from the client's S3 bucket if
      they differ from our S3.
    - Upload downloaded bills to our S3.
    - Yield names of uploaded bills.
:param key: models.AWSKey
:param force_yield: yield name of all bills instead of uploaded bills only
:return: generator (list of string)
'''
if key.billing_bucket_name is None:
return
client_session = key.get_boto_session()
client_s3 = client_session.resource('s3')
bucket = sorted(S3BucketObjectStore(client_s3.Bucket(key.billing_bucket_name)), key=lambda x: x.key(), reverse=True)
for bill in bucket:
m = BILLING_FILE_REGEX.match(bill.key())
if m is not None:
yield _upload_bill_to_s3(bill, session, force_yield)
def prepare_bill_for_es(tr_bills):
'''
- Download bills in tr_bills from our S3
- Process zip and csv
- Import data in ES
:param tr_bills: list of string
:return:
'''
if not tr_bills:
return
s3 = session.resource('s3')
AWSDetailedLineitem.init()
for bill in tr_bills:
if bill:
s3_bill = S3BucketObjectStore(s3.Bucket(CLIENT_BILLING_BUCKET)).object(bill) if CLIENT_BILLING_BUCKET else io.open(_csv_path(bill), 'r+b')
if not CLIENT_BILLING_BUCKET or s3_bill.exists():
_import_bill_to_es(s3_bill, client, bill)
|
apache-2.0
| 2,701,550,056,858,239,000 | 31.872659 | 150 | 0.583115 | false | 3.431196 | false | false | false |
ninly/pphrase
|
pphrase.py
|
1
|
5060
|
""" pphrase.py : Generate a random passphrase
Generate a random passphrase from a subset of the most common
words (max 10000) in Google's trillion-word corpus. See
http://xkcd.com/936 for the motivation and inspiration.
Licensed under terms of MIT license (see LICENSE-MIT)
Copyright (c) 2014 Jason Conklin, <[email protected]>
Usage:
pphrase.py [ -L | -R | -C | -T ] [ options ]
Options:
-h, --help Show usage help (this screen).
-v, --version Show version number and exit.
-L, --normal Normal output (like this) [default].
-R, --running Run output together (likethis).
-C, --camelcase Output in CamelCase (LikeThis).
-T, --titlecase Output in Title Case (Like This).
-w N, --words=N Number of words in passphrase
[default: 4].
-m MAXWORD, --maxword=MAXWORD Maximum word length, in characters
[default: 10].
-n MINWORD, --minword=MINWORD Maximum word length, in characters
[default: 2].
-p POOL, --poolsize=POOL Select from most common POOL words
[default: 2048].
"""
import os
import random
from docopt import docopt
basedir = os.path.dirname(os.path.abspath(__file__))+'/'
if __name__ == "__main__":
arguments = docopt(__doc__, version='0.0.4')
class ArgError(Exception):
"""Error class with no ability to can."""
def __init__(self, value="could not even."):
self.value = value
def __str__(self):
return str(self.value)
def sanitize_args():
"""Check input args for sanity."""
try:
numwords = int(arguments['--words'])
poolsize = int(arguments['--poolsize'])
minword = int(arguments['--minword'])
maxword = int(arguments['--maxword'])
except ValueError:
print("Error: Option arguments must be integers.")
return 1
try:
if (minword < 1) or (maxword < 1) or (numwords < 1):
raise ArgError("word count and length must be positive integers.")
if (poolsize > 10000) or (poolsize < 1):
raise ArgError("pool size must be between 1 and 10000.")
except ArgError as e:
print('Could not even: {}'.format(e))
return 1
return 0
def get_pool(filename = basedir+'wordlist/google-10000-english.txt'):
"""Generate word pool to user specifications."""
poolsize = int(arguments['--poolsize'])
minword = int(arguments['--minword'])
maxword = int(arguments['--maxword'])
with open(filename,'r') as f:
lines = list(f)
f.close()
words = list()
# cull outsized words
for line in lines:
if len(line.strip()) >= minword and len(line.strip()) <= maxword:
words.append(line.strip())
# only keep poolsize words
try:
if len(words) < poolsize:
# words_avail = len(words)
raise ArgError("only "+str(len(words))+" words in specified length range.")
except ArgError as e:
print('Could not even: {}'.format(e))
return
    except Exception as e:
print('Could not even: {}'.format(e))
return
else:
words = list(words)[:poolsize]
return words
def get_mode():
mode = str()
if arguments['--running']:
mode = 'running'
elif arguments['--camelcase']:
mode = 'camelcase'
elif arguments['--titlecase']:
mode = 'titlecase'
else:
mode = 'normal'
return mode
def build_pph(numwords, mode='normal'):
"""Build the passphrase."""
try:
wordpool = get_pool()
if not wordpool:
raise ValueError('Could not generate specified word pool.')
if len(wordpool) < numwords:
raise ValueError('Word pool not large enough to generate '\
+'passphrase of specified length.')
except ValueError as e:
print('Could not even: {}'.format(e))
return
pph_words = list()
pph_str = str()
while len(pph_words) < numwords:
next_word = random.choice(wordpool)
if next_word not in pph_words:
pph_words.append(next_word)
if (mode == 'normal'):
pph_str = ' '.join(pph_words)
if (mode == 'running'):
pph_str = ''.join(pph_words)
if (mode == 'titlecase'):
for i in xrange(numwords):
pph_words[i] = pph_words[i].capitalize()
pph_str = ' '.join(pph_words)
if (mode == 'camelcase'):
for i in xrange(numwords):
pph_words[i] = pph_words[i].capitalize()
pph_str = ''.join(pph_words)
return pph_str
def main():
"""Output passphrase."""
try:
if sanitize_args(): raise ArgError
except ArgError:
return
numwords = int(arguments['--words'])
mode = get_mode()
pph = build_pph(numwords, mode)
if pph: print(pph)
if __name__ == "__main__":
main()
|
mit
| 5,217,362,287,712,699,000 | 29.481928 | 87 | 0.556917 | false | 3.868502 | false | false | false |
dreamhost/akanda-appliance
|
test/unit/drivers/test_hostname.py
|
1
|
2016
|
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest2 import TestCase
import mock
from akanda.router.drivers import hostname, ip
CONFIG = mock.Mock()
CONFIG.hostname = 'akanda'
class HostnameTestCase(TestCase):
"""
"""
def setUp(self):
self.mock_execute = mock.patch('akanda.router.utils.execute').start()
self.mock_replace_file = mock.patch(
'akanda.router.utils.replace_file'
).start()
self.addCleanup(mock.patch.stopall)
self.mgr = hostname.HostnameManager()
def test_update_hostname(self):
self.mgr.update_hostname(CONFIG)
self.mock_execute.assert_has_calls([
mock.call(['/bin/hostname', 'akanda'], 'sudo'),
mock.call(['mv', '/tmp/hostname', '/etc/hostname'], 'sudo')
])
@mock.patch.object(ip.IPManager, 'get_management_address')
def test_update_hosts(self, addr):
expected = [
'127.0.0.1 localhost',
'::1 localhost ip6-localhost ip6-loopback',
'fdca:3ba5:a17a:acda:f816:3eff:fe66:33b6 akanda'
]
addr.return_value = 'fdca:3ba5:a17a:acda:f816:3eff:fe66:33b6'
self.mgr.update_hosts(CONFIG)
self.mock_execute.assert_has_calls([
mock.call(['mv', '/tmp/hosts', '/etc/hosts'], 'sudo')
])
self.mock_replace_file.assert_has_calls([mock.call(
'/tmp/hosts',
'\n'.join(expected))
])
|
apache-2.0
| 8,984,808,868,207,687,000 | 31.516129 | 77 | 0.638889 | false | 3.530648 | true | false | false |
Jorge-C/bipy
|
skbio/maths/stats/distribution.py
|
1
|
6102
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""Translations of functions from Release 2.3 of the Cephes Math Library,
which is (c) Stephen L. Moshier 1984, 1995.
"""
from __future__ import division
from math import atan, exp, log, sqrt
from skbio.maths.stats.special import (fix_rounding_error, expm1, log1p, betai,
                                       igamc, erf, erfc, GB, SQRTH, LP, LQ, EQ,
                                       MACHEP, PI, MAXGAM, MAXLOG, MINLOG,
                                       Gamma, lgam)
# NOTE: pseries() below needs exp, log, Gamma, lgam, MAXGAM, MAXLOG and
# MINLOG; Gamma, lgam and the MAX*/MIN* constants are assumed to come from
# the same `special` module as the other Cephes translations above.
def chi_high(x, df):
"""Returns right-hand tail of chi-square distribution (x to infinity).
df, the degrees of freedom, ranges from 1 to infinity (assume integers).
Typically, df is (r-1)*(c-1) for a r by c table.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
x = fix_rounding_error(x)
if x < 0:
raise ValueError("chi_high: x must be >= 0 (got %s)." % x)
if df < 1:
raise ValueError("chi_high: df must be >= 1 (got %s)." % df)
return igamc(df / 2, x / 2)
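# For example, chi_high(3.84, 1) is approximately 0.05 -- the familiar 5%
# critical value of a chi-square test with one degree of freedom.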
def z_high(x):
"""Returns right-hand tail of z distribution (0 to x).
x ranges from -infinity to +infinity; result ranges from 0 to 1
See Cephes docs for details."""
y = x * SQRTH
z = abs(y)
if z < SQRTH:
return 0.5 - 0.5 * erf(y)
else:
if x < 0:
return 1 - 0.5 * erfc(z)
else:
return 0.5 * erfc(z)
def zprob(x):
"""Returns both tails of z distribution (-inf to -x, inf to x)."""
return 2 * z_high(abs(x))
def t_low(t, df):
"""Returns left-hand tail of Student's t distribution (-infinity to x).
df, the degrees of freedom, ranges from 1 to infinity.
Typically, df is (n-1) for a sample size of n.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
if df < 1:
raise ValueError("t_low: df must be >= 1 (got %s)." % df)
return stdtr(df, t)
def t_high(t, df):
"""Returns right-hand tail of Student's t distribution (x to infinity).
df, the degrees of freedom, ranges from 1 to infinity.
Typically, df is (n-1) for a sample size of n.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
if df < 1:
raise ValueError("t_high: df must be >= 1 (got %s)." % df)
return stdtr(df, -t) # distribution is symmetric
def tprob(t, df):
"""Returns both tails of t distribution (-infinity to -x, infinity to x)"""
return 2 * t_high(abs(t), df)
def f_high(df1, df2, x):
"""Returns right-hand tail of f distribution (x to infinity).
Result ranges from 0 to 1.
See Cephes docs for details.
"""
return fdtrc(df1, df2, x)
def fdtrc(a, b, x):
"""Returns right tail of F distribution, x to infinity.
See Cephes docs for details.
"""
if min(a, b) < 1:
raise ValueError("F a and b (degrees of freedom) must both be >= 1.")
if x < 0:
raise ValueError("F distribution value of f must be >= 0.")
w = float(b) / (b + a * x)
return betai(0.5 * b, 0.5 * a, w)
def binomial_high(successes, trials, prob):
"""Returns right-hand binomial tail (X > successes) given prob(success)."""
if -1 <= successes < 0:
return 1
return bdtrc(successes, trials, prob)
def bdtrc(k, n, p):
"""Complement of binomial distribution, k+1 through n.
Uses formula bdtrc(k, n, p) = betai(k+1, n-k, p)
See Cephes docs for details.
"""
p = fix_rounding_error(p)
if (p < 0) or (p > 1):
raise ValueError("Binomial p must be between 0 and 1.")
if (k < 0) or (n < k):
raise ValueError("Binomial k must be between 0 and n.")
if k == n:
return 0
dn = n - k
if k == 0:
if p < .01:
dk = -expm1(dn * log1p(-p))
else:
dk = 1 - pow(1.0 - p, dn)
else:
dk = k + 1
dk = betai(dk, dn, p)
return dk
def stdtr(k, t):
"""Student's t distribution, -infinity to t.
See Cephes docs for details.
"""
if k <= 0:
raise ValueError('stdtr: df must be > 0.')
if t == 0:
return 0.5
if t < -2:
rk = k
z = rk / (rk + t * t)
return 0.5 * betai(0.5 * rk, 0.5, z)
# compute integral from -t to + t
if t < 0:
x = -t
else:
x = t
rk = k # degrees of freedom
z = 1 + (x * x) / rk
# test if k is odd or even
if (k & 1) != 0:
# odd k
xsqk = x / sqrt(rk)
p = atan(xsqk)
if k > 1:
f = 1
tz = 1
j = 3
while (j <= (k - 2)) and ((tz / f) > MACHEP):
tz *= (j - 1) / (z * j)
f += tz
j += 2
p += f * xsqk / z
p *= 2 / PI
else:
# even k
f = 1
tz = 1
j = 2
while (j <= (k - 2)) and ((tz / f) > MACHEP):
tz *= (j - 1) / (z * j)
f += tz
j += 2
p = f * x / sqrt(z * rk)
# common exit
if t < 0:
p = -p # note destruction of relative accuracy
p = 0.5 + 0.5 * p
return p
def pseries(a, b, x):
"""Power series for incomplete beta integral.
Use when b * x is small and x not too close to 1.
See Cephes docs for details.
"""
ai = 1 / a
u = (1 - b) * x
v = u / (a + 1)
t1 = v
t = u
n = 2
s = 0
z = MACHEP * ai
while abs(v) > z:
u = (n - b) * x / n
t *= u
v = t / (a + n)
s += v
n += 1
s += t1
s += ai
u = a * log(x)
if ((a + b) < MAXGAM) and (abs(u) < MAXLOG):
t = Gamma(a + b) / (Gamma(a) * Gamma(b))
s = s * t * pow(x, a)
else:
t = lgam(a + b) - lgam(a) - lgam(b) + u + log(s)
if t < MINLOG:
s = 0
else:
s = exp(t)
return(s)
|
bsd-3-clause
| -6,199,607,592,344,027,000 | 24.214876 | 79 | 0.493445 | false | 3.148607 | false | false | false |
gustavoatt/consultas
|
consultas_proyecto/consultas_proyecto/settings/base.py
|
1
|
7398
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-VE'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"fh2#ni-%+2-lo@24x5=#9e%i1w^dh%6s1jv0$p$e207iswh3hg"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
# Form helpers
'floppyforms',
'crispy_forms',
# REST API
'rest_framework',
# Server
'gunicorn',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pacientes_app',
'historias_app',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## CRISPY FORMS CONFIGURATION
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END CRISPY FORMS CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
|
mit
| -4,859,612,239,694,173,000 | 27.785992 | 98 | 0.694107 | false | 3.432947 | true | false | false |
crccheck/gallery-cms
|
gallery/routes.py
|
1
|
2047
|
import os
from io import BytesIO
from itertools import islice
from pathlib import Path
from PIL import Image
from starlette.responses import Response, PlainTextResponse
# TODO centralize this
BASE_DIR = os.getenv("BASE_DIR", os.path.dirname(os.path.abspath(__file__)))
THUMBNAIL_SIZE = (300, 300)
DEFAULT_SIZE = 200
def thumbs(request):
# NOTE: PIL won't create a thumbnail larger than the original
size = int(request.query_params.get("size", DEFAULT_SIZE))
image_file = Path(BASE_DIR, request.path_params["path"])
if not image_file.exists():
return PlainTextResponse("Not found", status_code=404)
im = Image.open(image_file)
# TODO cache thumbnails
im.thumbnail((size, size))
fp = BytesIO()
im.save(fp, format="webp")
fp.seek(0)
# WISHLIST support 304 not modified, etag, content-disposition
# last_modified = image_file.stat().st_mtime
# Last-Modified: Wed, 21 Oct 2015 07:28:00 GMT
# last_modified_str = dt.datetime.fromtimestamp(last_modified).strftime(
# "%a, %e %b %Y %H:%M:%S"
# )
return Response(
fp.read(),
headers={
"Cache-Control": f"public, max-age={86400 * 7}",
# "Last-Modified": f"{last_modified_str} GMT",
},
media_type="image/webp",
)
def album_thumb(request):
size = int(request.query_params.get("size", DEFAULT_SIZE / 2))
album_path = Path(BASE_DIR, request.path_params["path"])
thumb = Image.new("RGB", (size, size))
first_four_images = islice(album_path.glob("*.jpg"), 4)
for idx, img_path in enumerate(first_four_images):
im = Image.open(str(img_path))
# TODO crop thumbnails to square before thumbnailing
        im.thumbnail((size // 2, size // 2))  # PIL expects integer dimensions
thumb.paste(im, (int(idx / 2) * int(size / 2), (idx % 2) * int(size / 2)))
fp = BytesIO()
thumb.save(fp, format="webp")
fp.seek(0)
return Response(
fp.read(),
        headers={"Cache-Control": "public, max-age=900"},
media_type="image/webp",
)
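# A minimal wiring sketch for these handlers; the route paths below are
# assumptions for illustration -- the real routing lives elsewhere in this
# repo. Both endpoints consume the ``path`` path parameter used above.
from starlette.applications import Starlette
from starlette.routing import Route
app = Starlette(routes=[
    Route("/thumbs/{path:path}", thumbs),
    Route("/album-thumb/{path:path}", album_thumb),
])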
|
gpl-3.0
| -4,949,062,279,888,032,000 | 30.492308 | 82 | 0.625305 | false | 3.355738 | false | false | false |
FloatingGhost/skype4py
|
Skype4Py/call.py
|
1
|
18207
|
"""Calls, conferences.
"""
__docformat__ = 'restructuredtext en'
from .utils import *
from .enums import *
class DeviceMixin(object):
def _Device(self, Name, DeviceType=None, Set=type(None)):
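        # ``Set`` defaults to the sentinel ``type(None)`` because passing
        # ``Set=None`` is meaningful (it deactivates the device), so ``None``
        # itself cannot be used to mean "argument not given".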
args = args2dict(self._Property(Name, Cache=False))
if Set is type(None):
for dev, value in list(args.items()):
try:
args[dev] = int(value)
except ValueError:
pass
if DeviceType is None:
return args
return args.get(DeviceType, None)
elif DeviceType is None:
raise TypeError('DeviceType must be specified if Set is used')
if Set:
args[DeviceType] = tounicode(Set)
else:
args.pop(DeviceType, None)
for dev, value in list(args.items()):
args[dev] = quote(value, True)
self._Alter('SET_%s' % Name,
', '.join('%s=%s' % item for item in list(args.items())))
def CaptureMicDevice(self, DeviceType=None, Set=type(None)):
"""Queries or sets the mic capture device.
:Parameters:
DeviceType : `enums`.callIoDeviceType* or None
Mic capture device type.
Set
Value the device should be set to or None if it should be deactivated.
Querying all active devices:
Devices = CaptureMicDevice()
Returns a mapping of device types to their values. Only active devices are
returned.
Querying a specific device:
Value = CaptureMicDevice(DeviceType)
Returns a device value for the given DeviceType.
Setting a device value:
CaptureMicDevice(DeviceType, Value)
If Value is None, the device will be deactivated.
:note: This command functions for active calls only.
"""
return self._Device('CAPTURE_MIC', DeviceType, Set)
def InputDevice(self, DeviceType=None, Set=type(None)):
"""Queries or sets the sound input device.
:Parameters:
DeviceType : `enums`.callIoDeviceType* or None
Sound input device type.
Set
Value the device should be set to or None if it should be deactivated.
Querying all active devices:
Devices = InputDevice()
Returns a mapping of device types to their values. Only active devices are
returned.
Querying a specific device:
Value = InputDevice(DeviceType)
Returns a device value for the given DeviceType.
Setting a device value:
InputDevice(DeviceType, Value)
If Value is None, the device will be deactivated.
:note: This command functions for active calls only.
"""
return self._Device('INPUT', DeviceType, Set)
def OutputDevice(self, DeviceType=None, Set=type(None)):
"""Queries or sets the sound output device.
:Parameters:
DeviceType : `enums`.callIoDeviceType* or None
Sound output device type.
Set
Value the device should be set to or None if it should be deactivated.
Querying all active devices:
Devices = OutputDevice()
Returns a mapping of device types to their values. Only active devices are
returned.
Querying a specific device:
Value = OutputDevice(DeviceType)
Returns a device value for the given DeviceType.
Setting a device value:
OutputDevice(DeviceType, Value)
If Value is None, the device will be deactivated.
:note: This command functions for active calls only.
"""
return self._Device('OUTPUT', DeviceType, Set)
class Call(Cached, DeviceMixin):
"""Represents a voice/video call.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id')
def _Alter(self, AlterName, Args=None):
return self._Owner._Alter('CALL', self.Id, AlterName, Args)
def _Init(self):
self._MakeOwner()
def _Property(self, PropName, Set=None, Cache=True):
return self._Owner._Property('CALL', self.Id, PropName, Set, Cache)
def Answer(self):
"""Answers the call.
"""
#self._Property('STATUS', 'INPROGRESS')
self._Alter('ANSWER')
def CanTransfer(self, Target):
"""Queries if a call can be transferred to a contact or phone number.
:Parameters:
Target : str
Skypename or phone number the call is to be transferred to.
:return: True if call can be transferred, False otherwise.
:rtype: bool
"""
return self._Property('CAN_TRANSFER %s' % Target) == 'TRUE'
def Finish(self):
"""Ends the call.
"""
#self._Property('STATUS', 'FINISHED')
self._Alter('END', 'HANGUP')
def Forward(self):
"""Forwards a call.
"""
self._Alter('END', 'FORWARD_CALL')
def Hold(self):
"""Puts the call on hold.
"""
#self._Property('STATUS', 'ONHOLD')
self._Alter('HOLD')
def Join(self, Id):
"""Joins with another call to form a conference.
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference`
"""
#self._Alter('JOIN_CONFERENCE', Id)
reply = self._Owner._DoCommand('SET CALL %s JOIN_CONFERENCE %s' % (self.Id, Id),
'CALL %s CONF_ID' % self.Id)
return Conference(self._Owner, reply.split()[-1])
def MarkAsSeen(self):
"""Marks the call as seen.
"""
self.Seen = True
def RedirectToVoicemail(self):
"""Redirects a call to voicemail.
"""
self._Alter('END', 'REDIRECT_TO_VOICEMAIL')
def Resume(self):
"""Resumes the held call.
"""
#self.Answer()
self._Alter('RESUME')
def StartVideoReceive(self):
"""Starts video receive.
"""
self._Alter('START_VIDEO_RECEIVE')
def StartVideoSend(self):
"""Starts video send.
"""
self._Alter('START_VIDEO_SEND')
def StopVideoReceive(self):
"""Stops video receive.
"""
self._Alter('STOP_VIDEO_RECEIVE')
def StopVideoSend(self):
"""Stops video send.
"""
self._Alter('STOP_VIDEO_SEND')
def Transfer(self, *Targets):
"""Transfers a call to one or more contacts or phone numbers.
:Parameters:
Targets : str
one or more phone numbers or Skypenames the call is being transferred to.
:note: You can transfer an incoming call to a group by specifying more than one target,
first one of the group to answer will get the call.
:see: `CanTransfer`
"""
self._Alter('TRANSFER', ', '.join(Targets))
def _GetConferenceId(self):
return int(self._Property('CONF_ID'))
ConferenceId = property(_GetConferenceId,
doc="""Conference Id.
:type: int
""")
def _GetDatetime(self):
from datetime import datetime
return datetime.fromtimestamp(self.Timestamp)
Datetime = property(_GetDatetime,
doc="""Date and time of the call.
:type: datetime.datetime
:see: `Timestamp`
""")
def _SetDTMF(self, Value):
self._Alter('DTMF', Value)
DTMF = property(fset=_SetDTMF,
doc="""Set this property to send DTMF codes. Permitted symbols are: [0..9, #, \*].
:type: str
:note: This command functions for active calls only.
""")
def _GetDuration(self):
return int(self._Property('DURATION', Cache=False))
Duration = property(_GetDuration,
doc="""Duration of the call in seconds.
:type: int
""")
def _GetFailureReason(self):
return int(self._Property('FAILUREREASON'))
FailureReason = property(_GetFailureReason,
doc="""Call failure reason. Read if `Status` == `enums.clsFailed`.
:type: `enums`.cfr*
""")
def _GetForwardedBy(self):
return str(self._Property('FORWARDED_BY'))
ForwardedBy = property(_GetForwardedBy,
doc="""Skypename of the user who forwarded a call.
:type: str
""")
def _GetId(self):
return self._Handle
Id = property(_GetId,
doc="""Call Id.
:type: int
""")
def _GetInputStatus(self):
return (self._Property('VAA_INPUT_STATUS') == 'TRUE')
InputStatus = property(_GetInputStatus,
doc="""True if call voice input is enabled.
:type: bool
""")
def _GetParticipants(self):
count = int(self._Property('CONF_PARTICIPANTS_COUNT'))
return ParticipantCollection(self, range(count))
Participants = property(_GetParticipants,
doc="""Participants of a conference call not hosted by the user.
:type: `ParticipantCollection`
""")
def _GetPartnerDisplayName(self):
return self._Property('PARTNER_DISPNAME')
PartnerDisplayName = property(_GetPartnerDisplayName,
doc="""The DisplayName of the remote caller.
:type: unicode
""")
def _GetPartnerHandle(self):
return str(self._Property('PARTNER_HANDLE'))
PartnerHandle = property(_GetPartnerHandle,
doc="""The Skypename of the remote caller.
:type: str
""")
def _GetPstnNumber(self):
return str(self._Property('PSTN_NUMBER'))
PstnNumber = property(_GetPstnNumber,
doc="""PSTN number of the call.
:type: str
""")
def _GetPstnStatus(self):
return self._Property('PSTN_STATUS')
PstnStatus = property(_GetPstnStatus,
doc="""PSTN number status.
:type: unicode
""")
def _GetRate(self):
return int(self._Property('RATE'))
Rate = property(_GetRate,
doc="""Call rate. Expressed using `RatePrecision`. If you're just interested in the call rate
expressed in current currency, use `RateValue` instead.
:type: int
:see: `RateCurrency`, `RatePrecision`, `RateToText`, `RateValue`
""")
def _GetRateCurrency(self):
return self._Property('RATE_CURRENCY')
RateCurrency = property(_GetRateCurrency,
doc="""Call rate currency.
:type: unicode
:see: `Rate`, `RatePrecision`, `RateToText`, `RateValue`
""")
def _GetRatePrecision(self):
return int(self._Property('RATE_PRECISION'))
RatePrecision = property(_GetRatePrecision,
doc="""Call rate precision. Expressed as a number of times the call rate has to be divided by 10.
:type: int
:see: `Rate`, `RateCurrency`, `RateToText`, `RateValue`
""")
def _GetRateToText(self):
return ('%s %.3f' % (self.RateCurrency, self.RateValue)).strip()
RateToText = property(_GetRateToText,
doc="""Returns the call rate as a text with currency and properly formatted value.
:type: unicode
:see: `Rate`, `RateCurrency`, `RatePrecision`, `RateValue`
""")
def _GetRateValue(self):
if self.Rate < 0:
return 0.0
return float(self.Rate) / (10 ** self.RatePrecision)
RateValue = property(_GetRateValue,
doc="""Call rate value. Expressed in current currency.
:type: float
:see: `Rate`, `RateCurrency`, `RatePrecision`, `RateToText`
""")
def _GetSeen(self):
return (self._Property('SEEN') == 'TRUE')
def _SetSeen(self, Value):
self._Property('SEEN', cndexp(Value, 'TRUE', 'FALSE'))
Seen = property(_GetSeen, _SetSeen,
doc="""Queries/sets the seen status of the call. True if the call was seen, False otherwise.
:type: bool
:note: You cannot alter the call seen status from seen to unseen.
""")
def _GetStatus(self):
return str(self._Property('STATUS'))
def _SetStatus(self, Value):
self._Property('STATUS', str(Value))
Status = property(_GetStatus, _SetStatus,
doc="""The call status.
:type: `enums`.cls*
""")
def _GetSubject(self):
return self._Property('SUBJECT')
Subject = property(_GetSubject,
doc="""Call subject.
:type: unicode
""")
def _GetTargetIdentity(self):
return str(self._Property('TARGET_IDENTITY'))
TargetIdentity = property(_GetTargetIdentity,
doc="""Target number for incoming SkypeIn calls.
:type: str
""")
def _GetTimestamp(self):
return float(self._Property('TIMESTAMP'))
Timestamp = property(_GetTimestamp,
doc="""Call date and time expressed as a timestamp.
:type: float
:see: `Datetime`
""")
def _GetTransferActive(self):
return self._Property('TRANSFER_ACTIVE') == 'TRUE'
TransferActive = property(_GetTransferActive,
doc="""Returns True if the call has been transferred.
:type: bool
""")
def _GetTransferredBy(self):
return str(self._Property('TRANSFERRED_BY'))
TransferredBy = property(_GetTransferredBy,
doc="""Returns the Skypename of the user who transferred the call.
:type: str
""")
def _GetTransferredTo(self):
return str(self._Property('TRANSFERRED_TO'))
TransferredTo = property(_GetTransferredTo,
doc="""Returns the Skypename of the user or phone number the call has been transferred to.
:type: str
""")
def _GetTransferStatus(self):
return str(self._Property('TRANSFER_STATUS'))
TransferStatus = property(_GetTransferStatus,
doc="""Returns the call transfer status.
:type: `enums`.cls*
""")
def _GetType(self):
return str(self._Property('TYPE'))
Type = property(_GetType,
doc="""Call type.
:type: `enums`.clt*
""")
def _GetVideoReceiveStatus(self):
return str(self._Property('VIDEO_RECEIVE_STATUS'))
VideoReceiveStatus = property(_GetVideoReceiveStatus,
doc="""Call video receive status.
:type: `enums`.vss*
""")
def _GetVideoSendStatus(self):
return str(self._Property('VIDEO_SEND_STATUS'))
VideoSendStatus = property(_GetVideoSendStatus,
doc="""Call video send status.
:type: `enums`.vss*
""")
def _GetVideoStatus(self):
return str(self._Property('VIDEO_STATUS'))
VideoStatus = property(_GetVideoStatus,
doc="""Call video status.
:type: `enums`.cvs*
""")
def _GetVmAllowedDuration(self):
return int(self._Property('VM_ALLOWED_DURATION'))
VmAllowedDuration = property(_GetVmAllowedDuration,
doc="""Returns the permitted duration of a voicemail in seconds.
:type: int
""")
def _GetVmDuration(self):
return int(self._Property('VM_DURATION'))
VmDuration = property(_GetVmDuration,
doc="""Returns the duration of a voicemail.
:type: int
""")
class CallCollection(CachedCollection):
_CachedType = Call
class Participant(Cached):
"""Represents a conference call participant.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id', 'Idx', 'Handle')
def _Property(self, Prop):
# Prop: 0 = user name, 1 = call type, 2 = call status, 3 = display name
reply = self._Owner._Property('CONF_PARTICIPANT %d' % self.Idx)
return chop(reply, 3)[Prop]
def _GetCall(self):
return self._Owner
Call = property(_GetCall,
doc="""Call object.
:type: `Call`
""")
def _GetCallStatus(self):
return str(self._Property(2))
CallStatus = property(_GetCallStatus,
doc="""Call status of a participant in a conference call.
:type: `enums`.cls*
""")
def _GetCallType(self):
return str(self._Property(1))
CallType = property(_GetCallType,
doc="""Call type in a conference call.
:type: `enums`.clt*
""")
def _GetDisplayName(self):
return self._Property(3)
DisplayName = property(_GetDisplayName,
doc="""DisplayName of a participant in a conference call.
:type: unicode
""")
def _GetHandle(self):
return str(self._Property(0))
Handle = property(_GetHandle,
doc="""Skypename of a participant in a conference call.
:type: str
""")
def _GetId(self):
return self._Owner.Id
Id = property(_GetId,
doc="""Call Id.
:type: int
""")
def _GetIdx(self):
return self._Handle
Idx = property(_GetIdx,
doc="""Call participant index.
:type: int
""")
class ParticipantCollection(CachedCollection):
_CachedType = Participant
class Conference(Cached):
"""Represents a conference call.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id')
def Finish(self):
"""Finishes a conference so all active calls have the status
`enums.clsFinished`.
"""
for c in self._GetCalls():
c.Finish()
def Hold(self):
"""Places all calls in a conference on hold so all active calls
have the status `enums.clsLocalHold`.
"""
for c in self._GetCalls():
c.Hold()
def Resume(self):
"""Resumes a conference that was placed on hold so all active calls
have the status `enums.clsInProgress`.
"""
for c in self._GetCalls():
c.Resume()
def _GetActiveCalls(self):
return CallCollection(self._Owner, (x.Id for x in self._Owner.ActiveCalls if x.ConferenceId == self.Id))
ActiveCalls = property(_GetActiveCalls,
doc="""Active calls with the same conference ID.
:type: `CallCollection`
""")
def _GetCalls(self):
return CallCollection(self._Owner, (x.Id for x in self._Owner.Calls() if x.ConferenceId == self.Id))
Calls = property(_GetCalls,
doc="""Calls with the same conference ID.
:type: `CallCollection`
""")
def _GetId(self):
return self._Handle
Id = property(_GetId,
doc="""Id of a conference.
:type: int
""")
class ConferenceCollection(CachedCollection):
_CachedType = Conference
|
bsd-3-clause
| -244,468,576,796,218,620 | 24.752475 | 112 | 0.598396 | false | 4.0496 | false | false | false |
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/380_BringOutTheFlavorOfIngredients/__init__.py
|
1
|
3959
|
# Made by disKret & DrLecter
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "380_BringOutTheFlavorOfIngredients"
#NPC
ROLLANT = 30069
#MOBS
DIRE_WOLF = 20205
KADIF_WEREWOLF = 20206
GIANT_MIST_LEECH = 20225
#ITEMS
RITRONS_FRUIT,MOON_FACE_FLOWER,LEECH_FLUIDS = range(5895,5898)
ANTIDOTE = 1831
RITRON_JELLY = 5960
JELLY_RECIPE = 5959
#mob:[chance,item,max]
DROPLIST = {
DIRE_WOLF:[10,RITRONS_FRUIT,4],
KADIF_WEREWOLF:[50,MOON_FACE_FLOWER,20],
GIANT_MIST_LEECH:[50,LEECH_FLUIDS,10]
}
#CHANCE
RECIPE_CHANCE = 55
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "30069-4.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "30069-12.htm" :
if st.getInt("cond") == 6 :
st.giveItems(JELLY_RECIPE,1)
st.playSound("ItemSound.quest_finish")
else :
htmltext = "I'll squeeze the jelly from your eyes"
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond=st.getInt("cond")
if cond == 0 :
if player.getLevel() >= 24 :
htmltext = "30069-1.htm"
else:
htmltext = "30069-0.htm"
st.exitQuest(1)
elif cond == 1 :
htmltext = "30069-6.htm"
elif cond == 2 :
if st.getQuestItemsCount(ANTIDOTE) >= 2 and st.getQuestItemsCount(RITRONS_FRUIT) == 4 and st.getQuestItemsCount(MOON_FACE_FLOWER) == 20 and st.getQuestItemsCount(LEECH_FLUIDS) == 10 :
st.takeItems(RITRONS_FRUIT,-1)
st.takeItems(MOON_FACE_FLOWER,-1)
st.takeItems(LEECH_FLUIDS,-1)
st.takeItems(ANTIDOTE,2)
st.set("cond","3")
htmltext = "30069-7.htm"
else :
htmltext = "30069-6.htm"
elif cond == 3 :
st.set("cond","4")
htmltext = "30069-8.htm"
elif cond == 4 :
st.set("cond","5")
htmltext = "30069-9.htm"
elif cond == 5 :
st.set("cond","6")
htmltext = "30069-10.htm"
elif cond == 6 :
st.giveItems(RITRON_JELLY,1)
if st.getRandom(100) < RECIPE_CHANCE :
htmltext = "30069-11.htm"
else :
htmltext = "30069-13.htm"
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
if st.getInt("cond") == 1 :
chance,item,max = DROPLIST[npc.getNpcId()]
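    # divmod splits the rate-scaled chance into a guaranteed drop count
    # (quotient) and a leftover percent chance for one extra item (remainder)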
numItems,chance = divmod(chance*Config.RATE_DROP_QUEST,100)
count = st.getQuestItemsCount(item)
if count < max :
if st.getRandom(100) < chance :
numItems = numItems + 1
numItems = int(numItems)
if count + numItems > max :
numItems = max - count
if numItems != 0 :
st.giveItems(item,numItems)
if st.getQuestItemsCount(RITRONS_FRUIT) == 4 and st.getQuestItemsCount(MOON_FACE_FLOWER) == 20 and st.getQuestItemsCount(LEECH_FLUIDS) == 10 :
st.set("cond","2")
st.playSound("ItemSound.quest_middle")
else :
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(380,qn,"Bring Out The Flavor Of Ingredients")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(ROLLANT)
QUEST.addTalkId(ROLLANT)
for mob in DROPLIST.keys():
QUEST.addKillId(mob)
for item in range(5895,5898):
STARTED.addQuestDrop(ROLLANT,item,1)
|
gpl-3.0
| -8,783,527,725,578,094,000 | 28.333333 | 188 | 0.635767 | false | 2.751216 | false | false | false |
pepetreshere/odoo
|
addons/account_fleet/models/account_move.py
|
2
|
1946
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
class AccountMove(models.Model):
_inherit = 'account.move'
def _post(self, soft=True):
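        # On the first posting of a vendor bill, create one fleet service
        # log per move line that references a vehicle, and post a message
        # on each log linking back to the originating bill.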
vendor_bill_service = self.env.ref('account_fleet.data_fleet_service_type_vendor_bill', raise_if_not_found=False)
if not vendor_bill_service:
return super()._post(soft)
val_list = []
log_list = []
not_posted_before = self.filtered(lambda r: not r.posted_before)
        posted = super()._post(soft)  # We need the move name to be set, but we also need to know which moves are posted for the first time.
for line in (not_posted_before & posted).line_ids.filtered(lambda ml: ml.vehicle_id):
val = {
'service_type_id': vendor_bill_service.id,
'vehicle_id': line.vehicle_id.id,
'amount': line.price_subtotal,
'vendor_id': line.partner_id.id,
'description': line.name,
}
log = _('Service Vendor Bill: <a href=# data-oe-model=account.move data-oe-id={move_id}>{move_name}</a>').format(
move_id=line.move_id.id,
move_name=line.move_id.name,
)
val_list.append(val)
log_list.append(log)
log_service_ids = self.env['fleet.vehicle.log.services'].create(val_list)
for log_service_id, log in zip(log_service_ids, log_list):
log_service_id.message_post(body=log)
return posted
class AccountMoveLine(models.Model):
_inherit = 'account.move.line'
vehicle_id = fields.Many2one('fleet.vehicle', string='Vehicle')
need_vehicle = fields.Boolean(compute='_compute_need_vehicle',
help="Technical field to decide whether the vehicle_id field is editable")
def _compute_need_vehicle(self):
self.need_vehicle = False
|
agpl-3.0
| 7,216,238,615,381,273,000 | 40.404255 | 139 | 0.607914 | false | 3.590406 | false | false | false |
LaboratoireMecaniqueLille/crappy
|
crappy/blocks/grapher.py
|
1
|
5641
|
# coding: utf-8
import numpy as np
from .block import Block
from .._global import OptionalModule
try:
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
except (ModuleNotFoundError, ImportError):
plt = OptionalModule("matplotlib")
Button = OptionalModule("matplotlib")
class Grapher(Block):
"""The grapher receive data from a block (via a :ref:`Link`) and plots it."""
def __init__(self,
*labels,
length=0,
freq=2,
maxpt=20000,
window_size=(8, 8),
window_pos=None,
interp=True,
backend="TkAgg"):
"""Sets the args and initializes the parent class.
Args:
*labels (:obj:`tuple`): Tuples of the columns labels of input data for
plotting. You can add as much as you want, depending on your
performances. The first value is the `x` label, the second is the `y`
label.
length (:obj:`int`, optional): If `0` the graph is static and displays
all data from the start of the assay. Else only displays the last
``length`` received chunks, and drops the previous ones.
freq (:obj:`float`, optional): The refresh rate of the graph. May cause
high memory consumption if set too high.
maxpt (:obj:`int`, optional): The maximum number of points displayed on
the graph. When reaching this limit, the block deletes one point out of
two but this is almost invisible to the user.
window_size (:obj:`tuple`, optional): The size of the graph, in inches.
window_pos (:obj:`tuple`, optional): The position of the graph in pixels.
The first value is for the `x` direction, the second for the `y`
direction. The origin is the top left corner. Works with multiple
screens.
interp (:obj:`bool`, optional): If :obj:`True`, the points of data will
        be linked to the following by straight lines. Else, each value will be
displayed as constant until the next update.
      backend (:obj:`str`, optional): The :mod:`matplotlib` backend to use.
Example:
::
graph = Grapher(('t(s)', 'F(N)'), ('t(s)', 'def(%)'))
will plot a dynamic graph with two lines plot (`F=f(t)` and `def=f(t)`).
::
graph = Grapher(('def(%)', 'F(N)'), length=0)
will plot a static graph.
::
graph = Grapher(('t(s)', 'F(N)'), length=30)
will plot a dynamic graph displaying the last 30 chunks of data.
"""
Block.__init__(self)
self.niceness = 10
self.length = length
self.freq = freq
self.maxpt = maxpt
self.window_size = window_size
self.window_pos = window_pos
self.interp = interp
self.backend = backend
self.labels = labels
def prepare(self):
if self.backend:
plt.switch_backend(self.backend)
self.f = plt.figure(figsize=self.window_size)
self.ax = self.f.add_subplot(111)
self.lines = []
for _ in self.labels:
if self.interp:
self.lines.append(self.ax.plot([], [])[0])
else:
self.lines.append(self.ax.step([], [])[0])
# Keep only 1/factor points on each line
self.factor = [1 for _ in self.labels]
# Count to drop exactly 1/factor points, no more and no less
self.counter = [0 for _ in self.labels]
legend = [y for x, y in self.labels]
plt.legend(legend, bbox_to_anchor=(-0.03, 1.02, 1.06, .102), loc=3,
ncol=len(legend), mode="expand", borderaxespad=1)
plt.xlabel(self.labels[0][0])
plt.ylabel(self.labels[0][1])
plt.grid()
self.axclear = plt.axes([.8, .02, .15, .05])
self.bclear = Button(self.axclear, 'Clear')
self.bclear.on_clicked(self.clear)
if self.window_pos:
mng = plt.get_current_fig_manager()
mng.window.wm_geometry("+%s+%s" % self.window_pos)
plt.draw()
plt.pause(.001)
def clear(self, event=None):
for line in self.lines:
line.set_xdata([])
line.set_ydata([])
self.factor = [1 for _ in self.labels]
self.counter = [0 for _ in self.labels]
def loop(self):
# We need to recv data from all the links, but keep
# ALL of the data, even with the same label (so not get_all_last)
data = self.recv_all_delay()
for i, (lx, ly) in enumerate(self.labels):
x, y = 0, 0 # So that if we don't find it, we do nothing
for d in data:
if lx in d and ly in d: # Find the first input with both labels
dx = d[lx][self.factor[i]-self.counter[i]-1::self.factor[i]]
dy = d[ly][self.factor[i]-self.counter[i]-1::self.factor[i]]
self.counter[i] = (self.counter[i]+len(d[lx])) % self.factor[i]
x = np.hstack((self.lines[i].get_xdata(), dx))
y = np.hstack((self.lines[i].get_ydata(), dy))
break
if isinstance(x, int):
break
if self.length and len(x) >= self.length:
# Remove the beginning if the graph is dynamic
x = x[-self.length:]
y = y[-self.length:]
elif len(x) > self.maxpt:
# Reduce the number of points if we have to many to display
print("[Grapher] Too many points on the graph {} ({}>{})".format(
i, len(x), self.maxpt))
x, y = x[::2], y[::2]
self.factor[i] *= 2
print("[Grapher] Resampling factor is now {}".format(self.factor[i]))
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
self.ax.relim() # Update the window
self.ax.autoscale_view(True, True, True)
self.f.canvas.draw() # Update the graph
self.f.canvas.flush_events()
def finish(self):
plt.close("all")
|
gpl-2.0
| -8,069,995,091,647,567,000 | 35.160256 | 79 | 0.601489 | false | 3.523423 | false | false | false |
xi-studio/anime
|
newnet/show.py
|
1
|
1080
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix
head = np.random.randint(low=0,high=10,size=20)
tail = np.random.randint(low=0,high=10,size=20)
row = np.arange(20)
data = np.ones(20)
a = csc_matrix((data, (row,head)),shape=(20,10)).toarray()
b = csc_matrix((data, (row,tail)),shape=(20,10)).toarray()
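# a and b are one-hot incidence matrices: row i of a has a single 1 in
# column head[i], and row i of b has a single 1 in column tail[i]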
def plotCM(cm,title,colorbarOn,givenAX):
ax = givenAX
idx = np.arange(10)
idy = np.arange(20)
plt.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=5.0)
ax.set_xticks(range(10))
ax.set_xticklabels(idx)
plt.title(title,size=12)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j,i,int(cm[i,j]),va='center', ha='center')
#fig1=plt.subplot(1, 3, 1)
#plotCM(a,"Head Index","off",fig1.axes)
fig2=plt.subplot(1, 1, 1)
w = np.random.randn(20,1)
plt.matshow(w, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
for x in range(20):
fig2.axes.text(0,x,w[x,0],va='center', ha='center')
#fig3=plt.subplot(1, 3, 3)
#plotCM(b,"Tail Index","off",fig3.axes)
plt.show()
|
mit
| 6,583,698,324,841,650,000 | 23.545455 | 65 | 0.642593 | false | 2.443439 | false | false | false |
wahur666/kodok
|
python/Base64/ip-ban.py
|
1
|
1179
|
#!/usr/bin/python3
__author__ = 'Imre'
import sys
import os
import re
def main():
if len(sys.argv) == 2:
inputfile = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), sys.argv[1])
if os.path.isfile(inputfile):
output = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),'done.txt')
infile = open(inputfile, 'r')
outfile = open(output, 'w')
temp = infile.read().splitlines()
out = []
pattern = re.compile(r"\d*\.\d*\.\d*\.\d*")
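            # mask the host part of anything that looks like an IPv4
            # address, e.g. '192.168.1.42' -> '192.168.*.*'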
for line in temp:
if line == '':
continue
else:
if re.search(pattern,line):
tags = line.split(".")
tags[2],tags[3] = "*","*"
a = ".".join(tags)
out.append(a)
else:
out.append(line)
out = list(set(out))
            outfile.write("\n".join(out))
            infile.close()
            outfile.close()
            print('Done')
        else:
            print("file does not exist")
else:
        print('not enough parameters')
if __name__ == '__main__': main()
|
gpl-3.0
| -950,800,508,332,692,500 | 30.891892 | 92 | 0.433418 | false | 3.827922 | false | false | false |
storecast/holon
|
holon/services/httplib.py
|
1
|
1461
|
from __future__ import absolute_import
from . import HttpService
from httplib import HTTPConnection, HTTPException, HTTPSConnection
from socket import timeout, error
import time
class HttpLibHttpService(HttpService):
"""
HttpService using python batteries' httplib.
"""
def __init__(self, *args, **kwargs):
super(HttpLibHttpService, self).__init__(*args, **kwargs)
if self.ssl:
self.connection_class = HTTPSConnection
else:
self.connection_class = HTTPConnection
def get_transport(self):
"""Helper method to improve testability."""
return self.connection_class(self.host, self.port,
timeout=self.connect_timeout)
    def _call(self, body, headers):
        start_time = time.time()
        try:
            connection = self.get_transport()
            # send the request before reading the response; the original
            # snippet skipped this step. "POST" and ``self.path`` are
            # assumptions about what the HttpService base class provides.
            connection.request("POST", self.path, body, headers)
            response = connection.getresponse()
        except (HTTPException, timeout, error), e:
            raise self.communication_error_class(u"%s failed with %s when attempting to make a call to %s with body %s" % (self.__class__.__name__, e.__class__.__name__, self.base_url, body))
        else:
            data = unicode(response.read(), "utf-8")
        finally:
            connection.close()
        end_time = time.time()
        return response.status, data, (end_time - start_time)*1000
@property
def protocol(self):
return self.connection_class._http_vsn_str
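# Usage sketch (hedged): the constructor arguments below are assumptions
# about what the HttpService base class accepts -- its real signature
# lives in holon.services:
#
#   service = HttpLibHttpService(host='api.example.com', port=443, ssl=True)
#   status, data, elapsed_ms = service._call('{"ping": 1}',
#                                            {'Content-Type': 'application/json'})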
|
bsd-2-clause
| 40,881,219,972,397,120 | 34.634146 | 191 | 0.614648 | false | 4.297059 | false | false | false |
rockneurotiko/wirecloud
|
src/wirecloud/platform/workspace/mashupTemplateGenerator.py
|
1
|
12686
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import six
from wirecloud.catalogue.models import CatalogueResource
from wirecloud.commons.utils.template.base import parse_contacts_info
from wirecloud.commons.utils.template.writers import rdf
from wirecloud.commons.utils.template.writers import xml
from wirecloud.platform.models import IWidget
from wirecloud.platform.wiring.utils import get_wiring_skeleton, parse_wiring_old_version
from wirecloud.platform.workspace.utils import VariableValueCacheManager
def get_iwidgets_description(included_iwidgets):
    description = "Wirecloud Mashup composed of: "
    description += ', '.join([iwidget.widget.resource.get_processed_info()['title'] for iwidget in included_iwidgets])
return description
def get_workspace_description(workspace):
included_iwidgets = IWidget.objects.filter(tab__workspace=workspace)
return get_iwidgets_description(included_iwidgets)
def get_current_operator_pref_value(operator, preference):
if preference['name'] in operator['preferences']:
return "%s" % operator['preferences'][preference['name']]['value']
else:
return preference['default']
def process_iwidget(workspace, iwidget, wiring, parametrization, readOnlyWidgets):
widget = iwidget.widget
widget_description = widget.resource.get_template().get_resource_info()
iwidget_id = str(iwidget.id)
iwidget_params = {}
if iwidget_id in parametrization:
iwidget_params = parametrization[iwidget_id]
cache_manager = VariableValueCacheManager(workspace, workspace.creator)
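    # Parametrization entries (structure inferred from the loops below) map
    # variable names to {'source': 'default'|'current'|'custom',
    # 'status': 'normal'|'readonly'|'hidden', 'value': <custom value>}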
# input and output endpoints
for output_endpoint in widget_description['wiring']['outputs']:
wiring['outputs'].append({
'name': output_endpoint['name'],
'type': output_endpoint['type'],
'label': output_endpoint['label'],
'description': output_endpoint['description'],
'friendcode': output_endpoint['friendcode'],
})
for input_endpoint in widget_description['wiring']['inputs']:
wiring['inputs'].append({
'name': input_endpoint['name'],
'type': input_endpoint['type'],
'label': input_endpoint['label'],
'description': input_endpoint['description'],
'friendcode': input_endpoint['friendcode'],
'actionlabel': input_endpoint['actionlabel'],
})
# preferences
widget_preferences = widget_description['preferences']
preferences = {}
for pref in widget_preferences:
status = 'normal'
if pref['name'] in iwidget_params:
iwidget_param_desc = iwidget_params[pref['name']]
status = iwidget_param_desc.get('status', 'normal')
source = iwidget_param_desc.get('source', 'current')
if source == 'default':
if status == 'normal':
# Do not issue a Preference element for this preference
continue
value = None
elif source == 'current':
value = cache_manager.get_variable_value_from_varname(iwidget, pref['name'])
elif source == 'custom':
value = iwidget_param_desc['value']
else:
raise Exception('Invalid preference value source: %s' % source)
else:
value = cache_manager.get_variable_value_from_varname(iwidget, pref['name'])
preferences[pref['name']] = {
'readonly': status != 'normal',
'hidden': status == 'hidden',
}
if value is not None:
if pref['type'] == 'boolean':
value = str(value).lower()
elif pref['type'] == 'number':
value = str(value)
preferences[pref['name']]['value'] = value
# iWidget properties
widget_properties = widget_description['properties']
properties = {}
for prop in widget_properties:
status = 'normal'
if prop['name'] in iwidget_params:
iwidget_param_desc = iwidget_params[prop['name']]
status = iwidget_param_desc.get('status', 'normal')
source = iwidget_param_desc.get('source', 'current')
if source == 'default':
if status == 'normal':
# Do not issue a Property element for this property
continue
else:
value = None
elif source == 'current':
value = cache_manager.get_variable_value_from_varname(iwidget, prop['name'])
elif source == 'custom':
value = iwidget_param_desc['value']
else:
raise Exception('Invalid property value source: %s' % source)
else:
value = cache_manager.get_variable_value_from_varname(iwidget, prop['name'])
properties[prop['name']] = {
'readonly': status != 'normal',
'value': value,
}
return {
'id': iwidget_id,
'vendor': iwidget.widget.resource.vendor,
'name': iwidget.widget.resource.short_name,
'version': iwidget.widget.resource.version,
'title': iwidget.name,
'readonly': readOnlyWidgets,
'properties': properties,
'preferences': preferences,
'position': {
'x': str(iwidget.positions['widget']['left']),
'y': str(iwidget.positions['widget']['top']),
'z': str(iwidget.positions['widget']['zIndex']),
},
'rendering': {
'width': str(iwidget.positions['widget']['width']),
'height': str(iwidget.positions['widget']['height']),
'layout': str(iwidget.layout),
'fulldragboard': bool(iwidget.positions['widget']['fulldragboard']),
'minimized': bool(iwidget.positions['widget']['minimized']),
},
}
def build_json_template_from_workspace(options, workspace, user):
options['type'] = 'mashup'
options['params'] = []
options['embedmacs'] = options.get('embedmacs', False) is True
options['embedded'] = set()
options['translations'] = {}
options['translation_index_usage'] = {}
description = options.get('description', '').strip()
if description == '':
options['description'] = get_workspace_description(workspace)
if 'authors' not in options:
options['authors'] = ({'name': six.text_type(user)},)
elif isinstance(options['authors'], six.text_type):
options['authors'] = parse_contacts_info(options['authors'])
if 'contributors' not in options:
options['contributors'] = ()
elif isinstance(options['contributors'], six.text_type):
options['contributors'] = parse_contacts_info(options['contributors'])
options['requirements'] = []
readOnlyWidgets = options.get('readOnlyWidgets', False)
parametrization = options.get('parametrization')
if not parametrization:
parametrization = {}
if 'iwidgets' not in parametrization:
parametrization['iwidgets'] = {}
if 'ioperators' not in parametrization:
parametrization['ioperators'] = {}
# Workspace preferences
options['preferences'] = {}
for preference in workspace.workspacepreference_set.all():
if not preference.inherit:
options['preferences'][preference.name] = preference.value
# Tabs and their preferences
options['tabs'] = []
options['wiring'] = {
'inputs': [],
'outputs': [],
}
for tab in workspace.tab_set.order_by('position'):
preferences = {}
for preference in tab.tabpreference_set.all():
if not preference.inherit:
preferences[preference.name] = preference.value
resources = []
for iwidget in tab.iwidget_set.select_related('widget__resource').all():
resource_info = process_iwidget(workspace, iwidget, options['wiring'], parametrization['iwidgets'], readOnlyWidgets)
resources.append(resource_info)
if options['embedmacs']:
options['embedded'].add('/'.join((resource_info['vendor'], resource_info['name'], resource_info['version'])))
options['tabs'].append({
'name': tab.name,
'resources': resources,
'preferences': preferences,
})
# wiring conections and operators
readOnlyConnectables = options.get('readOnlyConnectables', False)
wiring_status = workspace.wiringStatus
if len(wiring_status) == 0:
wiring_status = get_wiring_skeleton()
# Set the wiring status' version
if wiring_status.get('version', '1.0') == '1.0':
wiring_status = parse_wiring_old_version(wiring_status)
options['wiring']['version'] = '2.0'
options['wiring']['operators'] = {}
for id_, operator in six.iteritems(wiring_status['operators']):
operator_data = {
'name': operator['name'],
'preferences': {},
}
vendor, name, version = operator['name'].split('/')
resource = CatalogueResource.objects.get(vendor=vendor, short_name=name, version=version)
operator_info = json.loads(resource.json_description)
operator_params = parametrization['ioperators'].get(id_, {})
for pref_index, preference in enumerate(operator_info['preferences']):
status = 'normal'
if preference['name'] in operator_params:
ioperator_param_desc = operator_params[preference['name']]
status = ioperator_param_desc.get('status', 'normal')
source = ioperator_param_desc.get('source', 'current')
if source == 'default':
if status == 'normal':
# Do not issue a Preference element for this preference
continue
value = None
elif source == 'current':
value = get_current_operator_pref_value(operator, preference)
elif source == 'custom':
value = ioperator_param_desc['value']
else:
raise Exception('Invalid preference value source: %s' % source)
else:
value = get_current_operator_pref_value(operator, preference)
operator_data['preferences'][preference['name']] = {
'readonly': status != 'normal',
'hidden': status == 'hidden',
}
if value is not None:
operator_data['preferences'][preference['name']]['value'] = value
options['wiring']['operators'][id_] = operator_data
if options['embedmacs']:
options['embedded'].add(operator['name'])
options['wiring']['connections'] = []
for connection in wiring_status['connections']:
options['wiring']['connections'].append({
'source': connection['source'],
'target': connection['target'],
'readonly': readOnlyConnectables,
})
options['wiring']['visualdescription'] = wiring_status['visualdescription']
embedded = options['embedded']
options['embedded'] = []
for resource in embedded:
(vendor, name, version) = resource.split('/')
options['embedded'].append({
'vendor': vendor,
'name': name,
'version': version,
'src': 'macs/%s_%s_%s.wgt' % (vendor, name, version)
})
del options['embedmacs']
return options
def build_xml_template_from_workspace(options, workspace, user, raw=False):
build_json_template_from_workspace(options, workspace, user)
return xml.write_xml_description(options, raw=raw)
def build_rdf_template_from_workspace(options, workspace, user):
build_json_template_from_workspace(options, workspace, user)
return rdf.build_rdf_graph(options)
|
agpl-3.0
| 8,939,400,542,287,359,000 | 37.093093 | 128 | 0.607647 | false | 4.327874 | false | false | false |
reedessick/pointy-Poisson
|
multiPopVectors2OmegaScan.py
|
1
|
6042
|
#!/usr/bin/python
usage = "multiPopVectors2OmegaScan.py [--options] vectors.txt"
description = "writes OmegaScan config files based on the multi-population vectors supplied. Assumes KW channel naming conventions. Also writes corresponding command lines to run OmegaScans for each vector supplied."
author = "[email protected]"
import os
import subprocess as sp
from optparse import OptionParser
#-------------------------------------------------
parser=OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("", "--frame-type", default="H1_R", type="string")
parser.add_option("", "--timeRange", default=64, type="int")
parser.add_option("", "--freq-map", default=None, type="string", help="the output of FrChannels, used to map channel names to sample frequencies")
parser.add_option("", "--gwchan", default="H1:CAL-DELTAL_EXTERNAL_DQ", type="string")
parser.add_option("", "--output-dir", default=".", type="string")
parser.add_option("", "--condor", default=False, action="store_true", help="write a condor_sub file instead of a shell script")
parser.add_option("", "--accounting-group", default="ligo.dev.o2.detchar.explore.test", type="string")
parser.add_option("", "--accounting-group-user", default="reed.essick", type="string")
parser.add_option("", "--request-memory", default=2000000, type="int", help="measured in kB")
opts, args = parser.parse_args()
if len(args)!=1:
raise ValueError("Please supply exactly one input argument\n%s"%(usage))
vectors = args[0]
if opts.freq_map==None:
opts.freq_map = raw_input("--freq-map=")
if not os.path.exists(opts.output_dir):
os.makedirs( opts.output_dir )
ifo = opts.frame_type[0]
#-------------------------------------------------
if opts.verbose:
print "reading in channels from :"+vectors
file_obj = open(vectors, "r")
chans = file_obj.readline().strip().split()[1:] ### skip first column because that is the filename
file_obj.close()
Nchans = len(chans)
if opts.verbose:
print " found %d channels"%(Nchans)
### assume KW channel naming convention
channels = set()
chanmap = {}
for i, chan in enumerate(chans):
chan = chan.split("_")
chan = "%s:%s"%(chan[0], "_".join(chan[1:-2]))
channels.add( chan )
chanmap[i] = chan
if opts.verbose:
print "reading in sample frequencies from :"+opts.freq_map
file_obj = open(opts.freq_map, "r")
freq_map = dict( [l.strip().split() for l in file_obj] )
file_obj.close()
channels = dict( (chan, freq_map[chan]) for chan in channels )
#-------------------------------------------------
if opts.verbose:
print "writing Qscan config files for:"
gwdf_cmd = "gw_data_find -o %s --type %s"%(ifo, opts.frame_type) + " -s %d -e %d -u file"
os_cmd = "/home/omega/opt/omega/bin/wpipeline scan %.9f -r -c %s -o %s -f %s"
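# os_cmd renders, per event, as:
#   wpipeline scan <gps> -r -c <Qscan.cnf> -o <output dir> -f <frame dir>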
header = """# Q Scan configuration file
# Automatically generated with wconfigure.sh
# by user bhughey on 2009-07-09 10:33:18 PDT
# from sample frame files:
# /archive/frames/S6/L1/LHO/H-H1_RDS_R_L1-9311/H-H1_RDS_R_L1-931194752-64.gwf
[Context,Context]
[Parameters,Parameter Estimation]
[Notes,Notes]
[Aux Channels,Identified interesting Aux channels]
"""
template = """{
channelName: '%s'
frameType: '%s'
sampleFrequency: %s
searchTimeRange: %d
searchFrequencyRange: [0 Inf]
searchQRange: [4 64]
searchMaximumEnergyLoss: 0.2
whiteNoiseFalseRate: 1e-3
searchWindowDuration: 0.5
plotTimeRanges: [0.1 1 4 16]
plotFrequencyRange: []
plotNormalizedEnergyRange: [0 25.5]
alwaysPlotFlag: 1
}"""%('%s', opts.frame_type, '%s', opts.timeRange)
if opts.condor:
cmd_file = "%s/run_Qscan.sub"%(opts.output_dir)
cmd_obj = open(cmd_file, "w")
print >> cmd_obj, """universe = vanilla
executable = /home/omega/opt/omega/bin/wpipeline
getenv = True
accounting_group = %s
accounting_group_user = %s
log = %s/Qscan.log
error = %s/Qscan-$(cluster)-$(process).err
output = %s/Qscan-$(cluster)-$(process).out
request_memory = %d KB
notification = never"""%(opts.accounting_group, opts.accounting_group_user, opts.output_dir, opts.output_dir, opts.output_dir, opts.request_memory)
else:
cmd_file = "%s/run_Qscan.sh"%(opts.output_dir)
cmd_obj = open(cmd_file, "w")
file_obj = open(vectors, "r")
file_obj.readline()
for line in file_obj:
line = line.strip().split()
if opts.verbose:
print " "+line[0]
try:
gps = float(line[0])
except:
gps = float(line[0].split("/")[-2].split('-')[-1])
### write config file
participating = set()
for i, v in enumerate( [float(l) for l in line[1:]] ):
if v > 0:
participating.add( chanmap[i] )
outdir = "%s/%.6f"%(opts.output_dir, gps)
if not os.path.exists(outdir):
os.makedirs(outdir)
conf_file = "%s/Qscan.cnf"%(outdir)
if opts.verbose:
print " "+conf_file
conf_obj = open(conf_file, "w")
print >> conf_obj, header
print >> conf_obj, template%(opts.gwchan, freq_map[opts.gwchan])
for chan in sorted(participating): ### assumes KW naming conventions
print >> conf_obj, template%(chan, channels[chan])
conf_obj.close()
### set up command
this_cmd = gwdf_cmd%(int(gps), int(gps)+1)
if opts.verbose:
print " "+this_cmd
frame = sp.Popen( this_cmd.split(), stdout=sp.PIPE).communicate()[0].split()[0]
directory = os.path.dirname( frame.replace("file://localhost","") )
that_cmd = os_cmd%(gps, conf_file, outdir, directory)
if opts.verbose:
print " "+that_cmd
if opts.condor:
print >> cmd_obj, "arguments = \" %s \"\nqueue 1"%(" ".join(that_cmd.split()[1:]))
else:
print >> cmd_obj, that_cmd
cmd_obj.close()
if opts.verbose:
if opts.condor:
print "now run :\ncondor_submit %s"%(cmd_file)
else:
print "now run :\n%s"%(cmd_file)
|
mit
| -8,005,188,117,140,197,000 | 31.483871 | 215 | 0.624297 | false | 3.173319 | false | false | false |
googleapis/googleapis-gen
|
google/cloud/automl/v1beta1/automl-v1beta1-py/google/cloud/automl_v1beta1/types/model_evaluation.py
|
1
|
6961
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import classification
from google.cloud.automl_v1beta1.types import detection
from google.cloud.automl_v1beta1.types import regression
from google.cloud.automl_v1beta1.types import text_extraction
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.automl.v1beta1',
manifest={
'ModelEvaluation',
},
)
class ModelEvaluation(proto.Message):
r"""Evaluation results of a model.
Attributes:
classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics):
Model evaluation metrics for image, text,
video and tables classification.
Tables problem is considered a classification
when the target column is CATEGORY DataType.
regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics):
Model evaluation metrics for Tables
regression. Tables problem is considered a
regression when the target column has FLOAT64
DataType.
translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics):
Model evaluation metrics for translation.
image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics):
Model evaluation metrics for image object
detection.
video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics):
Model evaluation metrics for video object
tracking.
text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics):
Evaluation metrics for text sentiment models.
text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics):
Evaluation metrics for text extraction
models.
name (str):
Output only. Resource name of the model evaluation. Format:
``projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}``
annotation_spec_id (str):
Output only. The ID of the annotation spec that the model
            evaluation applies to. The ID is empty for the overall
model evaluation. For Tables annotation specs in the dataset
do not exist and this ID is always not set, but for
CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
the
[display_name][google.cloud.automl.v1beta1.ModelEvaluation.display_name]
field is used.
display_name (str):
Output only. The value of
[display_name][google.cloud.automl.v1beta1.AnnotationSpec.display_name]
at the moment when the model was trained. Because this field
returns a value at model training time, for different models
trained from the same dataset, the values may differ, since
display names could had been changed between the two model's
trainings. For Tables CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
distinct values of the target column at the moment of the
model evaluation are populated here. The display_name is
empty for the overall model evaluation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this model
evaluation was created.
evaluated_example_count (int):
Output only. The number of examples used for model
evaluation, i.e. for which ground truth from time of model
creation is compared against the predicted annotations
created by the model. For overall ModelEvaluation (i.e. with
annotation_spec_id not set) this is the total number of all
examples used for evaluation. Otherwise, this is the count
of examples that according to the ground truth were
annotated by the
[annotation_spec_id][google.cloud.automl.v1beta1.ModelEvaluation.annotation_spec_id].
"""
classification_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=8,
oneof='metrics',
message=classification.ClassificationEvaluationMetrics,
)
regression_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=24,
oneof='metrics',
message=regression.RegressionEvaluationMetrics,
)
translation_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=9,
oneof='metrics',
message=translation.TranslationEvaluationMetrics,
)
image_object_detection_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=12,
oneof='metrics',
message=detection.ImageObjectDetectionEvaluationMetrics,
)
video_object_tracking_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=14,
oneof='metrics',
message=detection.VideoObjectTrackingEvaluationMetrics,
)
text_sentiment_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=11,
oneof='metrics',
message=text_sentiment.TextSentimentEvaluationMetrics,
)
text_extraction_evaluation_metrics = proto.Field(
proto.MESSAGE,
number=13,
oneof='metrics',
message=text_extraction.TextExtractionEvaluationMetrics,
)
name = proto.Field(
proto.STRING,
number=1,
)
annotation_spec_id = proto.Field(
proto.STRING,
number=2,
)
display_name = proto.Field(
proto.STRING,
number=15,
)
create_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
evaluated_example_count = proto.Field(
proto.INT32,
number=6,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -8,033,426,745,816,520,000 | 39.947059 | 124 | 0.679787 | false | 4.380743 | false | false | false |
MDAnalysis/mdanalysis
|
benchmarks/benchmarks/analysis/rdf.py
|
1
|
1204
|
import MDAnalysis
try:
    from MDAnalysisTests.datafiles import TPR, XTC
except ImportError:
    pass
try:
    from MDAnalysis.analysis.rdf import InterRDF
except ImportError:
    pass
class SimpleRdfBench(object):
"""Benchmarks for MDAnalysis.analysis.rdf
"""
params = ([20,75,200],
[[0,5], [0,15], [0,20]],
[1, 100, 1000, 10000])
param_names = ['nbins',
'range_val',
'natoms']
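    # asv runs setup()/time_interrdf() once for every combination in the
    # cartesian product of params: 3 nbins x 3 ranges x 4 natoms = 36 cases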
def setup(self, nbins, range_val, natoms):
self.sel_str = 'name OW'
self.u = MDAnalysis.Universe(TPR, XTC)
try:
self.sel = self.u.select_atoms(self.sel_str)[:natoms]
except AttributeError:
self.sel = self.u.selectAtoms(self.sel_str)[:natoms]
# do not include initialization of the
# InterRDF object in the benchmark itself
self.rdf = InterRDF(g1=self.sel,
g2=self.sel,
nbins=nbins,
range=range_val)
def time_interrdf(self, nbins, range_val, natoms):
"""Benchmark a full trajectory parse
by MDAnalysis.analysis.rdf.InterRDF
"""
self.rdf.run()
|
gpl-2.0
| -4,898,817,287,443,108,000 | 24.083333 | 65 | 0.539037 | false | 3.727554 | false | false | false |
xl1994/tfConvo
|
genImg.py
|
1
|
3379
|
# -*- coding: utf-8 -*-
import os
from captcha.image import ImageCaptcha
import numpy as np
from PIL import Image
import tensorflow as tf
import random
# import time
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
ALPHABET_LOWER = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
ALPHABET_UPPER = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def gen_captcha_text(char_set=NUMBER+ALPHABET_LOWER+ALPHABET_UPPER, captcha_size=4):
'''
    Return a list of captcha_size characters drawn at random from char_set.
'''
CAPTCHA_TEXT = []
for i in range(captcha_size):
C = random.choice(char_set)
CAPTCHA_TEXT.append(C)
return CAPTCHA_TEXT
def gen_captcha_data(captcha_text):
'''
    Render captcha_text with ImageCaptcha and return (text, image as a numpy array).
'''
img = ImageCaptcha()
captcha_text = ' '.join(captcha_text)
captcha_data = img.generate(captcha_text)
captcha_data = Image.open(captcha_data)
captcha_data = np.array(captcha_data)
return captcha_text, captcha_data
# IMAGE DATA TO TFRECORDS
def img_to_tfrecords(output_filename, input_directory, classes, width=128, height=128):
'''
    Serialize labeled images found under input_directory/<class name>/ into output_filename as TFRecord examples with 'label' and 'img_raw' features.
'''
writer = tf.python_io.TFRecordWriter(output_filename)
for index, name in enumerate(classes):
class_path = input_directory + '/' + name
for img_name in os.listdir(class_path):
img_path = class_path + '/' + img_name
img = Image.open(img_path)
            # img = img.resize((width, height))  # resize takes a single (w, h) tuple
img_raw = img.tobytes()
example = tf.train.Example(features=tf.train.Features(feature={\
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),\
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))}))
writer.write(example.SerializeToString())
writer.close()
return output_filename
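# A minimal read-back sketch for the records written above, using the same
# TF 1.x queue-based API as the writer; the 'label'/'img_raw' feature names
# mirror the keys used in img_to_tfrecords.
def read_tfrecords(filename):
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(serialized, features={
        'label': tf.FixedLenFeature([], tf.int64),
        'img_raw': tf.FixedLenFeature([], tf.string)})
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    return features['label'], img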
def imgGen(num, charset, dstr):
flag = True
class_set = set()
try:
for item in charset:
classes_path = os.getcwd() + '/' + dstr + '/' + item
if not item in class_set:
class_set.add(item)
else:
continue
if not os.path.exists(classes_path):
os.makedirs(dstr+ '/' + item)
for i in range(num):
FILE_NAME = classes_path + '/label_' + str(i) + '.jpg'
ImageCaptcha().write(item, FILE_NAME)
img = Image.open(FILE_NAME)
region = (0,0,img.size[0]/4,img.size[1])
img = img.crop(region)
img.save(FILE_NAME)
except Exception as e:
print str(e)
flag = False
return flag
def imgTrain(num, charset):
return imgGen(num, charset, 'train')
def imgValidation(num, charset):
return imgGen(num, charset, 'valid')
if __name__ == '__main__':
    # number of samples per character
num_train = 400
num_valid = 80
charset = NUMBER + ALPHABET_LOWER + ALPHABET_UPPER
if imgTrain(num_train, charset):
        print 'Train: each character', num_train, 'images generated!'
if imgValidation(num_valid, charset):
        print 'Validation: each character', num_valid, 'images generated!'
|
mit
| -1,317,025,532,587,306,800 | 34.197917 | 94 | 0.553122 | false | 3.199811 | false | false | false |
Keeper-Security/Commander
|
keepercommander/custom/create_delete.py
|
1
|
1159
|
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2017 Keeper Security Inc.
# Contact: [email protected]
#
# Example showing how to create a record and upload
# to the server, then deleting the record from the
# server.
#
import getpass
import string
import random
from keepercommander.record import Record
from keepercommander.params import KeeperParams
from keepercommander import display, api
my_params = KeeperParams()
while not my_params.user:
my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)
api.sync_down(my_params)
# Add record
r = Record()
r.title = 'Test Record'
r.login = '[email protected]'
# generate a 32-char random password from letters, digits and punctuation
# (string.printable would also include whitespace/control characters)
r.password = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits + string.punctuation) for _ in range(32))
if api.add_record(my_params, r):
print('Added record UID='+r.record_uid)
# Delete the record
if r.record_uid:
    api.delete_record(my_params, r.record_uid)
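# A minimal follow-up sketch (an assumption, not part of the original
# example): re-sync so the local cache reflects the server-side changes.
api.sync_down(my_params)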
|
mit
| -5,221,725,862,456,658,000 | 22.632653 | 88 | 0.66494 | false | 3.047368 | false | false | false |
dadorado37/Trabajos_Python
|
empresa_arranque_gana.py
|
1
|
7489
|
# this link shows how to make sure a user enters an integer and not a character:
# https://mail.python.org/pipermail/python-es/2011-September/030635.html
# empresa_arranque_gana.py
seleccion_menu_uno = 5
# define the function blocks
def imprimir_factura(categoria, porcentaje_descuento):
    print("\nEsa opcion es correcta\n")
    print "cliente categoria", categoria
    print "nombre del cliente: ", nombre_cliente
    print "la cantidad de escobas es: ", cantidad_escobas
    print "el costo total de las escobas es: ", costo_total_escobas
    print "la cantidad de recogedores es: ", cantidad_recogedores
    print "el costo total de los recogedores es: ", costo_total_recogedores
    print "la cantidad de aromatizantes es: ", cantidad_aromatizantes
    print "el costo total de los aromatizantes es: ", costo_total_aromatizantes
    print "la cantidad total de productos es: ", cantidad_total_productos
    print "el subtotal de la compra es: ", subtotal_compra
    descuento_compra = (subtotal_compra * porcentaje_descuento) / 100
    total_compra = subtotal_compra - descuento_compra
    print "el total de compra es: ", total_compra
# the four menu options differ only in category number and discount rate
def uno():
    imprimir_factura(1, 5)
def dos():
    imprimir_factura(2, 8)
def tres():
    imprimir_factura(3, 12)
def cuatro():
    imprimir_factura(4, 15)
costo_escoba = 5000
costo_recogedor = 2000
costo_aromatizante = 3000
print "cual es su nombre"
nombre_cliente = raw_input()
print "\ndesea comprar escobas S / N "
desea_comprar_escobas = raw_input()
while (desea_comprar_escobas != 's') or (desea_comprar_escobas != 'S') or (desea_comprar_escobas != 'n') or (desea_comprar_escobas != 'N'):
if (desea_comprar_escobas == 's') or (desea_comprar_escobas == 'S') or (desea_comprar_escobas != 'n') or (desea_comprar_escobas != 'N'):
break
else:
print "\ningreso la opcion incorrecta"
print "desea comprar escobas S / N "
desea_comprar_escobas = raw_input()
print "\nok ingreso la opcion correcta"
print "\ndesea comprar recogedores S / N "
desea_comprar_recogedores = raw_input()
while (desea_comprar_recogedores != 's')or(desea_comprar_recogedores != 'S') or (desea_comprar_recogedores != 'n')or(desea_comprar_recogedores != 'N'):
if (desea_comprar_recogedores == 's')or(desea_comprar_recogedores == 'S') or (desea_comprar_recogedores != 'n')or(desea_comprar_recogedores != 'N'):
break
else:
print "\ningreso la opcion incorrecta"
print "desea comprar recogedores S / N "
desea_comprar_recogedores = raw_input()
print "\nok ingreso la opcion correcta"
print "\ndesea comprar aromatizantes S / N "
desea_comprar_aromatizantes = raw_input()
while (desea_comprar_aromatizantes != 's')or(desea_comprar_aromatizantes != 'S') or (desea_comprar_aromatizantes != 'n')or(desea_comprar_aromatizantes != 'N'):
if (desea_comprar_aromatizantes == 's')or(desea_comprar_aromatizantes == 'S') or (desea_comprar_aromatizantes != 'n')or(desea_comprar_aromatizantes != 'N'):
break
else:
print "\ningreso la opcion incorrecta"
print "desea comprar aromatizantes S / N "
desea_comprar_aromatizantes = raw_input()
print "\nok ingreso la opcion correcta\n"
if (desea_comprar_escobas == 's') or (desea_comprar_escobas == 'S'):
    while 1:
        print "digite la cantidad de escobas"
        cantidad_escobas = raw_input()
        if cantidad_escobas.isdigit():
            cantidad_escobas = int(cantidad_escobas)
            break
elif (desea_comprar_escobas == 'n') or (desea_comprar_escobas == 'N'):
    cantidad_escobas = 0
if (desea_comprar_recogedores == 's') or (desea_comprar_recogedores == 'S'):
    while 1:
        print "digite la cantidad de recogedores"
        cantidad_recogedores = raw_input()
        if cantidad_recogedores.isdigit():
            cantidad_recogedores = int(cantidad_recogedores)
            break
elif (desea_comprar_recogedores == 'n') or (desea_comprar_recogedores == 'N'):
    cantidad_recogedores = 0
if (desea_comprar_aromatizantes == 's') or (desea_comprar_aromatizantes == 'S'):
    while 1:
        print "digite la cantidad de aromatizantes"
        cantidad_aromatizantes = raw_input()
        if cantidad_aromatizantes.isdigit():
            cantidad_aromatizantes = int(cantidad_aromatizantes)
            break
elif (desea_comprar_aromatizantes == 'n') or (desea_comprar_aromatizantes == 'N'):
    cantidad_aromatizantes = 0
costo_total_escobas = costo_escoba * cantidad_escobas
costo_total_recogedores = costo_recogedor * cantidad_recogedores
costo_total_aromatizantes = costo_aromatizante * cantidad_aromatizantes
cantidad_total_productos = cantidad_escobas + cantidad_recogedores + cantidad_aromatizantes
subtotal_compra = costo_total_escobas + costo_total_recogedores + costo_total_aromatizantes
while seleccion_menu_uno > 4:
    # map the inputs to the function blocks
    menu_uno = {1: uno,
                2: dos,
                3: tres,
                4: cuatro}
    while 1:
        print("opcion 1.- cliente categoria 1 se le descuenta el 5%")
        print("opcion 2.- cliente categoria 2 se le descuenta el 8%")
        print("opcion 3.- cliente categoria 3 se le descuenta el 12%")
        print("opcion 4.- cliente categoria 4 se le descuenta el 15%")
        print "\nescoja una opcion\n\n"
        seleccion_menu_uno = raw_input()
        if seleccion_menu_uno.isdigit():
            seleccion_menu_uno = int(seleccion_menu_uno)
            try:
                menu_uno[seleccion_menu_uno]()
            except KeyError:
                print("\nEsa opcion no es correcta")
            break
        else:
            print("\nEsa opcion no es correcta")
|
gpl-2.0
| -7,989,632,861,453,903,000 | 40.153846 | 159 | 0.72533 | false | 2.446586 | false | false | false |
Hattivat/hypergolic-django
|
hypergolic/catalog/views/guidance_system_views.py
|
1
|
1876
|
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView, DeleteView
from .base import GenericListView, GenericCreateView
from ..models import GuidanceSystem
from ..forms import GuidanceSystemForm
from django.core.urlresolvers import reverse, reverse_lazy
class GuidanceSystemListView(GenericListView):
    model = GuidanceSystem
    display_data = ('energy_consumption', 'description', 'illustration')
class GuidanceSystemDetailView(DetailView):
    model = GuidanceSystem
    template_name = "catalog/electric_detail.html"
class GuidanceSystemCreateView(GenericCreateView):
    model = GuidanceSystem
    form_class = GuidanceSystemForm
    # fields = ['name', 'description', 'sources', 'illustration']
    success_url = reverse_lazy("guidance_system_list")
    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.creator = self.request.user
        obj.save()
        return super(GuidanceSystemCreateView, self).form_valid(form)
    def get_success_url(self):
        return reverse("guidance_system_detail", args=(self.object.pk,))
class GuidanceSystemUpdateView(UpdateView):
    model = GuidanceSystem
    form_class = GuidanceSystemForm
    # fields = ['name', 'description', 'sources', 'illustration']
    template_name = "catalog/generic_update.html"
    initial = {}
    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.modifier = self.request.user
        obj.save()
        return super(GuidanceSystemUpdateView, self).form_valid(form)
    def get_success_url(self):
        return reverse("guidance_system_detail", args=(self.object.pk,))
class GuidanceSystemDeleteView(DeleteView):
    model = GuidanceSystem
    template_name = "catalog/generic_delete.html"
    success_url = reverse_lazy("guidance_system_list")
|
agpl-3.0
| -2,839,638,340,755,405,300 | 32.5 | 72 | 0.723348 | false | 3.789899 | false | false | false |