Dataset columns: max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
max_stars_repo_path: src/Resources.py | max_stars_repo_name: softwarespartan/AGT | max_stars_count: 0 | id: 6632251

from boto.s3.key import Key
from boto.s3.connection import S3Connection,OrdinaryCallingFormat
import re,os,pyDate,Utils;
import multiprocessing
WL_SP3_BUCKET = 'edu.mbevis.osu.data' ;
WL_NAV_BUCKET = 'edu.mbevis.osu.data' ;
WL_RES_BUCKET = 'edu.mbevis.osu.resources' ;
WL_SOLN_BUCKET= 'edu.mbevis.osu.solutions' ;
WL_RNX_BUCKET = 'edu.mbevis.osu.data'
WL_STN_BUCKET = 'edu.mbevis.osu.data'
WL_APR_BUCKET = 'edu.mbevis.osu.data'
# local dir (relative to work_dir) for resources
WL_RESOURCES_LOCAL = 'resources' ;
class ResourceException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def get_sp3(year,doy,org,outdir=None):
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str (doy );
# initialize a date object
date = pyDate.Date(year=year, doy=doy);
# create string version of the gps week
gps_week_str = str(date.gpsWeek);
    # zero-pad the gps week string to 4 characters
if date.gpsWeek < 1000: gps_week_str = '0'+gps_week_str;
# create the file name of the sp3
sp3_file_name_base = org+gps_week_str+str(date.gpsWeekDay)+'.sp3';
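    # e.g. org='igs', gps week 986, week day 3 -> 'igs09863.sp3' (illustrative values)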
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SP3_BUCKET) ;
bucketKey = Key(bucket) ;
file_list = [];
for f in bucket.list(prefix=sp3_file_name_base) : file_list.append(f.key);
# check if the sp3 file listing was empty
if len(file_list) == 0:
raise ResourceException('sp3 resource: '+sp3_file_name_base+' could not be located');
# make sure no more than a single match occurred
if len(file_list) > 1:
raise ResourceException('sp3 resource: '+sp3_file_name_base+' matches multiple files');
# just be explicit about it
sp3_file_name = file_list[0];
# create the full path to file on local system
sp3_file_path = os.path.join(outdir,sp3_file_name);
# create the s3 object
bucketKey.key = sp3_file_name;
# pull the file
bucketKey.get_contents_to_filename(sp3_file_path);
# that's all
return sp3_file_path;
def get_nav(year,doy,org,outdir=None):
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str (doy );
# create the file name of the nav
nav_file_name = org+doy+'0.'+year[2:]+'n.Z';
# set outdir to current directory if not set
if outdir is None: outdir = '.';
    # create the nav file path
nav_file_path = os.path.join(outdir,nav_file_name);
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_NAV_BUCKET) ;
bucketKey = bucket.get_key(nav_file_name) ;
if bucketKey is None:
raise ResourceException('nav resource: '+nav_file_name+' could not be located');
# create the s3 object
bucketKey.key = nav_file_name;
# pull the file
bucketKey.get_contents_to_filename(nav_file_path);
# that's all
return nav_file_path;
def get_rnx(year,doy,stn_list,outdir=None):
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str (doy );
# init
rnx_file_list = list();
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat());
bucket = conn.get_bucket(WL_RNX_BUCKET);
for stnId in stn_list:
# parse the station id and extract the 4-char station code
#(ns,code) = Utils.parse_stnId(stnId);
# no more namespaces
code = stnId;
        # create the file name of the rinex
rnx_file_name = code+doy+'0.'+year[2:]+'d.Z';
# set outdir to current directory if not set
if outdir is None: outdir = '.';
        # create the rinex file path
rnx_file_path = os.path.join(outdir,rnx_file_name);
# create key path to file in rnx
#rnx_key_path = '/'.join([ns,year,doy,rnx_file_name]);
rnx_key_path = rnx_file_name;
bucketKey = bucket.get_key(rnx_key_path) ;
if bucketKey is None:
# create the file name of the rnx with session 1
rnx_file_name = code+str(doy)+'1.'+str(year)[2:]+'d.Z';
# create key path to file in s3
#rnx_key_path = '/'.join([ns,str(year),str(doy),rnx_file_name]);
rnx_key_path = rnx_file_name;
# check for session 1 file
bucketKey = bucket.get_key(rnx_key_path);
if bucketKey is None:
os.sys.stderr.write('rnx resource: '+stnId+' could not be located for '+year+' '+doy+'\n');
continue;
# create the s3 object
bucketKey.key = rnx_key_path;
# pull the file
bucketKey.get_contents_to_filename(rnx_file_path);
# add the rinex file path to the file list
rnx_file_list.append(rnx_file_path);
return rnx_file_list;
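# helper used by the multiprocessing pool in get_rnx_parallel below:
# params is a (bucketKey, local_file_path) tuple, so this downloads one s3 key to disk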
def action(params):
params[0].get_contents_to_filename(params[1])
def get_rnx_parallel(year, doy, stn_list, outdir=None):
if len(stn_list) ==0: return
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str(doy);
# init
rnx_file_list = list();
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat());
bucket = conn.get_bucket(WL_RNX_BUCKET);
list_of_bucket_keys = list()
for stnId in stn_list:
# parse the station id and extract the 4-char station code
#(ns, code) = Utils.parse_stnId(stnId);
code = stnId;
        # create the file name of the rinex
rnx_file_name = code + doy + '0.' + year[2:] + 'd.Z';
# set outdir to current directory if not set
if outdir is None: outdir = '.';
        # create the rinex file path
rnx_file_path = os.path.join(outdir, rnx_file_name);
# create key path to file in rnx
#rnx_key_path = '/'.join([ns, year, doy, rnx_file_name]);
rnx_key_path = rnx_file_name;
bucketKey = bucket.get_key(rnx_key_path);
if bucketKey is None:
# create the file name of the rnx with session 1
rnx_file_name = code + str(doy) + '1.' + str(year)[2:] + 'd.Z';
# create key path to file in s3
#rnx_key_path = '/'.join([ns, str(year), str(doy), rnx_file_name]);
rnx_key_path = rnx_file_name
# check for session 1 file
bucketKey = bucket.get_key(rnx_key_path);
if bucketKey is None:
os.sys.stderr.write('rnx resource: ' + stnId + ' could not be located for ' + year + ' ' + doy + '\n');
continue;
# create the s3 object
bucketKey.key = rnx_key_path;
# enqueue bucket key for download
list_of_bucket_keys.append((bucketKey,rnx_file_path));
        # update the list of rinex files processed
rnx_file_list.append(rnx_file_path);
poolsz = max(1,min(16,len(rnx_file_list)))
pool = multiprocessing.Pool(poolsz);
pool.map(action, list_of_bucket_keys)
pool.close()
pool.join()
# pull the file
#bucketKey.get_contents_to_filename(rnx_file_path);
# add the rinex file path to the file list
return rnx_file_list;
def get_stn_info(year,doy,stn_list,outdir=None):
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str(doy);
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat());
bucket = conn.get_bucket(WL_STN_BUCKET);
if outdir is None: outdir = '.';
stn_info_file_name = year+'-'+doy+'.info'
bucketKey = bucket.get_key(stn_info_file_name);
# create the s3 object
bucketKey.key = stn_info_file_name;
# generate output path
out_file_path = os.path.join(outdir,stn_info_file_name)
# pull the file
bucketKey.get_contents_to_filename(out_file_path);
return out_file_path
# def get_stn_info_info(year,doy,stn_list,outdir=None):
#
# if len(stn_list) == 0: return
#
# # init
# file_list = list();
#
# # init s3 connection to the metadata bucket
# conn = S3Connection(calling_format=OrdinaryCallingFormat());
# bucket = conn.get_bucket(WL_STN_BUCKET);
#
# list_of_bucket_keys = list()
#
# for stnId in stn_list:
#
# # parse the station id and extract the 4-char station code
# (ns,code) = Utils.parse_stnId(stnId);
#
# # set outdir to current directory if not set
# if outdir is None: outdir = '.';
#
# # set the file name for the station info
# stn_info_file_name = '.'.join((ns,code,'station','info'));
#
# # next, create the path for the station info file
# stn_info_file_path = os.path.join(outdir,stn_info_file_name);
#
# bucketKey = bucket.get_key(stn_info_file_name) ;
#
# # let the user know that the file does not exist and continue
# if bucketKey is None:
# os.sys.stderr.write('station info resource: '+stnId+' could not be located\n');
# continue;
#
# # create the s3 object
# bucketKey.key = stn_info_file_name;
#
# # enqueue
# list_of_bucket_keys.append((bucketKey,stn_info_file_path))
#
# # add to list of files
# file_list.append(stn_info_file_path);
#
# # pull the file
# bucketKey.get_contents_to_filename(stn_info_file_path);
#
# poolsz = min(16, len(file_list))
# pool = multiprocessing.Pool(poolsz);
# pool.map(action, list_of_bucket_keys)
# pool.close()
# pool.join()
#
# return file_list;
def get_apr(year,doy,stn_list,outdir=None):
year = Utils.get_norm_year_str(year);
doy = Utils.get_norm_doy_str(doy);
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat());
bucket = conn.get_bucket(WL_STN_BUCKET);
if outdir is None: outdir = '.';
file_name = year+'-'+doy+'.apr'
bucketKey = bucket.get_key(file_name);
# create the s3 object
bucketKey.key = file_name;
# generate output path
out_file_path = os.path.join(outdir,file_name)
# pull the file
bucketKey.get_contents_to_filename(out_file_path);
return out_file_path
# def get_apr(year,doy,dns,outdir=None):
#
# year = Utils.get_norm_year_str(year);
# doy = Utils.get_norm_doy_str (doy );
#
# # set outdir to current directory if not set
# if outdir is None: outdir = '.';
#
# # set the file name for the station info
# apr_file_name = '.'.join((dns,year,doy,'apr'));
#
# # next, create the path for the station info file
# apr_file_path = os.path.join(outdir,apr_file_name);
#
# # init s3 connection to the metadata bucket
# conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
# bucket = conn.get_bucket(WL_APR_BUCKET) ;
# bucketKey = bucket.get_key(apr_file_name) ;
#
# # make sure we're on track here
# if bucketKey is None:
# raise ResourceException('could not locate resource: '+apr_file_name);
#
# # create the s3 object
# bucketKey.key = apr_file_name;
#
# # pull the file
# bucketKey.get_contents_to_filename(apr_file_path);
#
# # thats a wrap
# return apr_file_path;
def get_bin(program,outdir=None):
# make sure program specified is not bogus
if program is None or program == "":
raise ResourceException('invalid program name');
# figure out what platform we're on
pid = Utils.get_platform_id();
# compute the resource id
rid = Utils.get_resource_delimiter().join((program,pid));
# add the file and compression suffix
rid = '.'.join((rid,'tar','gz'));
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# compute the full file path
bin_file_path = os.path.join(outdir,rid);
# init s3 connection to the resources bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_RES_BUCKET) ;
bucketKey = bucket.get_key(rid) ;
if bucketKey is None:
raise ResourceException('binary resource: '+rid+' could not be located');
# set the key to download
bucketKey.key = rid;
# pull the resource
bucketKey.get_contents_to_filename(bin_file_path);
# all done;
return bin_file_path;
def get_tables(program,outdir=None):
# make sure program specified is not bogus
if program is None or program == "":
raise ResourceException('invalid program name');
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# compute the resource id
rid = Utils.get_resource_delimiter().join((program,'tables'));
# add the file suffix and the compression suffix
rid = '.'.join((rid,'tar','gz'));
# compute the full file path for tables resource
tables_file_path = os.path.join(outdir,rid);
# init s3 connection to the resources bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_RES_BUCKET) ;
bucketKey = bucket.get_key(rid) ;
if bucketKey is None:
raise ResourceException('tables resource: '+rid+' could not be located');
# set the key to download
bucketKey.key = rid;
# pull the resource
bucketKey.get_contents_to_filename(tables_file_path);
# yup yup
return tables_file_path
def pushSNX(key_path,file_path):
# parse the name of the file
file_name = os.path.basename(file_path);
# create the file key path into S3
file_key_path = "/".join((key_path,file_name));
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
bucketKey = Key(bucket) ;
print "pushing snx file",file_path,"-->", file_key_path
# create the s3 object
bucketKey.key = file_key_path; bucketKey.set_contents_from_filename(file_path);
def pushSP3(file_path):
# parse the name of the file
file_name = os.path.basename(file_path);
# create the file key path into S3
file_key_path = file_name;
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SP3_BUCKET) ;
bucketKey = Key(bucket) ;
# create the s3 object
bucketKey.key = file_key_path; bucketKey.set_contents_from_filename(file_path);
def pushOUT(key_path,file_path):
# parse the name of the file
file_name = os.path.basename(file_path);
# create the file key path into S3
file_key_path = "/".join((key_path,file_name));
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
bucketKey = Key(bucket) ;
print "pushing out file",file_path,"-->", file_key_path
# create the s3 object
bucketKey.key = file_key_path; bucketKey.set_contents_from_filename(file_path);
def get_snx(key_path,outdir=None):
# init list of files copied
snx_file_list = list();
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# make sure to expand any user symbols
outdir = os.path.expanduser(outdir);
# initialize pattern to match sinex files
# Should we match the second '.'?
# will this match 'file.snx'?
pattern = re.compile('.*\.snx\..*');
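    # note: this pattern requires a second '.', so a bare 'file.snx' does not match
    # but 'file.snx.gz' does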
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
bucketKey = Key(bucket) ;
# ok now get list of snx files at the key path
file_keys = bucket.list(prefix=key_path);
# copy each file to the outpath with same keypath
for fk in file_keys:
# make sure it's a sinex file
if not pattern.match(fk.key): continue;
# fix the file name for unpadded gps week
file_name = Utils.fix_gps_week(fk.key);
# create file path w.r.t. outdir
file_path = os.path.join(outdir,file_name);
        # try to initialize the output path
file_root = os.path.split(file_path)[0];
# make the root if it does not exist
try:
if not os.path.isdir(file_root): os.makedirs(file_root);
except Exception as e:
os.sys.stderr.write(str(e)+'\n');
continue;
# set the bucket key
bucketKey.key = fk;
# get the snx resource
bucketKey.get_contents_to_filename(file_path);
# add the file to the file list
snx_file_list.append(file_path);
return snx_file_list;
def get_resources(key_path,ext=None,outdir=None):
# init list of files copied
res_file_list = list();
# set the file extension to everything, if not set
if ext is None: ext = '*';
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# make sure to expand any user symbols
outdir = os.path.expanduser(outdir);
    # escape the literal '.' in the extension before compiling the regex
ext = ext.replace('.', '\.');
# initialize pattern to match files
# Should we match the second '.'?
# will this match 'file.snx'?
pattern = re.compile('.*'+ext);
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
bucketKey = Key(bucket) ;
# ok now get list of snx files at the key path
file_keys = bucket.list(prefix=key_path);
# copy each file to the outpath with same keypath
for fk in file_keys:
# make sure it's a sinex file
if not pattern.match(fk.key): continue;
# fix the file name for unpadded gps week
file_name = Utils.fix_gps_week(fk.key);
# create file path w.r.t. outdir
file_path = os.path.join(outdir,file_name);
        # try to initialize the output path
file_root = os.path.split(file_path)[0];
# make the root if it does not exist
try:
if not os.path.isdir(file_root): os.makedirs(file_root);
except Exception as e:
os.sys.stderr.write(str(e)+'\n');
continue;
# set the bucket key
bucketKey.key = fk;
# get the snx resource
bucketKey.get_contents_to_filename(file_path);
# add the file to the file list
res_file_list.append(file_path);
return res_file_list;
def list_resources(key_path,ext=None,outdir=None):
# init list of files copied
res_file_list = list();
# set the file extension to everything, if not set
if ext is None: ext = '*';
# set outdir to current directory if not set
if outdir is None: outdir = '.';
# make sure to expand any user symbols
outdir = os.path.expanduser(outdir);
    # escape the literal '.' in the extension before compiling the regex
ext = ext.replace('.', '\.');
# initialize pattern to match files
# Should we match the second '.'?
# will this match 'file.snx'?
pattern = re.compile('.*'+ext);
# init s3 connection to the metadata bucket
conn = S3Connection(calling_format=OrdinaryCallingFormat()) ;
bucket = conn.get_bucket(WL_SOLN_BUCKET) ;
bucketKey = Key(bucket) ;
# ok now get list of snx files at the key path
file_keys = bucket.list(prefix=key_path);
# copy each file to the outpath with same keypath
for fk in file_keys:
# make sure it's a sinex file
if not pattern.match(fk.key): continue;
# fix the file name for unpadded gps week
file_name = Utils.fix_gps_week(fk.key);
# create file path w.r.t. outdir
file_path = os.path.join(outdir,file_name);
        # try to initialize the output path
# file_root = os.path.split(file_path)[0];
#
# # make the root if it does not exist
# try:
# if not os.path.isdir(file_root): os.makedirs(file_root);
# except Exception as e:
# os.sys.stderr.write(str(e)+'\n');
# continue;
#
# # set the bucket key
# bucketKey.key = fk;
#
# # get the snx resource
# bucketKey.get_contents_to_filename(file_path);
#
# # add the file to the file list
res_file_list.append(file_path);
return res_file_list;
def soln_exists(date,expt,org,net='n0'):
# init s3 connection
conn = S3Connection(calling_format=OrdinaryCallingFormat());
# create a bucket object into s3
bucket = conn.get_bucket(WL_SOLN_BUCKET);
# construct the relative path to where the file should be
relPath = date.yyyy()+"/"+date.ddd()+"/"+expt+"/"+org+"/"+net
# construct the name of the sinex file
fileName = org+date.wwwwd()+".snx.gz"
# full file path
fullFilePath = relPath + "/" + fileName
# create a file in to the bucket
key = Key(bucket,fullFilePath)
return key.exists(),fullFilePath
if __name__ == '__main__':
#files = get_snx('2009/123/odot/g06','~/tmp');
#for f in files: print f;
#files = list_resources('2009/123/odot/g06/n1','.mat.gz');
#for f in files: print f;
date = pyDate.Date(year=2016,doy=101)
expt = 'glbf'
org = 'n08'
net = 'n0'
exists = soln_exists(date,expt,org,net)
print("file: "+exists[1]+", "+str(exists[0])) | from boto.s3.key import Key
print("file: "+exists[1]+", "+str(exists[0])) | en | 0.665097 | # local dir relative work_dir for resources # initialize a date object # create string version of the gps week # make sure that the string is 5 characters # create the file name of the sp3 # set outdir to current directory if not set # init s3 connection to the metadata bucket # check if the sp3 file listing was empty # make sure no more than a single match occurred # just be explicit about it # create the full path to file on local system # create the s3 object # pull the file # that's all # create the file name of the nav # set outdir to current directory if not set # create the sp3 file path # init s3 connection to the metadata bucket # create the s3 object # pull the file # that's all # init # init s3 connection to the metadata bucket # parse the station id and extract the 4-char station code #(ns,code) = Utils.parse_stnId(stnId); # no more namespaces # create the file name of the sp3 # set outdir to current directory if not set # create the sp3 file path # create key path to file in rnx #rnx_key_path = '/'.join([ns,year,doy,rnx_file_name]); # create the file name of the rnx with session 1 # create key path to file in s3 #rnx_key_path = '/'.join([ns,str(year),str(doy),rnx_file_name]); # check for session 1 file # create the s3 object # pull the file # add the rinex file path to the file list # init # init s3 connection to the metadata bucket # parse the station id and extract the 4-char station code #(ns, code) = Utils.parse_stnId(stnId); # create the file name of the sp3 # set outdir to current directory if not set # create the sp3 file path # create key path to file in rnx #rnx_key_path = '/'.join([ns, year, doy, rnx_file_name]); # create the file name of the rnx with session 1 # create key path to file in s3 #rnx_key_path = '/'.join([ns, str(year), str(doy), rnx_file_name]); # check for session 1 file # create the s3 object # enqueue bucket key for download # update list of rinex file procesed # pull the file #bucketKey.get_contents_to_filename(rnx_file_path); # add the rinex file path to the file list # init s3 connection to the metadata bucket # create the s3 object # generate output path # pull the file # def get_stn_info_info(year,doy,stn_list,outdir=None): # # if len(stn_list) == 0: return # # # init # file_list = list(); # # # init s3 connection to the metadata bucket # conn = S3Connection(calling_format=OrdinaryCallingFormat()); # bucket = conn.get_bucket(WL_STN_BUCKET); # # list_of_bucket_keys = list() # # for stnId in stn_list: # # # parse the station id and extract the 4-char station code # (ns,code) = Utils.parse_stnId(stnId); # # # set outdir to current directory if not set # if outdir is None: outdir = '.'; # # # set the file name for the station info # stn_info_file_name = '.'.join((ns,code,'station','info')); # # # next, create the path for the station info file # stn_info_file_path = os.path.join(outdir,stn_info_file_name); # # bucketKey = bucket.get_key(stn_info_file_name) ; # # # let the user know that the file does not exist and continue # if bucketKey is None: # os.sys.stderr.write('station info resource: '+stnId+' could not be located\n'); # continue; # # # create the s3 object # bucketKey.key = stn_info_file_name; # # # enqueue # list_of_bucket_keys.append((bucketKey,stn_info_file_path)) # # # add to list of files # file_list.append(stn_info_file_path); # # # pull the file # bucketKey.get_contents_to_filename(stn_info_file_path); # # poolsz = min(16, len(file_list)) # pool = 
multiprocessing.Pool(poolsz); # pool.map(action, list_of_bucket_keys) # pool.close() # pool.join() # # return file_list; # init s3 connection to the metadata bucket # create the s3 object # generate output path # pull the file # def get_apr(year,doy,dns,outdir=None): # # year = Utils.get_norm_year_str(year); # doy = Utils.get_norm_doy_str (doy ); # # # set outdir to current directory if not set # if outdir is None: outdir = '.'; # # # set the file name for the station info # apr_file_name = '.'.join((dns,year,doy,'apr')); # # # next, create the path for the station info file # apr_file_path = os.path.join(outdir,apr_file_name); # # # init s3 connection to the metadata bucket # conn = S3Connection(calling_format=OrdinaryCallingFormat()) ; # bucket = conn.get_bucket(WL_APR_BUCKET) ; # bucketKey = bucket.get_key(apr_file_name) ; # # # make sure we're on track here # if bucketKey is None: # raise ResourceException('could not locate resource: '+apr_file_name); # # # create the s3 object # bucketKey.key = apr_file_name; # # # pull the file # bucketKey.get_contents_to_filename(apr_file_path); # # # thats a wrap # return apr_file_path; # make sure program specified is not bogus # figure out what platform we're on # compute the resource id # add the file and compression suffix # set outdir to current directory if not set # compute the full file path # init s3 connection to the resources bucket # set the key to download # pull the resource # all done; # make sure program specified is not bogus # set outdir to current directory if not set # compute the resource id # add the file suffix and the compression suffix # compute the full file path for tables resource # init s3 connection to the resources bucket # set the key to download # pull the resource # yup yup # parse the name of the file # create the file key path into S3 # init s3 connection to the metadata bucket # create the s3 object # parse the name of the file # create the file key path into S3 # init s3 connection to the metadata bucket # create the s3 object # parse the name of the file # create the file key path into S3 # init s3 connection to the metadata bucket # create the s3 object # init list of files copied # set outdir to current directory if not set # make sure to expand any user symbols # initialize pattern to match sinex files # Should we match the second '.'? # will this match 'file.snx'? # init s3 connection to the metadata bucket # ok now get list of snx files at the key path # copy each file to the outpath with same keypath # make sure it's a sinex file # fix the file name for unpadded gps week # create file path w.r.t. outdir # try in initialize the output path # make the root if it does not exist # set the bucket key # get the snx resource # add the file to the file list # init list of files copied # set the file extension to everything, if not set # set outdir to current directory if not set # make sure to expand any user symbols # help user out before compile regex to translate literal "." # initialize pattern to match files # Should we match the second '.'? # will this match 'file.snx'? # init s3 connection to the metadata bucket # ok now get list of snx files at the key path # copy each file to the outpath with same keypath # make sure it's a sinex file # fix the file name for unpadded gps week # create file path w.r.t. 
outdir # try in initialize the output path # make the root if it does not exist # set the bucket key # get the snx resource # add the file to the file list # init list of files copied # set the file extension to everything, if not set # set outdir to current directory if not set # make sure to expand any user symbols # help user out before compile regex to translate literal "." # initialize pattern to match files # Should we match the second '.'? # will this match 'file.snx'? # init s3 connection to the metadata bucket # ok now get list of snx files at the key path # copy each file to the outpath with same keypath # make sure it's a sinex file # fix the file name for unpadded gps week # create file path w.r.t. outdir # try in initialize the output path # file_root = os.path.split(file_path)[0]; # # # make the root if it does not exist # try: # if not os.path.isdir(file_root): os.makedirs(file_root); # except Exception as e: # os.sys.stderr.write(str(e)+'\n'); # continue; # # # set the bucket key # bucketKey.key = fk; # # # get the snx resource # bucketKey.get_contents_to_filename(file_path); # # # add the file to the file list # init s3 connection # create a bucket object into s3 # construct the relative path to where the file should be # construct the name of the sinex file # full file path # create a file in to the bucket #files = get_snx('2009/123/odot/g06','~/tmp'); #for f in files: print f; #files = list_resources('2009/123/odot/g06/n1','.mat.gz'); #for f in files: print f; | 2.138402 | 2 |
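A minimal usage sketch for the download helpers in Resources.py above, assuming the module is importable as `Resources` and that boto can find valid AWS credentials; the org code, station codes, and output directory are illustrative assumptions rather than values taken from the source.

```python
from Resources import get_sp3, get_rnx, soln_exists
import pyDate

# pull an sp3 orbit file and two rinex files for 2016, day-of-year 101
sp3_path  = get_sp3('2016', '101', 'igs', outdir='/tmp')             # 'igs' is a hypothetical org code
rnx_paths = get_rnx('2016', '101', ['p001', 'p002'], outdir='/tmp')  # hypothetical station codes

# check whether a solution sinex already exists for the same day
date = pyDate.Date(year=2016, doy=101)
exists, key_path = soln_exists(date, 'glbf', 'n08', net='n0')
print("exists: " + str(exists) + ", " + key_path)
```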
max_stars_repo_path: install/app_store/tk-framework-qtwidgets/v2.6.5/python/elided_label/elided_label.py | max_stars_repo_name: JoanAzpeitia/lp_sg | max_stars_count: 0 | id: 6632252

# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
A QLabel that elides text and adds ellipses if the text doesn't fit
correctly within the widget frame. Handles rich-text.
"""
import sgtk
from sgtk.platform.qt import QtCore, QtGui
utils = sgtk.platform.import_framework("tk-framework-shotgunutils", "utils")
class ElidedLabel(QtGui.QLabel):
"""
Label that gracefully handles when the text doesn't fit
within the given space.
"""
def __init__(self, parent=None):
"""
:param parent: The parent QWidget
:type parent: :class:`~PySide.QtGui.QWidget`
"""
QtGui.QLabel.__init__(self, parent)
self._elide_mode = QtCore.Qt.ElideRight
self._actual_text = ""
self._line_width = 0
self._ideal_width = None
self.setSizePolicy(
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred,
)
def sizeHint(self):
base_size_hint = super(ElidedLabel, self).sizeHint()
return QtCore.QSize(
self._get_width_hint(),
base_size_hint.height()
)
def _get_width_hint(self):
if not self._ideal_width:
doc = QtGui.QTextDocument()
try:
# add the extra space to buffer the width a bit
doc.setHtml(self._actual_text + " ")
doc.setDefaultFont(self.font())
width = doc.idealWidth()
except Exception:
width = self.width()
finally:
utils.safe_delete_later(doc)
self._ideal_width = width
return self._ideal_width
def _get_elide_mode(self):
"""
Returns current elide mode
:returns: The current elide mode, either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
"""
return self._elide_mode
def _set_elide_mode(self, value):
"""
Set the current elide mode.
:param value: The elide mode to use - must be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
"""
if (value != QtCore.Qt.ElideLeft
and value != QtCore.Qt.ElideRight):
raise ValueError("elide_mode must be set to either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight")
self._elide_mode = value
self._update_elided_text()
#: Property to get or set the elide mode. The value provided
#: should be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight
elide_mode = property(_get_elide_mode, _set_elide_mode)
def text(self):
"""
Overridden base method to return the original unmodified text
:returns: The original unmodified text
"""
return self._actual_text
def setText(self, text):
"""
Overridden base method to set the text on the label
:param text: The text to set on the label
"""
# clear out the ideal width so that the widget can recalculate based on
# the new text
self._ideal_width = None
self._actual_text = text
self._update_elided_text()
# if we're elided, make the tooltip show the full text
if super(ElidedLabel, self).text() != self._actual_text:
# wrap the actual text in a paragraph so that it wraps nicely
self.setToolTip("<p>%s</p>" % (self._actual_text,))
else:
self.setToolTip("")
def resizeEvent(self, event):
"""
Overridden base method called when the widget is resized.
:param event: The resize event
"""
self._update_elided_text()
def _update_elided_text(self):
"""
Update the elided text on the label
"""
text = self._elide_text(self._actual_text, self._elide_mode)
QtGui.QLabel.setText(self, text)
def _elide_text(self, text, elide_mode):
"""
Elide the specified text using the specified mode
:param text: The text to elide
:param elide_mode: The elide mode to use
:returns: The elided text.
"""
# target width is the label width:
target_width = self.width()
# Use a QTextDocument to measure html/richtext width
doc = QtGui.QTextDocument()
try:
doc.setHtml(text)
doc.setDefaultFont(self.font())
# if line width is already less than the target width then great!
line_width = doc.idealWidth()
if line_width <= target_width:
self._line_width = line_width
return text
# depending on the elide mode, insert ellipses in the correct place
cursor = QtGui.QTextCursor(doc)
ellipses = ""
if elide_mode != QtCore.Qt.ElideNone:
# add the ellipses in the correct place:
ellipses = "..."
if elide_mode == QtCore.Qt.ElideLeft:
cursor.setPosition(0)
elif elide_mode == QtCore.Qt.ElideRight:
char_count = doc.characterCount()
cursor.setPosition(char_count-1)
cursor.insertText(ellipses)
ellipses_len = len(ellipses)
# remove characters until the text fits within the target width:
while line_width > target_width:
start_line_width = line_width
# if string is less than the ellipses length then just return
# an empty string
char_count = doc.characterCount()
if char_count <= ellipses_len:
self._line_width = 0
return ""
# calculate the number of characters to remove - should always remove at least 1
# to be sure the text gets shorter!
line_width = doc.idealWidth()
p = target_width/line_width
# play it safe and remove a couple less than the calculated amount
chars_to_delete = max(1, char_count - int(float(char_count) * p)-2)
# remove the characters:
if elide_mode == QtCore.Qt.ElideLeft:
start = ellipses_len
end = chars_to_delete + ellipses_len
else:
# default is to elide right
start = max(0, char_count - chars_to_delete - ellipses_len - 1)
end = max(0, char_count - ellipses_len - 1)
cursor.setPosition(start)
cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
cursor.removeSelectedText()
# update line width:
line_width = doc.idealWidth()
if line_width == start_line_width:
break
self._line_width = line_width
return doc.toHtml()
finally:
utils.safe_delete_later(doc)
@property
def line_width(self):
"""
(:obj:`int`) width of the line of text in pixels
"""
return self._line_width
| en | 0.737358 | # Copyright (c) 2015 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. A QLabel that elides text and adds ellipses if the text doesn't fit correctly within the widget frame. Handles rich-text. Label that gracefully handles when the text doesn't fit within the given space. :param parent: The parent QWidget :type parent: :class:`~PySide.QtGui.QWidget` # add the extra space to buffer the width a bit Returns current elide mode :returns: The current elide mode, either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight Set the current elide mode. :param value: The elide mode to use - must be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight #: Property to get or set the elide mode. The value provided #: should be either QtCore.Qt.ElideLeft or QtCore.Qt.ElideRight Overridden base method to return the original unmodified text :returns: The original unmodified text Overridden base method to set the text on the label :param text: The text to set on the label # clear out the ideal width so that the widget can recalculate based on # the new text # if we're elided, make the tooltip show the full text # wrap the actual text in a paragraph so that it wraps nicely Overridden base method called when the widget is resized. :param event: The resize event Update the elided text on the label Elide the specified text using the specified mode :param text: The text to elide :param elide_mode: The elide mode to use :returns: The elided text. # target width is the label width: # Use a QTextDocument to measure html/richtext width # if line width is already less than the target width then great! # depending on the elide mode, insert ellipses in the correct place # add the ellipses in the correct place: # remove characters until the text fits within the target width: # if string is less than the ellipses length then just return # an empty string # calculate the number of characters to remove - should always remove at least 1 # to be sure the text gets shorter! # play it safe and remove a couple less than the calculated amount # remove the characters: # default is to elide right # update line width: (:obj:`int`) width of the line of text in pixels | 2.629557 | 3 |
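The elision loop in the ElidedLabel record above shrinks the text by deleting a proportional chunk of characters on every pass, always at least one, until the rendered width fits. The same idea can be shown without Qt; the helper below is a hypothetical sketch that measures characters instead of pixels.
def elide_plain_text(text, target_len, elide_left=False):
    # same shrink strategy as _elide_text() above, in character units
    ellipses = "..."
    if len(text) <= target_len:
        return text
    while len(text) + len(ellipses) > target_len:
        if len(text) <= 1:
            return ellipses[:target_len]
        p = target_len / float(len(text) + len(ellipses))
        chars_to_delete = max(1, len(text) - int(len(text) * p))   # remove at least one char per pass
        text = text[chars_to_delete:] if elide_left else text[:-chars_to_delete]
    return (ellipses + text) if elide_left else (text + ellipses)
print(elide_plain_text("/very/long/path/to/asset_v001.ma", 16, elide_left=True))   # ...asset_v001.ma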
python_project_movies/server.py | Pulecz/edu | 2 | 6632253 | import moviedb.db
import moviedb.sqla
import flask
from flask import Flask
from flask import jsonify
app = Flask(__name__)
db_instance = moviedb.sqla.SqlAlchemyFilmStorage(
"sqlite:///sqla.db")
@app.route('/')
def hello_world():
all_movies = db_instance.get_all()
return flask.render_template(
"sample.html", all_movies=all_movies)
@app.route('/film/<wanted_film>')
def show_film(wanted_film):
"loads json.database and returns a movie or all in json"
print('looking for', wanted_film) # for debug
# populate a db
empty_db = moviedb.db.MemoryFilmStorage()
populated_db = moviedb.db.restore_database(empty_db, 'films.json')
# define empty dict for result
result = {}
if wanted_film == '*': # get all movies
list_of_films = [film.to_dict() for film in populated_db]
# save list_of_films to result with title as a key
for film in list_of_films:
result[film["title"]] = film
else: # return only wanted_film
for film in populated_db:
if film.title == wanted_film:
result[wanted_film] = film.to_dict()
# use flask.jsonify for return json
return jsonify(**result)
app.run('0.0.0.0', 5000, True)
| import moviedb.db
import moviedb.sqla
import flask
from flask import Flask
from flask import jsonify
app = Flask(__name__)
db_instance = moviedb.sqla.SqlAlchemyFilmStorage(
"sqlite:///sqla.db")
@app.route('/')
def hello_world():
all_movies = db_instance.get_all()
return flask.render_template(
"sample.html", all_movies=all_movies)
@app.route('/film/<wanted_film>')
def show_film(wanted_film):
"loads json.database and returns a movie or all in json"
print('looking for', wanted_film) # for debug
# populate a db
empty_db = moviedb.db.MemoryFilmStorage()
populated_db = moviedb.db.restore_database(empty_db, 'films.json')
# define empty dict for result
result = {}
if wanted_film == '*': # get all movies
list_of_films = [film.to_dict() for film in populated_db]
# save list_of_films to result with title as a key
for film in list_of_films:
result[film["title"]] = film
else: # return only wanted_film
for film in populated_db:
if film.title == wanted_film:
result[wanted_film] = film.to_dict()
# use flask.jsonify for return json
return jsonify(**result)
app.run('0.0.0.0', 5000, True)
| en | 0.708822 | # for debug # populate a db # define empty dict for result # get all movies # save list_of_films to result with title as a key # return only wanted_film # use flask.jsonify for return json | 3.181064 | 3 |
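With the Flask app in the server.py record above running locally (it binds 0.0.0.0:5000 with debug on), the routes can be exercised from any HTTP client. The sketch below assumes the requests package and a films.json that contains a title such as 'Alien'; both are assumptions, not part of the record.
import requests
base = "http://127.0.0.1:5000"
print(requests.get(base + "/").status_code)        # 200 - renders sample.html from the SQLAlchemy storage
print(requests.get(base + "/film/*").json())       # '*' returns every film keyed by title
print(requests.get(base + "/film/Alien").json())   # a single film, or {} when the title is unknown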
setup.py | may-/vizseq | 0 | 6632254 | <reponame>may-/vizseq<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from setuptools import setup, find_packages
import sys
if sys.version_info < (3,):
sys.exit('Sorry, Python 3 is required for vizseq.')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license_content = f.read()
with open('vizseq/VERSION') as f:
version = f.read()
setup(
name='vizseq',
version=version,
description='Visual Analysis Toolkit for Text Generation Tasks',
url='https://github.com/facebookresearch/vizseq',
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
license='MIT',
setup_requires=[
'setuptools>=18.0',
],
install_requires=[
'numpy',
'sacrebleu==1.5.1',
'torch',
'tqdm',
'nltk>=3.5',
'py-rouge',
'langid',
'google-cloud-translate',
'jinja2',
'IPython',
'matplotlib',
'tornado',
'pandas',
'soundfile',
'laserembeddings',
'bert-score',
'tornado_http_auth',
'basicauth'
],
packages=find_packages(exclude=['examples', 'tests']),
package_data={'vizseq': ['_templates/*.html', 'VERSION']},
test_suite='tests',
zip_safe=False,
)
| # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from setuptools import setup, find_packages
import sys
if sys.version_info < (3,):
sys.exit('Sorry, Python 3 is required for vizseq.')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license_content = f.read()
with open('vizseq/VERSION') as f:
version = f.read()
setup(
name='vizseq',
version=version,
description='Visual Analysis Toolkit for Text Generation Tasks',
url='https://github.com/facebookresearch/vizseq',
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
license='MIT',
setup_requires=[
'setuptools>=18.0',
],
install_requires=[
'numpy',
'sacrebleu==1.5.1',
'torch',
'tqdm',
'nltk>=3.5',
'py-rouge',
'langid',
'google-cloud-translate',
'jinja2',
'IPython',
'matplotlib',
'tornado',
'pandas',
'soundfile',
'laserembeddings',
'bert-score',
'tornado_http_auth',
'basicauth'
],
packages=find_packages(exclude=['examples', 'tests']),
package_data={'vizseq': ['_templates/*.html', 'VERSION']},
test_suite='tests',
zip_safe=False,
) | en | 0.94216 | # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # | 1.33741 | 1 |
outlook_msg/constants.py | hiendang/outlook_msg | 17 | 6632255 | import enum
PROPERTIES_NAME = '__properties_version1.0'
ATTACHMENTS_PREFIX = '__attach_version1.0_#'
SUBSTG_PREFIX = '__substg1.0_'
PROPERTY_IDS = {
'0x0C1F': 'PidTagSenderEmailAddress',
'0x0037': 'PidTagSubject',
'0x1000': 'PidTagBody',
'0x1013': 'PidTagBodyHtml',
'0x1009': 'PidTagRtfCompressed',
'0x0E1B': 'PidTagHasAttachments',
'0x0E13': 'PidTagMessageAttachments',
# Attachments
'0x3701': 'PidTagAttachDataBinary',
'0x3705': 'PidTagAttachMethod',
'0x3707': 'PidTagAttachLongFilename'
}
PROPERTY_TYPES = {
'0x001F': 'PtypeString', # Null-terminated String in UTF-16LE
'0x0003': 'PtypInteger32',
'0x0102': 'PtypBinary',
'0x000B': 'PtypBoolean', # 1 or 0
# 8 bytes; a 64-bit integer representing the number of 100-nanosecond intervals since January 1, 1601
'0x0040': 'PtypTime',
'0x0048': 'PtypGuid', # 16 bytes; a GUID with Data1, Data2, and Data3 fields in little-endian format
'0x0001': 'PtypNull', # Null/Placeholder
# '0x0000': '', # Special, ROP, to be handled specially
}
class HeaderFormat(enum.Enum):
TOP_LEVEL = enum.auto()
EMBEDDED_MESSAGE_OBJECT = enum.auto()
ATTACHMENT_OBJECT = enum.auto()
RECIPIENT_OBJECT = enum.auto()
class AttachMethod(enum.Enum):
# 2.2.2.9 PidTagAttachMethod Property
JustCreated = 0
ByValue = 1
ByReference = 2
Undefined = 3
ByReferenceOnly = 4
EmbeddedMessage = 5
Storage = 6
WebReference = 7
| import enum
PROPERTIES_NAME = '__properties_version1.0'
ATTACHMENTS_PREFIX = '__attach_version1.0_#'
SUBSTG_PREFIX = '__substg1.0_'
PROPERTY_IDS = {
'0x0C1F': 'PidTagSenderEmailAddress',
'0x0037': 'PidTagSubject',
'0x1000': 'PidTagBody',
'0x1013': 'PidTagBodyHtml',
'0x1009': 'PidTagRtfCompressed',
'0x0E1B': 'PidTagHasAttachments',
'0x0E13': 'PidTagMessageAttachments',
# Attachments
'0x3701': 'PidTagAttachDataBinary',
'0x3705': 'PidTagAttachMethod',
'0x3707': 'PidTagAttachLongFilename'
}
PROPERTY_TYPES = {
'0x001F': 'PtypeString', # Null-terminated String in UTF-16LE
'0x0003': 'PtypInteger32',
'0x0102': 'PtypBinary',
'0x000B': 'PtypBoolean', # 1 or 0
# 8 bytes; a 64-bit integer representing the number of 100-nanosecond intervals since January 1, 1601
'0x0040': 'PtypTime',
'0x0048': 'PtypGuid', # 16 bytes; a GUID with Data1, Data2, and Data3 fields in little-endian format
'0x0001': 'PtypNull', # Null/Placeholder
# '0x0000': '', # Special, ROP, to be handled specially
}
class HeaderFormat(enum.Enum):
TOP_LEVEL = enum.auto()
EMBEDDED_MESSAGE_OBJECT = enum.auto()
ATTACHMENT_OBJECT = enum.auto()
RECIPIENT_OBJECT = enum.auto()
class AttachMethod(enum.Enum):
# 2.2.2.9 PidTagAttachMethod Property
JustCreated = 0
ByValue = 1
ByReference = 2
Undefined = 3
ByReferenceOnly = 4
EmbeddedMessage = 5
Storage = 6
WebReference = 7
| en | 0.73964 | #' # Attachments # Null-terminated String in UTF-16LE # 1 or 0 # 8 bytes; a 64-bit integer representing the number of 100-nanosecond intervals since January 1, 1601 # 16 bytes; a GUID with Data1, Data2, and Data3 fields in little-endian format # Null/Placeholder # '0x0000': '', # Special, ROP, to be handled specially # 2.2.2.9 PidTagAttachMethod Property | 2.034198 | 2 |
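In the .msg container these constants describe, a property stream is named SUBSTG_PREFIX followed by four hex digits of property ID and four of property type, so the two dictionaries above can label a raw stream name. The helper below is a hedged sketch of that convention; it reuses the names defined in the record and is not code from the package.
def describe_substg(stream_name):
    # e.g. '__substg1.0_0037001F' -> the subject, stored as a UTF-16LE string
    hex_part = stream_name[len(SUBSTG_PREFIX):]
    prop_id = '0x' + hex_part[:4].upper()
    prop_type = '0x' + hex_part[4:8].upper()
    return (PROPERTY_IDS.get(prop_id, 'unknown id ' + prop_id),
            PROPERTY_TYPES.get(prop_type, 'unknown type ' + prop_type))
print(describe_substg('__substg1.0_0037001F'))   # ('PidTagSubject', 'PtypeString')
print(describe_substg('__substg1.0_37010102'))   # ('PidTagAttachDataBinary', 'PtypBinary')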
test/mlprogram/entrypoint/test_evaluate.py | HiroakiMikami/mlprogram | 9 | 6632256 | import multiprocessing as mp
import os
import tempfile
import pytest
import torch
from mlprogram import distributed
from mlprogram.builtins import Environment
from mlprogram.entrypoint import evaluate
from mlprogram.entrypoint.evaluate import EvaluateSynthesizer, Result
from mlprogram.metrics import Accuracy, Bleu, use_environment
from mlprogram.synthesizers import Result as DecoderResult
from mlprogram.utils.data import ListDataset
context = mp.get_context("spawn")
class MockModel:
def load_state_dict(self, state_dict):
self.state_dict = state_dict
def state_dict(self):
return {}
def to(self, *args, **kwargs):
pass
class MockSynthesizer:
def __init__(self, model):
self.model = model
def __call__(self, input):
yield DecoderResult(self.model.state_dict["name"],
self.model.state_dict["score"],
True,
1)
def synthesize(input):
input = input["query"]
output = []
if input == "query0":
output = ["c0", "c1", "c2"]
elif input == "query1":
output = ["c2", "c3", "c0"]
else:
output = ["c2", "c3", "c5"]
for i, s in enumerate(output):
yield DecoderResult(s, -i, True, 1)
class TestEvaluateSynthesizer(object):
def test_simple_case(self):
accuracy = use_environment(
Accuracy(), in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual"
)
dataset = ListDataset([
Environment(
{"query": "query0", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query1", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query2", "ground_truth": "c0"},
set(["ground_truth"])
),
])
results = EvaluateSynthesizer(dataset, synthesize,
metrics={"accuracy": accuracy})()
assert results.metrics == \
{1: {"accuracy": 1.0 / 3.0}, 3: {"accuracy": 2.0 / 3.0}}
assert 3 == len(results.results)
results.results[0].time = 0.0
results.results[1].time = 0.0
results.results[2].time = 0.0
assert Result({"query": "query0",
"ground_truth": "c0"},
["c0", "c1", "c2"],
{1: {"accuracy": 1.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[0]
assert Result({"query": "query1",
"ground_truth": "c0"},
["c2", "c3", "c0"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[1]
assert Result({"query": "query2",
"ground_truth": "c0"},
["c2", "c3", "c5"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 0.0}},
True, 0.0) == results.results[2]
def _run(self, init_dir, dataset, metrics, rank):
distributed.initialize(init_dir, rank, 2)
return EvaluateSynthesizer(dataset, synthesize,
metrics=metrics)()
def test_multiprocess(self):
accuracy = use_environment(
Accuracy(), in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual"
)
dataset = ListDataset([
Environment(
{"query": "query0", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query1", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query2", "ground_truth": "c0"},
set(["ground_truth"])
),
])
with tempfile.TemporaryDirectory() as init_dir:
with context.Pool(2) as pool:
procs = []
for i in range(2):
p = pool.apply_async(
self._run,
args=(init_dir, dataset, {"accuracy": accuracy}, i),
)
procs.append(p)
out = [p.get() for p in procs]
r0 = out[0]
r1 = out[1]
assert r0 == r1
results = r0
assert results.metrics == {1: {"accuracy": 1.0 / 3},
3: {"accuracy": 2.0 / 3}}
assert 3 == len(results.results)
results.results[0].time = 0.0
results.results[1].time = 0.0
results.results[2].time = 0.0
results.results.sort(key=lambda x: x.sample["query"])
assert Result({"query": "query0",
"ground_truth": "c0"},
["c0", "c1", "c2"],
{1: {"accuracy": 1.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[0]
assert Result({"query": "query1",
"ground_truth": "c0"},
["c2", "c3", "c0"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[1]
assert Result({"query": "query2",
"ground_truth": "c0"},
["c2", "c3", "c5"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 0.0}},
True, 0.0) == results.results[2]
@pytest.fixture
def dataset():
return ListDataset([
Environment({"query": "query", "ground_truth": "name0"},
set(["ground_truth"]))
])
@pytest.fixture
def model():
return MockModel()
@pytest.fixture
def synthesizer(model):
return MockSynthesizer(model)
class TestEvaluate(object):
def test_happy_path(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 1.0, "model": {"score": 1.0, "name": "tmp"}},
os.path.join(input, "model", "0"))
evaluate(input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
})
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
def test_multiple_models(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 0.5, "model": {"score": 0.5, "name": "tmp"}},
os.path.join(input, "model", "0"))
torch.save({"score": 1.0, "model": {"score": 1.0, "name": "tmp"}},
os.path.join(input, "model", "1"))
evaluate(input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
})
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
def _run(self, init_dir, input, output, model, synthesizer, dataset, rank):
distributed.initialize(init_dir, rank, 2)
evaluate(
input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
}
)
def test_multiprocess(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 0.5, "model": {"score": 0.5, "name": "tmp"}},
os.path.join(input, "model", "0"))
with tempfile.TemporaryDirectory() as init_dir:
with context.Pool(2) as pool:
procs = []
for i in range(2):
p = pool.apply_async(
self._run,
args=(init_dir, input, output, model, synthesizer,
dataset, i),
)
procs.append(p)
[p.get() for p in procs]
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
| import multiprocessing as mp
import os
import tempfile
import pytest
import torch
from mlprogram import distributed
from mlprogram.builtins import Environment
from mlprogram.entrypoint import evaluate
from mlprogram.entrypoint.evaluate import EvaluateSynthesizer, Result
from mlprogram.metrics import Accuracy, Bleu, use_environment
from mlprogram.synthesizers import Result as DecoderResult
from mlprogram.utils.data import ListDataset
context = mp.get_context("spawn")
class MockModel:
def load_state_dict(self, state_dict):
self.state_dict = state_dict
def state_dict(self):
return {}
def to(self, *args, **kwargs):
pass
class MockSynthesizer:
def __init__(self, model):
self.model = model
def __call__(self, input):
yield DecoderResult(self.model.state_dict["name"],
self.model.state_dict["score"],
True,
1)
def synthesize(input):
input = input["query"]
output = []
if input == "query0":
output = ["c0", "c1", "c2"]
elif input == "query1":
output = ["c2", "c3", "c0"]
else:
output = ["c2", "c3", "c5"]
for i, s in enumerate(output):
yield DecoderResult(s, -i, True, 1)
class TestEvaluateSynthesizer(object):
def test_simple_case(self):
accuracy = use_environment(
Accuracy(), in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual"
)
dataset = ListDataset([
Environment(
{"query": "query0", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query1", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query2", "ground_truth": "c0"},
set(["ground_truth"])
),
])
results = EvaluateSynthesizer(dataset, synthesize,
metrics={"accuracy": accuracy})()
assert results.metrics == \
{1: {"accuracy": 1.0 / 3.0}, 3: {"accuracy": 2.0 / 3.0}}
assert 3 == len(results.results)
results.results[0].time = 0.0
results.results[1].time = 0.0
results.results[2].time = 0.0
assert Result({"query": "query0",
"ground_truth": "c0"},
["c0", "c1", "c2"],
{1: {"accuracy": 1.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[0]
assert Result({"query": "query1",
"ground_truth": "c0"},
["c2", "c3", "c0"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[1]
assert Result({"query": "query2",
"ground_truth": "c0"},
["c2", "c3", "c5"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 0.0}},
True, 0.0) == results.results[2]
def _run(self, init_dir, dataset, metrics, rank):
distributed.initialize(init_dir, rank, 2)
return EvaluateSynthesizer(dataset, synthesize,
metrics=metrics)()
def test_multiprocess(self):
accuracy = use_environment(
Accuracy(), in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual"
)
dataset = ListDataset([
Environment(
{"query": "query0", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query1", "ground_truth": "c0"},
set(["ground_truth"])
),
Environment(
{"query": "query2", "ground_truth": "c0"},
set(["ground_truth"])
),
])
with tempfile.TemporaryDirectory() as init_dir:
with context.Pool(2) as pool:
procs = []
for i in range(2):
p = pool.apply_async(
self._run,
args=(init_dir, dataset, {"accuracy": accuracy}, i),
)
procs.append(p)
out = [p.get() for p in procs]
r0 = out[0]
r1 = out[1]
assert r0 == r1
results = r0
assert results.metrics == {1: {"accuracy": 1.0 / 3},
3: {"accuracy": 2.0 / 3}}
assert 3 == len(results.results)
results.results[0].time = 0.0
results.results[1].time = 0.0
results.results[2].time = 0.0
results.results.sort(key=lambda x: x.sample["query"])
assert Result({"query": "query0",
"ground_truth": "c0"},
["c0", "c1", "c2"],
{1: {"accuracy": 1.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[0]
assert Result({"query": "query1",
"ground_truth": "c0"},
["c2", "c3", "c0"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 1.0}},
True, 0.0) == results.results[1]
assert Result({"query": "query2",
"ground_truth": "c0"},
["c2", "c3", "c5"],
{1: {"accuracy": 0.0}, 3: {"accuracy": 0.0}},
True, 0.0) == results.results[2]
@pytest.fixture
def dataset():
return ListDataset([
Environment({"query": "query", "ground_truth": "name0"},
set(["ground_truth"]))
])
@pytest.fixture
def model():
return MockModel()
@pytest.fixture
def synthesizer(model):
return MockSynthesizer(model)
class TestEvaluate(object):
def test_happy_path(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 1.0, "model": {"score": 1.0, "name": "tmp"}},
os.path.join(input, "model", "0"))
evaluate(input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
})
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
def test_multiple_models(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 0.5, "model": {"score": 0.5, "name": "tmp"}},
os.path.join(input, "model", "0"))
torch.save({"score": 1.0, "model": {"score": 1.0, "name": "tmp"}},
os.path.join(input, "model", "1"))
evaluate(input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
})
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
def _run(self, init_dir, input, output, model, synthesizer, dataset, rank):
distributed.initialize(init_dir, rank, 2)
evaluate(
input, output, dataset,
model, synthesizer,
{
"accuracy": use_environment(
Accuracy(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
"bleu": use_environment(
Bleu(),
in_keys=["actual", ["ground_truth", "expected"]],
value_key="actual",
),
}
)
def test_multiprocess(self, dataset, model, synthesizer):
with tempfile.TemporaryDirectory() as tmpdir:
input = os.path.join(tmpdir, "input")
output = os.path.join(tmpdir, "output")
os.makedirs(input)
os.makedirs(os.path.join(input, "model"))
torch.save({"score": 0.5, "model": {"score": 0.5, "name": "tmp"}},
os.path.join(input, "model", "0"))
with tempfile.TemporaryDirectory() as init_dir:
with context.Pool(2) as pool:
procs = []
for i in range(2):
p = pool.apply_async(
self._run,
args=(init_dir, input, output, model, synthesizer,
dataset, i),
)
procs.append(p)
[p.get() for p in procs]
assert os.path.exists(os.path.join(output, "result.pt"))
assert os.path.exists(
os.path.join(output, "result_metrics.json"))
| none | 1 | 2.177581 | 2 |
|
handgesturecode-master/proj.py | NikisCodes/Machine-Learning | 7 | 6632257 | <filename>handgesturecode-master/proj.py
# coding: utf-8
# In[ ]:
import numpy as np
import mxnet as mx
import time
import pandas as pd
import cv2
import logging
logging.getLogger().setLevel(logging.DEBUG) # logging to stdout
# In[ ]:
#import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
# In[ ]:
data0 = pd.read_csv('fdata/fdata.csv', names=['name','state'])
# In[ ]:
#data0.head()
# In[ ]:
#data0['state'].unique()
# In[ ]:
num_class = len(data0['state'].unique())
ges_to_num = dict({(g,i) for i, g in enumerate(data0['state'].unique())})
num_to_ges = dict({(i,g) for i, g in enumerate(data0['state'].unique())})
#num_class, ges_to_num
# In[ ]:
data0 = data0.replace({'state':ges_to_num})
# In[ ]:
#data0.shape[0]
# In[ ]:
labels = np.empty((data0.shape[0]))
res_width, res_height = 200, 200
imgs = np.empty(shape=(data0.shape[0],1,res_width,res_height))
#imgs.shape, labels.shape
# In[ ]:
prefix = 'fdata/pic/'
outfix = 'fdata/bi_pic/'
for i, (im_name, state) in enumerate(data0.values):
im_path = prefix + im_name
    print(im_path)
img = cv2.imread(im_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
res = cv2.resize(gray,(200, 200), interpolation=cv2.INTER_CUBIC)
imgs[i][0] = res
labels[i] = state
# In[ ]:
train_data, train_label = imgs, labels
# test_data, test_label = imgs[23:], labels[2:]
train_data.shape, train_label.shape#, test_data.shape, test_label.shape
# In[ ]:
batch_size = 10
train_iter = mx.io.NDArrayIter(train_data, train_label, batch_size, shuffle=True)
# eval_iter = mx.io.NDArrayIter(test_data, test_label, batch_size)
# In[ ]:
data = mx.sym.var('data')
conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20, name='conv1')
bn1 = mx.sym.BatchNorm(conv1, fix_gamma=True)
tanh1 = mx.sym.Activation(data=bn1, act_type='tanh')
pool1 = mx.sym.Pooling(data=tanh1, pool_type='max', kernel=(2,2), stride=(2,2))
conv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50, name='conv2')
bn2 = mx.sym.BatchNorm(conv2, fix_gamma=True)
tanh2 = mx.sym.Activation(data=bn2, act_type='tanh')
pool2 = mx.sym.Pooling(data=tanh2, pool_type='max', kernel=(2,2), stride=(2,2))
flat = mx.sym.flatten(data=pool2)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=500)
tanh3 = mx.sym.Activation(data=fc1, act_type='tanh')
fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=num_class)
convnet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
mx.viz.plot_network(convnet)
# In[ ]:
model = mx.mod.Module(symbol=convnet, context=mx.gpu())
# In[ ]:
model.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
model.init_params(initializer=mx.init.Uniform(scale=.1))
model.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate':0.1})
metric = mx.metric.Accuracy()
# In[ ]:
chk_prefix='models/chkpt'
for epoch in range(1200):
train_iter.reset()
metric.reset()
st = time.time()
for batch in train_iter:
model.forward(data_batch=batch, is_train=True)
model.update_metric(metric, batch.label)
model.backward()
model.update()
if epoch % 50 == 0:
# model_path = '{}_{}'.format(chk_prefix, epoch)
model.save_checkpoint(chk_prefix, epoch)
et = time.time()-st
print('Epoch %d, Training %s, Time %.2f' % (epoch, metric.get(), et))
# model.score(train_iter, metric)
| <filename>handgesturecode-master/proj.py
# coding: utf-8
# In[ ]:
import numpy as np
import mxnet as mx
import time
import pandas as pd
import cv2
import logging
logging.getLogger().setLevel(logging.DEBUG) # logging to stdout
# In[ ]:
#import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
# In[ ]:
data0 = pd.read_csv('fdata/fdata.csv', names=['name','state'])
# In[ ]:
#data0.head()
# In[ ]:
#data0['state'].unique()
# In[ ]:
num_class = len(data0['state'].unique())
ges_to_num = dict({(g,i) for i, g in enumerate(data0['state'].unique())})
num_to_ges = dict({(i,g) for i, g in enumerate(data0['state'].unique())})
#num_class, ges_to_num
# In[ ]:
data0 = data0.replace({'state':ges_to_num})
# In[ ]:
#data0.shape[0]
# In[ ]:
labels = np.empty((data0.shape[0]))
res_width, res_height = 200, 200
imgs = np.empty(shape=(data0.shape[0],1,res_width,res_height))
#imgs.shape, labels.shape
# In[ ]:
prefix = 'fdata/pic/'
outfix = 'fdata/bi_pic/'
for i, (im_name, state) in enumerate(data0.values):
im_path = prefix + im_name
    print(im_path)
img = cv2.imread(im_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
res = cv2.resize(gray,(200, 200), interpolation=cv2.INTER_CUBIC)
imgs[i][0] = res
labels[i] = state
# In[ ]:
train_data, train_label = imgs, labels
# test_data, test_label = imgs[23:], labels[2:]
train_data.shape, train_label.shape#, test_data.shape, test_label.shape
# In[ ]:
batch_size = 10
train_iter = mx.io.NDArrayIter(train_data, train_label, batch_size, shuffle=True)
# eval_iter = mx.io.NDArrayIter(test_data, test_label, batch_size)
# In[ ]:
data = mx.sym.var('data')
conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20, name='conv1')
bn1 = mx.sym.BatchNorm(conv1, fix_gamma=True)
tanh1 = mx.sym.Activation(data=bn1, act_type='tanh')
pool1 = mx.sym.Pooling(data=tanh1, pool_type='max', kernel=(2,2), stride=(2,2))
conv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50, name='conv2')
bn2 = mx.sym.BatchNorm(conv2, fix_gamma=True)
tanh2 = mx.sym.Activation(data=bn2, act_type='tanh')
pool2 = mx.sym.Pooling(data=tanh2, pool_type='max', kernel=(2,2), stride=(2,2))
flat = mx.sym.flatten(data=pool2)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=500)
tanh3 = mx.sym.Activation(data=fc1, act_type='tanh')
fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=num_class)
convnet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
mx.viz.plot_network(convnet)
# In[ ]:
model = mx.mod.Module(symbol=convnet, context=mx.gpu())
# In[ ]:
model.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
model.init_params(initializer=mx.init.Uniform(scale=.1))
model.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate':0.1})
metric = mx.metric.Accuracy()
# In[ ]:
chk_prefix='models/chkpt'
for epoch in range(1200):
train_iter.reset()
metric.reset()
st = time.time()
for batch in train_iter:
model.forward(data_batch=batch, is_train=True)
model.update_metric(metric, batch.label)
model.backward()
model.update()
if epoch % 50 == 0:
# model_path = '{}_{}'.format(chk_prefix, epoch)
model.save_checkpoint(chk_prefix, epoch)
et = time.time()-st
print('Epoch %d, Training %s, Time %.2f' % (epoch, metric.get(), et))
# model.score(train_iter, metric)
| en | 0.246213 | # coding: utf-8 # In[ ]: # logging to stdout # In[ ]: #import matplotlib.pyplot as plt #get_ipython().magic('matplotlib inline') # In[ ]: # In[ ]: #data0.head() # In[ ]: #data0['state'].unique() # In[ ]: #num_class, ges_to_num # In[ ]: # In[ ]: #data0.shape[0] # In[ ]: #imgs.shape, labels.shape # In[ ]: # In[ ]: # test_data, test_label = imgs[23:], labels[2:] #, test_data.shape, test_label.shape # In[ ]: # eval_iter = mx.io.NDArrayIter(test_data, test_label, batch_size) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # model_path = '{}_{}'.format(chk_prefix, epoch) # model.score(train_iter, metric) | 2.456936 | 2 |
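The training loop above checkpoints the module to models/chkpt every 50 epochs, so a saved symbol/params pair can be reloaded for single-image prediction with the MXNet 1.x Module API. The epoch number and image path below are assumptions for illustration; num_to_ges is the mapping built earlier in the record.
from collections import namedtuple
import cv2
import mxnet as mx
Batch = namedtuple('Batch', ['data'])
mod = mx.mod.Module.load('models/chkpt', 50, context=mx.cpu())            # any saved epoch works
mod.bind(data_shapes=[('data', (1, 1, 200, 200))], for_training=False)
gray = cv2.cvtColor(cv2.imread('fdata/pic/example.jpg'), cv2.COLOR_BGR2GRAY)
resized = cv2.resize(gray, (200, 200), interpolation=cv2.INTER_CUBIC)
mod.forward(Batch([mx.nd.array(resized.reshape(1, 1, 200, 200))]))
probs = mod.get_outputs()[0].asnumpy()[0]
print(num_to_ges[int(probs.argmax())])                                    # predicted gesture label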
tests/test_web_maps.py | blurks/clld | 32 | 6632258 | <filename>tests/test_web_maps.py<gh_stars>10-100
import pytest
from clld.db.models import common
from clld.web.maps import (
Map, ParameterMap, LanguageMap, SelectedLanguagesMap, CombinationMap, FilterLegend,
Layer,
)
@pytest.mark.parametrize(
"route,params,map_cls,ctx_cls,ctx_id",
[
('language', dict(z=3, lat=1, lng=1), Map, common.Parameter, 'parameter'),
('language', {'z': 'abc'}, Map, common.Parameter, 'parameter'),
('parameter', {}, ParameterMap, common.Parameter, 'parameter'),
('parameter', {}, ParameterMap, common.Parameter, 'no-domain'),
('language', {}, LanguageMap, common.Language, 'language'),
])
def test_Map(request_factory, route, params, map_cls, ctx_cls, ctx_id):
with request_factory(matched_route=route, params=params) as req:
m = map_cls(ctx_cls.get(ctx_id), req)
m.render()
def test_SelectedLanguagesMap(env):
m = SelectedLanguagesMap(None, env['request'], [common.Language.first()])
m.render()
def test_CombinationMap(env):
ctx = common.Combination(common.Parameter.first())
assert ctx.domain
ctx.multiple = [common.Language.first()]
dt = CombinationMap(ctx, env['request'])
dt.render()
def test_layers(env):
class TestMap(Map):
def get_layers(self):
yield Layer('l1', 'ln1', [], representation=555)
yield Layer('l2', 'ln2', [], representation=333)
m = TestMap(None, env['request'])
assert '888' in m.render()
def test_FilterLegend(request_factory):
from clld.web.datatables import Languages
class FLanguages(Languages):
def col_defs(self):
cols = Languages.col_defs(self)
cols[1].choices = ['name']
return cols
class FMap(Map):
def get_legends(self):
yield FilterLegend(
self,
'',
col='name',
dt=FLanguages(self.req, common.Language))
with request_factory(matched_route='language') as req:
map_ = FMap(common.Language.first(), req)
map_.render()
| <filename>tests/test_web_maps.py<gh_stars>10-100
import pytest
from clld.db.models import common
from clld.web.maps import (
Map, ParameterMap, LanguageMap, SelectedLanguagesMap, CombinationMap, FilterLegend,
Layer,
)
@pytest.mark.parametrize(
"route,params,map_cls,ctx_cls,ctx_id",
[
('language', dict(z=3, lat=1, lng=1), Map, common.Parameter, 'parameter'),
('language', {'z': 'abc'}, Map, common.Parameter, 'parameter'),
('parameter', {}, ParameterMap, common.Parameter, 'parameter'),
('parameter', {}, ParameterMap, common.Parameter, 'no-domain'),
('language', {}, LanguageMap, common.Language, 'language'),
])
def test_Map(request_factory, route, params, map_cls, ctx_cls, ctx_id):
with request_factory(matched_route=route, params=params) as req:
m = map_cls(ctx_cls.get(ctx_id), req)
m.render()
def test_SelectedLanguagesMap(env):
m = SelectedLanguagesMap(None, env['request'], [common.Language.first()])
m.render()
def test_CombinationMap(env):
ctx = common.Combination(common.Parameter.first())
assert ctx.domain
ctx.multiple = [common.Language.first()]
dt = CombinationMap(ctx, env['request'])
dt.render()
def test_layers(env):
class TestMap(Map):
def get_layers(self):
yield Layer('l1', 'ln1', [], representation=555)
yield Layer('l2', 'ln2', [], representation=333)
m = TestMap(None, env['request'])
assert '888' in m.render()
def test_FilterLegend(request_factory):
from clld.web.datatables import Languages
class FLanguages(Languages):
def col_defs(self):
cols = Languages.col_defs(self)
cols[1].choices = ['name']
return cols
class FMap(Map):
def get_legends(self):
yield FilterLegend(
self,
'',
col='name',
dt=FLanguages(self.req, common.Language))
with request_factory(matched_route='language') as req:
map_ = FMap(common.Language.first(), req)
map_.render()
| none | 1 | 2.189091 | 2 |
|
rps.py | enockmecheo/project3 | 0 | 6632259 | print()
import random
input("Welcome to Rock, Paper, Scissors! Press enter to start.")
my_score=0
comp_score=0
while True:
print()
my_choice=input("Rock, Paper, or Scissors?: ").lower()
while my_choice != "rock" and my_choice != "paper" and my_choice != "scissors":
my_choice = input("Invalid Choice! Please try again: ").lower()
random_num=random.randint(0,2)
if random_num == 0:
comp_choice="rock"
elif random_num == 1:
comp_choice="paper"
elif random_num == 2:
comp_choice="scissors"
print()
print("You chose: ",my_choice)
print("The computer chose: ",comp_choice)
print()
if my_choice=="rock":
if comp_choice=="rock":
print("It's a tie")
elif comp_choice=="paper":
print("You Lost! Paper covers the Rock")
comp_score += 1
elif comp_choice=="scissors":
print("You Won!! The Rock smashes Scissors")
my_score += 1
elif my_choice=="paper":
if comp_choice=="paper":
print("It's a tie")
elif comp_choice=="scissors":
print("You Lost! Scissors cut the Paper")
comp_score += 1
elif comp_choice=="rock":
print("You Won!! Paper covers the Rock")
my_score += 1
elif my_choice=="scissors":
if comp_choice=="scissors":
print("It's a tie")
elif comp_choice=="rock":
print("You Lost! Rock smashes the Scissors")
comp_score += 1
elif comp_choice=="paper":
print("You Won!! Scissors cut the Paper")
my_score += 1
print()
print("You have",my_score,"wins")
print("The computer has",comp_score,"wins")
print()
repeat = input("Do you want to continue? (Y/N) ").lower()
while repeat != "y" and repeat != "n":
repeat = input("Invalid Choice! Please try again: ").lower()
if repeat == "n":
print()
print("Thanks for the game! Come back soon")
print()
break
print("\n--------------------\n")
| print()
import random
input("Welcome to Rock, Paper, Scissors! Press enter to start.")
my_score=0
comp_score=0
while True:
print()
my_choice=input("Rock, Paper, or Scissors?: ").lower()
while my_choice != "rock" and my_choice != "paper" and my_choice != "scissors":
my_choice = input("Invalid Choice! Please try again: ").lower()
random_num=random.randint(0,2)
if random_num == 0:
comp_choice="rock"
elif random_num == 1:
comp_choice="paper"
elif random_num == 2:
comp_choice="scissors"
print()
print("You chose: ",my_choice)
print("The computer chose: ",comp_choice)
print()
if my_choice=="rock":
if comp_choice=="rock":
print("It's a tie")
elif comp_choice=="paper":
print("You Lost! Paper covers the Rock")
comp_score += 1
elif comp_choice=="scissors":
print("You Won!! The Rock smashes Scissors")
my_score += 1
elif my_choice=="paper":
if comp_choice=="paper":
print("It's a tie")
elif comp_choice=="scissors":
print("You Lost! Scissors cut the Paper")
comp_score += 1
elif comp_choice=="rock":
print("You Won!! Paper covers the Rock")
my_score += 1
elif my_choice=="scissors":
if comp_choice=="scissors":
print("It's a tie")
elif comp_choice=="rock":
print("You Lost! Rock smashes the Scissors")
comp_score += 1
elif comp_choice=="paper":
print("You Won!! Scissors cut the Paper")
my_score += 1
print()
print("You have",my_score,"wins")
print("The computer has",comp_score,"wins")
print()
repeat = input("Do you want to continue? (Y/N) ").lower()
while repeat != "y" and repeat != "n":
repeat = input("Invalid Choice! Please try again: ").lower()
if repeat == "n":
print()
print("Thanks for the game! Come back soon")
print()
break
print("\n--------------------\n")
| none | 1 | 4.279267 | 4 |
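The if/elif ladder in the rps.py record above encodes a fixed relation: each gesture beats exactly one other. A hedged refactoring sketch of the same rules as a lookup table:
beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
def outcome(mine, theirs):
    if mine == theirs:
        return "tie"
    return "win" if beats[mine] == theirs else "loss"
print(outcome("rock", "scissors"), outcome("paper", "scissors"), outcome("rock", "rock"))   # win loss tie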
|
moveTypes.py | peaceknight05/sstmon | 0 | 6632260 | <reponame>peaceknight05/sstmon<gh_stars>0
import effect
from enum import Enum
class MoveTypes(Enum):
LEAF_WHIRLWIND = 0
QUASI_PROTECT = 1
REBOUND_SMASH = 2
NATURES_FURY = 3
CRIMSON_CHARGE = 4
TACKLE = 5
UNNERVING_GROWL = 6
DART_BULLET = 7 | import effect
from enum import Enum
class MoveTypes(Enum):
LEAF_WHIRLWIND = 0
QUASI_PROTECT = 1
REBOUND_SMASH = 2
NATURES_FURY = 3
CRIMSON_CHARGE = 4
TACKLE = 5
UNNERVING_GROWL = 6
DART_BULLET = 7 | none | 1 | 2.214812 | 2 |
|
Published/SourceCode/Python/API/DynamicArrayStack.py | ChalmersGU-data-structure-courses/OpenDSA | 0 | 6632261 |
from API import Stack
class DynamicArrayStack(Stack):
_minCapacity = 8 # Minimum capacity of internalArray
_minLoadFactor = 0.5 # Must be smaller than 1/CapacityMultiplier
_capacityMultiplier = 1.5 # Factor to grow/shrink the capacity
def __init__(self):
self._internalArray = [None] * self._minCapacity # Internal array containing the stack elements
self._stackSize = 0 # Size of stack, and index of the next free slot
def push(self, x):
if self._stackSize >= len(self._internalArray):
self._resizeArray(len(self._internalArray) * self._capacityMultiplier)
self._internalArray[self._stackSize] = x
self._stackSize += 1
def peek(self):
if not (self._stackSize > 0): raise IndexError("peek from empty stack")
return self._internalArray[self._stackSize-1]
def pop(self):
if not (self._stackSize > 0): raise IndexError("pop from empty stack")
self._stackSize -= 1
x = self._internalArray[self._stackSize]
self._internalArray[self._stackSize] = None # For garbage collection
if self._stackSize <= len(self._internalArray) * self._minLoadFactor:
self._resizeArray(len(self._internalArray) / self._capacityMultiplier)
return x
def _resizeArray(self, newCapacity):
if newCapacity < self._minCapacity: return
newArray = [None] * int(newCapacity)
for i in range(self._stackSize):
newArray[i] = self._internalArray[i]
self._internalArray = newArray
def isEmpty(self):
return self._stackSize == 0
def size(self):
return self._stackSize
def __iter__(self):
for i in reversed(range(self._stackSize)):
yield self._internalArray[i]
#######################################################################################
## What comes below is purely for debugging and testing purposes - it can be removed ##
def _printList(l):
print(len(l._internalArray), "[", "- " * (len(l._internalArray) - l.size()) + "|",
" ".join(str(e) for e in l), "]", l.size())
if __name__ == '__main__':
a = DynamicArrayStack()
for i in range(23):
a.push(chr(i+65))
if a.size() % 5 == 0:
_printList(a)
_printList(a)
while not a.isEmpty():
assert a.peek() == a.pop(), (a,)
if a.size() % 3 == 2:
_printList(a)
_printList(a)
|
from API import Stack
class DynamicArrayStack(Stack):
_minCapacity = 8 # Minimum capacity of internalArray
_minLoadFactor = 0.5 # Must be smaller than 1/CapacityMultiplier
_capacityMultiplier = 1.5 # Factor to grow/shrink the capacity
def __init__(self):
self._internalArray = [None] * self._minCapacity # Internal array containing the stack elements
self._stackSize = 0 # Size of stack, and index of the next free slot
def push(self, x):
if self._stackSize >= len(self._internalArray):
self._resizeArray(len(self._internalArray) * self._capacityMultiplier)
self._internalArray[self._stackSize] = x
self._stackSize += 1
def peek(self):
if not (self._stackSize > 0): raise IndexError("peek from empty stack")
return self._internalArray[self._stackSize-1]
def pop(self):
if not (self._stackSize > 0): raise IndexError("pop from empty stack")
self._stackSize -= 1
x = self._internalArray[self._stackSize]
self._internalArray[self._stackSize] = None # For garbage collection
if self._stackSize <= len(self._internalArray) * self._minLoadFactor:
self._resizeArray(len(self._internalArray) / self._capacityMultiplier)
return x
def _resizeArray(self, newCapacity):
if newCapacity < self._minCapacity: return
newArray = [None] * int(newCapacity)
for i in range(self._stackSize):
newArray[i] = self._internalArray[i]
self._internalArray = newArray
def isEmpty(self):
return self._stackSize == 0
def size(self):
return self._stackSize
def __iter__(self):
for i in reversed(range(self._stackSize)):
yield self._internalArray[i]
#######################################################################################
## What comes below is purely for debugging and testing purposes - it can be removed ##
def _printList(l):
print(len(l._internalArray), "[", "- " * (len(l._internalArray) - l.size()) + "|",
" ".join(str(e) for e in l), "]", l.size())
if __name__ == '__main__':
a = DynamicArrayStack()
for i in range(23):
a.push(chr(i+65))
if a.size() % 5 == 0:
_printList(a)
_printList(a)
while not a.isEmpty():
assert a.peek() == a.pop(), (a,)
if a.size() % 3 == 2:
_printList(a)
_printList(a)
| en | 0.514466 | # Minimum capacity of internalArray # Must be smaller than 1/CapacityMultiplier # Factor to grow/shrink the capacity # Internal array containing the stack elements # Size of stack, and index of the next free slot # For garbage collection ####################################################################################### ## What comes below is purely for debugging and testing purposes - it can be removed ## | 3.07955 | 3 |
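The class comments above require _minLoadFactor to be smaller than 1/_capacityMultiplier; that keeps the load factor right after a shrink (minimum load times multiplier) strictly below 1, so a shrink can never be followed immediately by a forced grow. A quick numeric check with the shipped constants, as a sketch:
min_load, multiplier = 0.5, 1.5            # values used by DynamicArrayStack above
assert min_load < 1.0 / multiplier         # 0.5 < 0.666...
capacity = 90                              # say 90 slots holding 45 items when a pop triggers the shrink
post_shrink_load = (capacity * min_load) / (capacity / multiplier)
print(post_shrink_load)                    # 0.75 -> the next push cannot force an immediate re-grow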
number_theory/euclids_algorithm.py | sanjayjha97/datastructures | 0 | 6632262 | # Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
# Euclid's Algorithm
def gcd(a, b):
if a < b:
a, b = b, a
while a % b != 0:
a, b = b, a % b
return b
# Recursive Euclid's Algorithm
def recursive_gcd(a, b):
if b == 0:
return a
return recursive_gcd(b, a % b) | # Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
# Euclid's Algorithm
def gcd(a, b):
if a < b:
a, b = b, a
while a % b != 0:
a, b = b, a % b
return b
# Recursive Euclid's Algorithm
def recursive_gcd(a, b):
if b == 0:
return a
return recursive_gcd(b, a % b) | en | 0.558616 | # Euclid's Lemma : d divides a and b, if and only if d divides a-b and b # Euclid's Algorithm # Recursive Euclid's Algorithm | 3.775764 | 4 |
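Euclid's lemma quoted at the top of the record is what justifies the remainder loop: gcd(a, b) equals gcd(b, a mod b) until the remainder vanishes. A worked trace, assuming the two functions above are in scope:
# gcd(48, 18): 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0 -> the answer is 6
print(gcd(48, 18), recursive_gcd(48, 18))   # 6 6
print(gcd(17, 5))                           # coprime inputs give 1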
data_structures/test_trie.py | prekolna/AlgorithmsGreatestHits | 1 | 6632263 | <filename>data_structures/test_trie.py
import unittest
import random
import re
from .trie import Trie
class TrieTests(unittest.TestCase):
def setUp(self):
self.test_emails = [ "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>" ]
self.not_present_email = "<EMAIL>"
self.vowel_pattern = re.compile("[aeiouAEIOU]")
self.under_test = Trie()
for test_email in self.test_emails:
self.under_test.add_string(test_email)
def test_add_string_and_find(self):
for test_email in self.test_emails:
self.assertEqual(self.under_test.find(test_email), test_email)
def test_find_returns_none_if_not_found(self):
self.assertEqual(self.under_test.find(self.not_present_email), "")
def test_str(self):
under_test_str = str(self.under_test)
for test_email in self.test_emails:
self.assertTrue(test_email in under_test_str)
def test_find_prefix(self):
for test_email in self.test_emails:
self.assertEqual(self.under_test.find(test_email.split("@")[0]), "")
def test_fuzzy_find_exact_match(self):
for test_email in self.test_emails:
self.assertEqual([ test_email ], self.under_test.fuzzy_find(test_email))
def test_fuzzy_find_wrong_case(self):
for test_email in self.test_emails:
self.assertEqual([ test_email ], self.under_test.fuzzy_find(test_email.upper()))
def test_fuzzy_find_wrong_vowel(self):
def get_random_vowel(_):
return random.choice("aeiou")
for test_email in self.test_emails:
to_fuzzy_find = self.vowel_pattern.sub(get_random_vowel, test_email)
self.assertEqual([ test_email ], self.under_test.fuzzy_find(to_fuzzy_find))
def test_fuzzy_find_returns_empty_list_if_not_found(self):
self.assertEqual([ ], self.under_test.fuzzy_find(self.not_present_email))
def test_fuzzy_find_with_repeated_letters(self):
for test_email in self.test_emails:
to_fuzzy_find = "d" + test_email
self.assertEqual([ test_email ], self.under_test.fuzzy_find(to_fuzzy_find))
| <filename>data_structures/test_trie.py
import unittest
import random
import re
from .trie import Trie
class TrieTests(unittest.TestCase):
def setUp(self):
self.test_emails = [ "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>" ]
self.not_present_email = "<EMAIL>"
self.vowel_pattern = re.compile("[aeiouAEIOU]")
self.under_test = Trie()
for test_email in self.test_emails:
self.under_test.add_string(test_email)
def test_add_string_and_find(self):
for test_email in self.test_emails:
self.assertEqual(self.under_test.find(test_email), test_email)
def test_find_returns_none_if_not_found(self):
self.assertEqual(self.under_test.find(self.not_present_email), "")
def test_str(self):
under_test_str = str(self.under_test)
for test_email in self.test_emails:
self.assertTrue(test_email in under_test_str)
def test_find_prefix(self):
for test_email in self.test_emails:
self.assertEqual(self.under_test.find(test_email.split("@")[0]), "")
def test_fuzzy_find_exact_match(self):
for test_email in self.test_emails:
self.assertEqual([ test_email ], self.under_test.fuzzy_find(test_email))
def test_fuzzy_find_wrong_case(self):
for test_email in self.test_emails:
self.assertEqual([ test_email ], self.under_test.fuzzy_find(test_email.upper()))
def test_fuzzy_find_wrong_vowel(self):
def get_random_vowel(_):
return random.choice("aeiou")
for test_email in self.test_emails:
to_fuzzy_find = self.vowel_pattern.sub(get_random_vowel, test_email)
self.assertEqual([ test_email ], self.under_test.fuzzy_find(to_fuzzy_find))
def test_fuzzy_find_returns_empty_list_if_not_found(self):
self.assertEqual([ ], self.under_test.fuzzy_find(self.not_present_email))
def test_fuzzy_find_with_repeated_letters(self):
for test_email in self.test_emails:
to_fuzzy_find = "d" + test_email
self.assertEqual([ test_email ], self.under_test.fuzzy_find(to_fuzzy_find))
| none | 1 | 3.544395 | 4 |
|
scripts/fundamentals/cls_multiple_inheritance_example.py | duttashi/learnpy | 0 | 6632264 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 13:41:59 2020
An example of multiple inheritance
Notes:
1. Any class can inherit from multiple classes
2. Python normally uses "depth-first" order,
when searching inheriting classes.
3. But when two classes inherit from the same class.
4. Python eliminates the first mention of that class
from the mro (method resolution call).
@author: Ashish
"""
class A(object):
def dothis(self):
print("Doing this in A")
class B(A):
pass
class C(A):
def dothis(self):
print("Doing this in C")
class D(B,C):
pass
# create class instance objects
d_instance = D()
d_instance.dothis()
print(D.mro())
| # -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 13:41:59 2020
An example of multiple inheritance
Notes:
1. Any class can inherit from multiple classes
2. Python normally uses "depth-first" order,
when searching inheriting classes.
3. But when two classes inherit from the same class.
4. Python eliminates the first mention of that class
from the mro (method resolution call).
@author: Ashish
"""
class A(object):
def dothis(self):
print("Doing this in A")
class B(A):
pass
class C(A):
def dothis(self):
print("Doing this in C")
class D(B,C):
pass
# create class instance objects
d_instance = D()
d_instance.dothis()
print(D.mro()) | en | 0.750583 | # -*- coding: utf-8 -*- Created on Sun Oct 18 13:41:59 2020 An example of multiple inheritance Notes: 1. Any class can inherit from multiple classes 2. Python normally uses "depth-first" order, when searching inheriting classes. 3. But when two classes inherit from the same class. 4. Python eliminates the first mention of that class from the mro (method resolution call). @author: Ashish # create class instance objects | 4.294317 | 4 |
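For the diamond above, the resolution order works out exactly as the docstring notes describe: the copy of A reached through B is dropped, so C's dothis() is found before A's. A minimal check, assuming the classes above are defined:
print([cls.__name__ for cls in D.mro()])   # ['D', 'B', 'C', 'A', 'object']
D().dothis()                               # prints "Doing this in C"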
miniproject/first/food.py | annisanurdiana/python_programming | 5 | 6632265 | <filename>miniproject/first/food.py
from menu_item import MenuItem
class Food(MenuItem):
def __init__(self, name, price, calorie_count):
super().__init__(name, price)
self.calorie_count = calorie_count
def info(self):
return self.name + ': $' + str(self.price) + ' (' + str(self.calorie_count) + 'kkal)'
def calorie_info(self):
print('kcal: ' + str(self.calorie_count))
| <filename>miniproject/first/food.py
from menu_item import MenuItem
class Food(MenuItem):
def __init__(self, name, price, calorie_count):
super().__init__(name, price)
self.calorie_count = calorie_count
def info(self):
return self.name + ': $' + str(self.price) + ' (' + str(self.calorie_count) + 'kkal)'
def calorie_info(self):
print('kcal: ' + str(self.calorie_count))
| none | 1 | 3.472027 | 3 |
|
aysa_commands/_docker.py | alejandrobernardis/aysa-commands | 1 | 6632266 | # Author: <NAME>
# Email: <EMAIL>is at gmail.com
# Created: 2019/10/30
# ~
###############################################################################
# Docker Registry Documentation: https://docs.docker.com/registry/ #
###############################################################################
# TODO (0608156): implement token-based authentication.
# https://docs.docker.com/registry/configuration/#auth
# TODO (0608156): implement a paginator for the catalog and tags inside
# the API
import re
import json
import requests
from requests.auth import HTTPBasicAuth
TAG_SEP = ':'
REPO_SEP = '/'
MANIFEST_VERSION = 'v2'
MEDIA_TYPES = {
'v1': 'application/vnd.docker.distribution.manifest.v1+json',
'v2': 'application/vnd.docker.distribution.manifest.v2+json',
'v2f': 'application/vnd.docker.distribution.manifest.list.v2+json'
}
rx_schema = re.compile(r'(localhost|.*\.local(?:host)?(?::\d{1,5})?)$', re.I)
rx_registry = re.compile(r'^(localhost|[\w\-]+(\.[\w\-]+)+)(?::\d{1,5})?/',
re.I)
rx_repository = re.compile(r'^[a-z0-9]+(?:[/:._-][a-z0-9]+)*$')
def get_media_type(value=MANIFEST_VERSION, key='Accept', obj=True):
value = MEDIA_TYPES[value if value in MEDIA_TYPES else MANIFEST_VERSION]
return {key: value} if obj is True else value
def remove_registry(value):
registry = get_registry(value)
if registry is not None:
value = value.replace(registry, '')
return value
def get_tag(value):
value = remove_registry(value)
if TAG_SEP not in value:
return None
return value.rsplit(TAG_SEP, 1)[-1]
def get_repository(value):
return remove_registry(value).rsplit(TAG_SEP, 1)[0]
def get_namespace(value):
value = get_repository(value)
if REPO_SEP not in value:
return None
return value.rsplit(REPO_SEP, 1)[0]
def get_image(value):
return get_repository(value).rsplit(REPO_SEP, 1)[-1]
def get_registry(value):
r = rx_registry.match(value)
if r is not None:
return r.group()
return None
def get_parts(value):
"""
    Expected string format:
- {url:port}/{namespace}/{repository}:{tag}
"""
if not rx_repository.match(get_repository(value)):
raise RegistryError('El endpoint "{}" está mal formateado.'
.format(value))
return {
'registry': get_registry(value),
'repository': get_repository(value),
'namespace': get_namespace(value),
'image': get_image(value),
'tag': get_tag(value),
}
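# Hedged illustration (not part of the original module): with the helpers above,
# get_parts('registry.local:5000/dev/web:latest') returns
#   {'registry': 'registry.local:5000/', 'repository': 'dev/web',
#    'namespace': 'dev', 'image': 'web', 'tag': 'latest'}
# while a bare 'web' yields namespace=None and tag=None.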
def validate_token(value, exclude='|#@'):
return value and ''.join([x for x in value if x not in exclude]) == value
def scheme(endpoint):
return 'http' if rx_schema.match(endpoint) else 'https'
class Registry:
"""Registry Client (simple)"""
def __init__(self, host, insecure=False, verify=True, credentials=None,
**kwargs):
self.host = host
self.insecure = insecure
self.verify = verify if insecure is False else True
self.scheme = scheme(host) if insecure is False else 'http'
self.credentials = credentials
def get_baseurl(self):
return '{}://{}/v2'.format(self.scheme, self.host)
def get_credentials(self, split=False):
if split is True:
return self.credentials.split(':')
return self.credentials
def session(self, headers=None, timeout=10):
s = requests.Session()
if self.credentials is not None:
s.auth = HTTPBasicAuth(*self.get_credentials(True))
s.headers.update(headers or {})
s.headers['User-Agent'] = 'AySA-Command-Line-Tool'
s.verify = self.verify
s.timeout = timeout
return s
def request(self, method, *args, **kwargs):
headers = kwargs.pop('headers', {})
with self.session(headers) as req:
response = req.request(method, *args, **kwargs)
try:
response.raise_for_status()
except requests.HTTPError:
data = response.json()
if 'errors' in data:
error = data['errors'][0]
raise RegistryError('{code}: {message}'.format(**error))
return response
class Entity:
url = None
url_template = None
methods_supported = None
def __init__(self, client):
self.client = client
def set_url(self, **kwargs):
if self.url_template is None:
raise RegistryError('Método "set_url" no está soportado '
'para la entidad: "{}".'
.format(self.__class__.__name__))
self.url = self.url_template.format(**kwargs)
def request(self, method, *args, **kwargs):
method = method.upper()
if self.methods_supported and method not in self.methods_supported:
raise RegistryError('Método "{}" no soportado para "{}".'
.format(method, self.url))
url = self.client.get_baseurl() + self.url
response = self.client.request(method, url, *args, **kwargs)
return response
def json(self, method, *args, **kwargs):
return self.request(method, *args, **kwargs).json()
class IterEntity(Entity):
response_key = None
response_data = None
def __init__(self, client, prefix_filter=None):
self.client = client
self.prefix_filter = prefix_filter
def get(self, *args, **kwargs):
response_data = self.json('GET', *args, **kwargs)
if self.response_key not in response_data:
raise RegistryError('La clave "{}" no se encuentra dentro de la '
'respuesta.'.format(self.response_key))
self.response_data = response_data[self.response_key]
def __iter__(self):
if self.response_data is None:
self.get()
for item in self.response_data:
if self.prefix_filter and not item.startswith(self.prefix_filter):
continue
yield item
class Catalog(IterEntity):
url = '/_catalog'
methods_supported = 'GET'
response_key = 'repositories'
class Tags(IterEntity):
url_template = '/{name}/tags/list'
methods_supported = 'GET'
response_key = 'tags'
def __init__(self, client, name, prefix_filter=None):
super().__init__(client, prefix_filter)
self.set_url(name=name)
class SlimManifest(Entity):
url_template = '/{name}/manifests/{reference}'
media_type = 'v2'
methods_supported = 'GET,PUT,DELETE'
def __init__(self, client, name, reference):
super().__init__(client)
self.set_url(name=name, reference=reference)
def request(self, method, *args, **kwargs):
headers = kwargs.pop('headers', {})
media_type = get_media_type(self.media_type, obj=False)
update = {'Accept': '*/*', 'Content-Type': media_type} \
if method in ('PUT', 'DELETE') else {'Accept': media_type}
headers.update(update)
kwargs['headers'] = headers
return super().request(method, *args, **kwargs)
class FatManifest(SlimManifest):
media_type = 'v2f'
methods_supported = 'GET'
class Api:
def __init__(self, host, insecure=False, verify=True, credentials=None,
**kwargs):
self.registry = Registry(host, insecure, verify, credentials)
def catalog(self, prefix_filter=None):
return Catalog(self.registry, prefix_filter)
def tags(self, name, prefix_filter=None):
return Tags(self.registry, name, prefix_filter)
def put_tag(self, name, reference, target):
return self.put_manifest(name, target, self.manifest(name, reference))
def delete_tag(self, name, reference):
return self.del_manifest(name, self.digest(name, reference))
def digest(self, name, reference, **kwargs):
r = self.get_manifest(name, reference)
return r.headers.get('Docker-Content-Digest', None)
def manifest(self, name, reference, fat=False, obj=False, **kwargs):
r = self.get_manifest(name, reference, fat).json()
return Manifest(r) if obj is True else r
def get_manifest(self, name, reference, fat=False, **kwargs):
return self._manifest(name, reference, fat)\
.request('GET', **kwargs)
def put_manifest(self, name, reference, manifest, **kwargs):
return self._manifest(name, reference)\
.request('PUT', json=manifest, **kwargs)
def del_manifest(self, name, reference, **kwargs):
return self._manifest(name, reference)\
.request('DELETE', **kwargs)
def _manifest(self, name, reference, fat=False):
args = (self.registry, name, reference)
return SlimManifest(*args) if fat is False else FatManifest(*args)
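# Usage sketch for the Api facade above (host, credentials and image names are
# hypothetical; a reachable registry is assumed):
#
#     api = Api('registry.local:5000', insecure=True, credentials='user:pass')
#     for repo in api.catalog():
#         for tag in api.tags(repo):
#             print(repo, tag, api.digest(repo, tag))
#
#     # put_tag re-publishes the manifest of an existing reference under a new
#     # tag, e.g. promoting "latest" to "1.0.0":
#     api.put_tag('aysa/web/nginx', 'latest', '1.0.0')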
class Image:
registry = None
repository = None
namespace = None
image = None
tag = None
def __init__(self, value):
for k, v in get_parts(value).items():
setattr(self, k, v)
self.value = value
@property
def image_tag(self):
return '{}:{}'.format(self.repository, self.tag)
@property
def full(self):
return '{}{}'.format(self.registry or '', self.image_tag)
def __str__(self):
return '<{} Namespace="{}" Image="{}" Tag="{}">'\
.format(self.registry or '',
self.namespace or '',
self.image or '',
self.tag or '')
def __repr__(self):
return self.image
def __lt__(self, other):
return self.image < other.image
def __gt__(self, other):
return self.image > other.image
class Manifest:
def __init__(self, raw):
self._raw = raw
self._history = None
@property
def name(self):
return self._raw.get('name', None)
@property
def tag(self):
return self._raw.get('tag', None)
@property
def layers(self):
return self._raw.get('fsLayers', self._raw.get('layers', None))
@property
def history(self):
try:
if self._history is None:
raw = self._raw['history'][0]['v1Compatibility']
self._history = json.loads(raw)
return self._history
except Exception:
return {}
@property
def created(self):
return self.history.get('created', None)
@property
def schema(self):
return self._raw.get('schemaVersion', None)
class RegistryError(Exception):
pass
| # Author: <NAME>
# Email: <EMAIL>is at gmail.com
# Created: 2019/10/30
# ~
###############################################################################
# Docker Registry Documentation: https://docs.docker.com/registry/ #
###############################################################################
# TODO (0608156): implementar autenticación por token.
# https://docs.docker.com/registry/configuration/#auth
# TODO (0608156): implementar un paginador para el catálogo y tags dentro de
# la api
import re
import json
import requests
from requests.auth import HTTPBasicAuth
TAG_SEP = ':'
REPO_SEP = '/'
MANIFEST_VERSION = 'v2'
MEDIA_TYPES = {
'v1': 'application/vnd.docker.distribution.manifest.v1+json',
'v2': 'application/vnd.docker.distribution.manifest.v2+json',
'v2f': 'application/vnd.docker.distribution.manifest.list.v2+json'
}
rx_schema = re.compile(r'(localhost|.*\.local(?:host)?(?::\d{1,5})?)$', re.I)
rx_registry = re.compile(r'^(localhost|[\w\-]+(\.[\w\-]+)+)(?::\d{1,5})?/',
re.I)
rx_repository = re.compile(r'^[a-z0-9]+(?:[/:._-][a-z0-9]+)*$')
def get_media_type(value=MANIFEST_VERSION, key='Accept', obj=True):
value = MEDIA_TYPES[value if value in MEDIA_TYPES else MANIFEST_VERSION]
return {key: value} if obj is True else value
def remove_registry(value):
registry = get_registry(value)
if registry is not None:
value = value.replace(registry, '')
return value
def get_tag(value):
value = remove_registry(value)
if TAG_SEP not in value:
return None
return value.rsplit(TAG_SEP, 1)[-1]
def get_repository(value):
return remove_registry(value).rsplit(TAG_SEP, 1)[0]
def get_namespace(value):
value = get_repository(value)
if REPO_SEP not in value:
return None
return value.rsplit(REPO_SEP, 1)[0]
def get_image(value):
return get_repository(value).rsplit(REPO_SEP, 1)[-1]
def get_registry(value):
r = rx_registry.match(value)
if r is not None:
return r.group()
return None
def get_parts(value):
"""
Formato del string:
- {url:port}/{namespace}/{repository}:{tag}
"""
if not rx_repository.match(get_repository(value)):
raise RegistryError('El endpoint "{}" está mal formateado.'
.format(value))
return {
'registry': get_registry(value),
'repository': get_repository(value),
'namespace': get_namespace(value),
'image': get_image(value),
'tag': get_tag(value),
}
def validate_token(value, exclude='|#@'):
return value and ''.join([x for x in value if x not in exclude]) == value
def scheme(endpoint):
return 'http' if rx_schema.match(endpoint) else 'https'
class Registry:
"""Registry Client (simple)"""
def __init__(self, host, insecure=False, verify=True, credentials=None,
**kwargs):
self.host = host
self.insecure = insecure
self.verify = verify if insecure is False else True
self.scheme = scheme(host) if insecure is False else 'http'
self.credentials = credentials
def get_baseurl(self):
return '{}://{}/v2'.format(self.scheme, self.host)
def get_credentials(self, split=False):
if split is True:
return self.credentials.split(':')
return self.credentials
def session(self, headers=None, timeout=10):
s = requests.Session()
if self.credentials is not None:
s.auth = HTTPBasicAuth(*self.get_credentials(True))
s.headers.update(headers or {})
s.headers['User-Agent'] = 'AySA-Command-Line-Tool'
s.verify = self.verify
s.timeout = timeout
return s
def request(self, method, *args, **kwargs):
headers = kwargs.pop('headers', {})
with self.session(headers) as req:
response = req.request(method, *args, **kwargs)
try:
response.raise_for_status()
except requests.HTTPError:
data = response.json()
if 'errors' in data:
error = data['errors'][0]
raise RegistryError('{code}: {message}'.format(**error))
return response
class Entity:
url = None
url_template = None
methods_supported = None
def __init__(self, client):
self.client = client
def set_url(self, **kwargs):
if self.url_template is None:
raise RegistryError('Método "set_url" no está soportado '
'para la entidad: "{}".'
.format(self.__class__.__name__))
self.url = self.url_template.format(**kwargs)
def request(self, method, *args, **kwargs):
method = method.upper()
if self.methods_supported and method not in self.methods_supported:
raise RegistryError('Método "{}" no soportado para "{}".'
.format(method, self.url))
url = self.client.get_baseurl() + self.url
response = self.client.request(method, url, *args, **kwargs)
return response
def json(self, method, *args, **kwargs):
return self.request(method, *args, **kwargs).json()
class IterEntity(Entity):
response_key = None
response_data = None
def __init__(self, client, prefix_filter=None):
self.client = client
self.prefix_filter = prefix_filter
def get(self, *args, **kwargs):
response_data = self.json('GET', *args, **kwargs)
if self.response_key not in response_data:
raise RegistryError('La clave "{}" no se encuentra dentro de la '
'respuesta.'.format(self.response_key))
self.response_data = response_data[self.response_key]
def __iter__(self):
if self.response_data is None:
self.get()
for item in self.response_data:
if self.prefix_filter and not item.startswith(self.prefix_filter):
continue
yield item
class Catalog(IterEntity):
url = '/_catalog'
methods_supported = 'GET'
response_key = 'repositories'
class Tags(IterEntity):
url_template = '/{name}/tags/list'
methods_supported = 'GET'
response_key = 'tags'
def __init__(self, client, name, prefix_filter=None):
super().__init__(client, prefix_filter)
self.set_url(name=name)
class SlimManifest(Entity):
url_template = '/{name}/manifests/{reference}'
media_type = 'v2'
methods_supported = 'GET,PUT,DELETE'
def __init__(self, client, name, reference):
super().__init__(client)
self.set_url(name=name, reference=reference)
def request(self, method, *args, **kwargs):
headers = kwargs.pop('headers', {})
media_type = get_media_type(self.media_type, obj=False)
update = {'Accept': '*/*', 'Content-Type': media_type} \
if method in ('PUT', 'DELETE') else {'Accept': media_type}
headers.update(update)
kwargs['headers'] = headers
return super().request(method, *args, **kwargs)
class FatManifest(SlimManifest):
media_type = 'v2f'
methods_supported = 'GET'
class Api:
def __init__(self, host, insecure=False, verify=True, credentials=None,
**kwargs):
self.registry = Registry(host, insecure, verify, credentials)
def catalog(self, prefix_filter=None):
return Catalog(self.registry, prefix_filter)
def tags(self, name, prefix_filter=None):
return Tags(self.registry, name, prefix_filter)
def put_tag(self, name, reference, target):
return self.put_manifest(name, target, self.manifest(name, reference))
def delete_tag(self, name, reference):
return self.del_manifest(name, self.digest(name, reference))
def digest(self, name, reference, **kwargs):
r = self.get_manifest(name, reference)
return r.headers.get('Docker-Content-Digest', None)
def manifest(self, name, reference, fat=False, obj=False, **kwargs):
r = self.get_manifest(name, reference, fat).json()
return Manifest(r) if obj is True else r
def get_manifest(self, name, reference, fat=False, **kwargs):
return self._manifest(name, reference, fat)\
.request('GET', **kwargs)
def put_manifest(self, name, reference, manifest, **kwargs):
return self._manifest(name, reference)\
.request('PUT', json=manifest, **kwargs)
def del_manifest(self, name, reference, **kwargs):
return self._manifest(name, reference)\
.request('DELETE', **kwargs)
def _manifest(self, name, reference, fat=False):
args = (self.registry, name, reference)
return SlimManifest(*args) if fat is False else FatManifest(*args)
class Image:
registry = None
repository = None
namespace = None
image = None
tag = None
def __init__(self, value):
for k, v in get_parts(value).items():
setattr(self, k, v)
self.value = value
@property
def image_tag(self):
return '{}:{}'.format(self.repository, self.tag)
@property
def full(self):
return '{}{}'.format(self.registry or '', self.image_tag)
def __str__(self):
return '<{} Namespace="{}" Image="{}" Tag="{}">'\
.format(self.registry or '',
self.namespace or '',
self.image or '',
self.tag or '')
def __repr__(self):
return self.image
def __lt__(self, other):
return self.image < other.image
def __gt__(self, other):
return self.image > other.image
class Manifest:
def __init__(self, raw):
self._raw = raw
self._history = None
@property
def name(self):
return self._raw.get('name', None)
@property
def tag(self):
return self._raw.get('tag', None)
@property
def layers(self):
return self._raw.get('fsLayers', self._raw.get('layers', None))
@property
def history(self):
try:
if self._history is None:
raw = self._raw['history'][0]['v1Compatibility']
self._history = json.loads(raw)
return self._history
except Exception:
return {}
@property
def created(self):
return self.history.get('created', None)
@property
def schema(self):
return self._raw.get('schemaVersion', None)
class RegistryError(Exception):
pass
| es | 0.279406 | # Author: <NAME> # Email: <EMAIL>is at gmail.com # Created: 2019/10/30 # ~ ############################################################################### # Docker Registry Documentation: https://docs.docker.com/registry/ # ############################################################################### # TODO (0608156): implementar autenticación por token. # https://docs.docker.com/registry/configuration/#auth # TODO (0608156): implementar un paginador para el catálogo y tags dentro de # la api Formato del string: - {url:port}/{namespace}/{repository}:{tag} #@'): Registry Client (simple) | 1.970483 | 2 |
openstack/telemetry/v2/capability.py | mail2nsrajesh/python-openstacksdk | 0 | 6632267 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2 as resource
from openstack.telemetry import telemetry_service
class Capability(resource.Resource):
""".. caution:: This API is a work in progress and is subject to change."""
resource_key = 'capability'
resources_key = 'capabilities'
base_path = '/capabilities'
service = telemetry_service.TelemetryService()
# Supported Operations
allow_list = True
# Properties
is_enabled = resource.Body('enabled', type=bool)
@classmethod
def list(cls, session, paginated=False, **params):
resp = session.get(cls.base_path, endpoint_filter=cls.service,
params=params)
resp = resp.json()
for key, value in resp['api'].items():
yield cls.existing(id=key, enabled=value)
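    # Usage sketch (illustrative, not part of the original module): given an
    # authenticated session, list() yields one Capability per entry of the
    # {'api': {<name>: <bool>, ...}} payload returned by the endpoint, so the
    # flags can be inspected roughly as
    #
    #     for cap in Capability.list(session):
    #         print(cap.id, cap.is_enabled)
    #
    # (the session object and capability names are assumptions here).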
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource2 as resource
from openstack.telemetry import telemetry_service
class Capability(resource.Resource):
""".. caution:: This API is a work in progress and is subject to change."""
resource_key = 'capability'
resources_key = 'capabilities'
base_path = '/capabilities'
service = telemetry_service.TelemetryService()
# Supported Operations
allow_list = True
# Properties
is_enabled = resource.Body('enabled', type=bool)
@classmethod
def list(cls, session, paginated=False, **params):
resp = session.get(cls.base_path, endpoint_filter=cls.service,
params=params)
resp = resp.json()
for key, value in resp['api'].items():
yield cls.existing(id=key, enabled=value) | en | 0.874687 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. .. caution:: This API is a work in progress and is subject to change. # Supported Operations # Properties | 1.781276 | 2 |
pydantic/utils.py | kataev/pydantic | 0 | 6632268 | <reponame>kataev/pydantic
import inspect
import re
import sys
from contextlib import contextmanager
from enum import Enum
from functools import lru_cache
from importlib import import_module
from textwrap import dedent
from typing import _eval_type # type: ignore
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Pattern, Tuple, Type, Union
from . import errors
try:
import email_validator
except ImportError:
email_validator = None
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import ForwardRef # type: ignore
except ImportError:
# python 3.6
ForwardRef = None
if TYPE_CHECKING: # pragma: no cover
from .main import BaseModel # noqa: F401
if sys.version_info < (3, 7):
from typing import Callable
AnyCallable = Callable[..., Any]
else:
from collections.abc import Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
PRETTY_REGEX = re.compile(r'([\w ]*?) *<(.*)> *')
AnyType = Type[Any]
def validate_email(value: str) -> Tuple[str, str]:
"""
Brutally simple email address validation. Note unlike most email address validation
* raw ip address (literal) domain parts are not allowed.
* "<NAME> <<EMAIL>>" style "pretty" email addresses are processed
* the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
solution is really possible.
    * spaces are stripped from the beginning and end of addresses but no error is raised
See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email!
"""
if email_validator is None:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`')
m = PRETTY_REGEX.fullmatch(value)
name: Optional[str] = None
if m:
name, value = m.groups()
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise errors.EmailError() from e
return name or email[: email.index('@')], email.lower()
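# Example of the behaviour described above (addresses are invented; the optional
# email-validator dependency must be installed):
#
#     validate_email('John Doe <JDoe@Example.com>')  # -> ('John Doe', 'jdoe@example.com')
#     validate_email('jdoe@example.com')             # -> ('jdoe', 'jdoe@example.com')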
def _rfc_1738_quote(text: str) -> str:
return re.sub(r'[:@/]', lambda m: '%{:X}'.format(ord(m.group(0))), text)
def make_dsn(
*,
driver: str,
user: str = None,
password: str = None,
host: str = None,
port: str = None,
name: str = None,
query: Dict[str, Any] = None,
) -> str:
"""
    Create a DSN from connection settings.
Stolen approximately from sqlalchemy/engine/url.py:URL.
"""
s = driver + '://'
if user is not None:
s += _rfc_1738_quote(user)
if password is not None:
s += ':' + _rfc_1738_quote(password)
s += '@'
if host is not None:
if ':' in host:
s += '[{}]'.format(host)
else:
s += host
if port is not None:
s += ':{}'.format(int(port))
if name is not None:
s += '/' + name
query = query or {}
if query:
keys = list(query)
keys.sort()
s += '?' + '&'.join('{}={}'.format(k, query[k]) for k in keys)
return s
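# Worked example (connection values are invented):
#
#     make_dsn(driver='postgresql', user='app', password='s3cret',
#              host='db.local', port='5432', name='appdb')
#     # -> 'postgresql://app:s3cret@db.local:5432/appdb'
#
# Reserved characters (:, @, /) in user or password are percent-encoded by
# _rfc_1738_quote, and a query dict is appended as a sorted '?k=v&...' suffix.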
def import_string(dotted_path: str) -> Any:
"""
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import fails.
"""
try:
module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
except ValueError as e:
raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as e:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
def truncate(v: str, *, max_len: int = 80) -> str:
"""
Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long
"""
if isinstance(v, str) and len(v) > (max_len - 2):
# -3 so quote + string + … + quote has correct length
return repr(v[: (max_len - 3)] + '…')
v = repr(v)
if len(v) > max_len:
v = v[: max_len - 1] + '…'
return v
def display_as_type(v: AnyType) -> str:
if not isinstance(v, typing_base) and not isinstance(v, type):
v = type(v)
if lenient_issubclass(v, Enum):
if issubclass(v, int):
return 'int'
elif issubclass(v, str):
return 'str'
else:
return 'enum'
try:
return v.__name__
except AttributeError:
# happens with unions
return str(v)
ExcType = Type[Exception]
@contextmanager
def change_exception(raise_exc: ExcType, *except_types: ExcType) -> Generator[None, None, None]:
try:
yield
except except_types as e:
raise raise_exc from e
def clean_docstring(d: str) -> str:
return dedent(d).strip(' \r\n\t')
def list_like(v: AnyType) -> bool:
return isinstance(v, (list, tuple, set)) or inspect.isgenerator(v)
def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:
"""
Ensure that the field's name does not shadow an existing attribute of the model.
"""
for base in bases:
if getattr(base, field_name, None):
raise NameError(
f'Field name "{field_name}" shadows a BaseModel attribute; '
f'use a different field name with "alias=\'{field_name}\'".'
)
@lru_cache(maxsize=None)
def url_regex_generator(*, relative: bool, require_tld: bool) -> Pattern[str]:
"""
Url regex generator taken from Marshmallow library,
for details please follow library source code:
https://github.com/marshmallow-code/marshmallow/blob/298870ef6c089fb4d91efae9ca4168453ffe00d2/marshmallow/validate.py#L37
"""
return re.compile(
r''.join(
(
r'^',
r'(' if relative else r'',
r'(?:[a-z0-9\.\-\+]*)://', # scheme is validated separately
r'(?:[^:@]+?:[^:@]*?@|)', # basic auth
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+',
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|', # domain...
r'localhost|', # localhost...
(
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)|' if not require_tld else r''
), # allow dotless hostnames
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|', # ...or ipv4
r'\[[A-F0-9]*:[A-F0-9:]+\])', # ...or ipv6
r'(?::\d+)?', # optional port
r')?' if relative else r'', # host is optional, allow for relative URLs
r'(?:/?|[/?]\S+)$',
)
),
re.IGNORECASE,
)
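# Minimal sketch of how the generated pattern behaves (URLs are examples only):
#
#     url_re = url_regex_generator(relative=False, require_tld=True)
#     bool(url_re.match('https://example.com/path?q=1'))  # True
#     bool(url_re.match('not a url'))                     # False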
def lenient_issubclass(cls: Any, class_or_tuple: Union[AnyType, Tuple[AnyType, ...]]) -> bool:
return isinstance(cls, type) and issubclass(cls, class_or_tuple)
def in_ipython() -> bool:
"""
Check whether we're in an ipython environment, including jupyter notebooks.
"""
try:
__IPYTHON__ # type: ignore
except NameError:
return False
else: # pragma: no cover
return True
def resolve_annotations(raw_annotations: Dict[str, AnyType], module_name: Optional[str]) -> Dict[str, AnyType]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
if module_name:
base_globals: Optional[Dict[str, Any]] = sys.modules[module_name].__dict__
else:
base_globals = None
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
value = ForwardRef(value, is_argument=False)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
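# Sketch (assumes Python 3.7+, where typing.ForwardRef exists): string
# annotations are evaluated against the named module's globals,
#
#     resolve_annotations({'x': 'int'}, __name__)   # -> {'x': <class 'int'>}
#
# and names that cannot be resolved yet are left as ForwardRef so they can be
# fixed later via update_forward_refs (see the NameError branch above).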
def is_callable_type(type_: AnyType) -> bool:
return type_ is Callable or getattr(type_, '__origin__', None) is Callable
| import inspect
import re
import sys
from contextlib import contextmanager
from enum import Enum
from functools import lru_cache
from importlib import import_module
from textwrap import dedent
from typing import _eval_type # type: ignore
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Pattern, Tuple, Type, Union
from . import errors
try:
import email_validator
except ImportError:
email_validator = None
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import ForwardRef # type: ignore
except ImportError:
# python 3.6
ForwardRef = None
if TYPE_CHECKING: # pragma: no cover
from .main import BaseModel # noqa: F401
if sys.version_info < (3, 7):
from typing import Callable
AnyCallable = Callable[..., Any]
else:
from collections.abc import Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
PRETTY_REGEX = re.compile(r'([\w ]*?) *<(.*)> *')
AnyType = Type[Any]
def validate_email(value: str) -> Tuple[str, str]:
"""
Brutally simple email address validation. Note unlike most email address validation
* raw ip address (literal) domain parts are not allowed.
* "<NAME> <<EMAIL>>" style "pretty" email addresses are processed
* the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
solution is really possible.
    * spaces are stripped from the beginning and end of addresses but no error is raised
See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email!
"""
if email_validator is None:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`')
m = PRETTY_REGEX.fullmatch(value)
name: Optional[str] = None
if m:
name, value = m.groups()
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise errors.EmailError() from e
return name or email[: email.index('@')], email.lower()
def _rfc_1738_quote(text: str) -> str:
return re.sub(r'[:@/]', lambda m: '%{:X}'.format(ord(m.group(0))), text)
def make_dsn(
*,
driver: str,
user: str = None,
password: str = None,
host: str = None,
port: str = None,
name: str = None,
query: Dict[str, Any] = None,
) -> str:
"""
    Create a DSN from connection settings.
Stolen approximately from sqlalchemy/engine/url.py:URL.
"""
s = driver + '://'
if user is not None:
s += _rfc_1738_quote(user)
if password is not None:
s += ':' + _rfc_1738_quote(password)
s += '@'
if host is not None:
if ':' in host:
s += '[{}]'.format(host)
else:
s += host
if port is not None:
s += ':{}'.format(int(port))
if name is not None:
s += '/' + name
query = query or {}
if query:
keys = list(query)
keys.sort()
s += '?' + '&'.join('{}={}'.format(k, query[k]) for k in keys)
return s
def import_string(dotted_path: str) -> Any:
"""
Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import fails.
"""
try:
module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)
except ValueError as e:
raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as e:
raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e
def truncate(v: str, *, max_len: int = 80) -> str:
"""
Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long
"""
if isinstance(v, str) and len(v) > (max_len - 2):
# -3 so quote + string + … + quote has correct length
return repr(v[: (max_len - 3)] + '…')
v = repr(v)
if len(v) > max_len:
v = v[: max_len - 1] + '…'
return v
def display_as_type(v: AnyType) -> str:
if not isinstance(v, typing_base) and not isinstance(v, type):
v = type(v)
if lenient_issubclass(v, Enum):
if issubclass(v, int):
return 'int'
elif issubclass(v, str):
return 'str'
else:
return 'enum'
try:
return v.__name__
except AttributeError:
# happens with unions
return str(v)
ExcType = Type[Exception]
@contextmanager
def change_exception(raise_exc: ExcType, *except_types: ExcType) -> Generator[None, None, None]:
try:
yield
except except_types as e:
raise raise_exc from e
def clean_docstring(d: str) -> str:
return dedent(d).strip(' \r\n\t')
def list_like(v: AnyType) -> bool:
return isinstance(v, (list, tuple, set)) or inspect.isgenerator(v)
def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:
"""
Ensure that the field's name does not shadow an existing attribute of the model.
"""
for base in bases:
if getattr(base, field_name, None):
raise NameError(
f'Field name "{field_name}" shadows a BaseModel attribute; '
f'use a different field name with "alias=\'{field_name}\'".'
)
@lru_cache(maxsize=None)
def url_regex_generator(*, relative: bool, require_tld: bool) -> Pattern[str]:
"""
Url regex generator taken from Marshmallow library,
for details please follow library source code:
https://github.com/marshmallow-code/marshmallow/blob/298870ef6c089fb4d91efae9ca4168453ffe00d2/marshmallow/validate.py#L37
"""
return re.compile(
r''.join(
(
r'^',
r'(' if relative else r'',
r'(?:[a-z0-9\.\-\+]*)://', # scheme is validated separately
r'(?:[^:@]+?:[^:@]*?@|)', # basic auth
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+',
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|', # domain...
r'localhost|', # localhost...
(
r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)|' if not require_tld else r''
), # allow dotless hostnames
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|', # ...or ipv4
r'\[[A-F0-9]*:[A-F0-9:]+\])', # ...or ipv6
r'(?::\d+)?', # optional port
r')?' if relative else r'', # host is optional, allow for relative URLs
r'(?:/?|[/?]\S+)$',
)
),
re.IGNORECASE,
)
def lenient_issubclass(cls: Any, class_or_tuple: Union[AnyType, Tuple[AnyType, ...]]) -> bool:
return isinstance(cls, type) and issubclass(cls, class_or_tuple)
def in_ipython() -> bool:
"""
Check whether we're in an ipython environment, including jupyter notebooks.
"""
try:
__IPYTHON__ # type: ignore
except NameError:
return False
else: # pragma: no cover
return True
def resolve_annotations(raw_annotations: Dict[str, AnyType], module_name: Optional[str]) -> Dict[str, AnyType]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
if module_name:
base_globals: Optional[Dict[str, Any]] = sys.modules[module_name].__dict__
else:
base_globals = None
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
value = ForwardRef(value, is_argument=False)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
def is_callable_type(type_: AnyType) -> bool:
return type_ is Callable or getattr(type_, '__origin__', None) is Callable | en | 0.813459 | # type: ignore # type: ignore # type: ignore # type: ignore # python 3.6 # pragma: no cover # noqa: F401 Brutally simple email address validation. Note unlike most email address validation * raw ip address (literal) domain parts are not allowed. * "<NAME> <<EMAIL>>" style "pretty" email addresses are processed * the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better solution is really possible. * spaces are striped from the beginning and end of addresses but no error is raised See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email! Create a DSN from from connection settings. Stolen approximately from sqlalchemy/engine/url.py:URL. Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import fails. Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long # -3 so quote + string + … + quote has correct length # happens with unions Ensure that the field's name does not shadow an existing attribute of the model. Url regex generator taken from Marshmallow library, for details please follow library source code: https://github.com/marshmallow-code/marshmallow/blob/298870ef6c089fb4d91efae9ca4168453ffe00d2/marshmallow/validate.py#L37 # scheme is validated separately # basic auth # domain... # localhost... # allow dotless hostnames # ...or ipv4 # ...or ipv6 # optional port # host is optional, allow for relative URLs Check whether we're in an ipython environment, including jupyter notebooks. # type: ignore # pragma: no cover Partially taken from typing.get_type_hints. Resolve string or ForwardRef annotations into type objects if possible. # this is ok, it can be fixed with update_forward_refs | 2.369275 | 2 |
cargonet/dataset/avgdelayv1.py | romnnn/rail-stgcnn | 2 | 6632269 | import os.path
import shutil
import statistics
from collections import defaultdict
from datetime import datetime, timedelta
import networkx as nx
import torch
import torch_geometric
from torch_geometric.data import Dataset
import cargonet.preprocessing.datalake.retrieval as retrieval
import cargonet.preprocessing.graphs.tgraph as tgraph
from cargonet.dataset.dataset import RailDataset
from cargonet.utils.link2node import link2node
from cargonet.utils.pdf import concat_pdfs
class EdgeAverageDelayDatasetV1(RailDataset):
node_feature_mapping = ["stationId", "imId", "country"]
edge_feature_mapping = ["delay", "distance", "current"]
def __init__(
self,
root,
name=None,
transform=None,
pre_transform=None,
limit=1,
plot_download=False,
plot_processing=False,
force_reprocess=False,
force_redownload=False,
normalize_net=True,
verbose=True,
lookbehind_steps=3,
lookahead_steps=1,
lookahead=timedelta(hours=1),
lookbehind=timedelta(minutes=10),
interval=timedelta(minutes=10),
batch=timedelta(hours=24),
padding=timedelta(hours=0),
bbox=None,
running_avg_lookbehind_steps=1,
):
self.running_avg_lookbehind_steps = max(1, running_avg_lookbehind_steps)
super().__init__(
root=root,
name=name,
transform=transform,
pre_transform=pre_transform,
limit=limit,
plot_download=plot_download,
plot_processing=plot_processing,
force_reprocess=force_reprocess,
force_redownload=force_redownload,
normalize_net=normalize_net,
verbose=verbose,
lookbehind_steps=lookbehind_steps,
lookahead_steps=lookahead_steps,
lookahead=lookahead,
lookbehind=lookbehind,
interval=interval,
batch=batch,
padding=padding,
bbox=bbox,
)
assert self.undirected
def download(self):
super().download()
@property
def processed_file_names(self):
return [
"processed_%d.pt" % f
for f in range(self.running_avg_lookbehind_steps, len(self.raw_paths))
]
@staticmethod
def aggregate(acc, states):
# Start with the full network and iteratively apply the considered states
_acc = acc.copy()
_acc = _acc.to_undirected()
avg = defaultdict(list)
if False:
for _, data in acc.nodes(data=True):
print("Acc node features:", data.keys())
break
for u, v, data in acc.edges(data=True):
print("Acc edge features:", data.keys())
break
for _, data in states[0].nodes(data=True):
print("State node features:", data.keys())
break
for u, v, data in states[0].edges(data=True):
print("State edge features:", data.keys())
break
for s in states:
s = s.to_undirected()
for u, v, data in s.edges(data=True):
avg[(u, v)].append(data["delay"])
# Apply running averages
for edge, delays in avg.items():
delay = statistics.mean(delays)
try:
_acc.edges[edge]["delay"] = delay / 100
_acc.edges[edge]["current"] = len(delays)
except KeyError:
pass
# print("KEY ERROR!!")
return _acc
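    # Toy illustration of the running average computed above (graphs are made up;
    # delays are averaged per edge and scaled by 1/100, 'current' counts the
    # number of observations):
    #
    #     net = nx.Graph(); net.add_edge('A', 'B', delay=0.0, current=0)
    #     s1 = nx.Graph(); s1.add_edge('A', 'B', delay=120)
    #     s2 = nx.Graph(); s2.add_edge('A', 'B', delay=60)
    #     agg = EdgeAverageDelayDatasetV1.aggregate(net, [s1, s2])
    #     agg.edges['A', 'B']['delay']    # -> 0.9
    #     agg.edges['A', 'B']['current']  # -> 2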
def extract_features(
self,
nx_g,
edge_features=None,
node_features=None,
verbose=True,
node_mapping=None,
):
"""
Edges are important here
"""
edge_features = edge_features or []
node_features = node_features or []
n_edges = nx_g.number_of_edges()
        edge_attrs = torch.zeros(n_edges, len(edge_features), dtype=torch.float)
        # index pairs per edge; initialised here so the assignment below does not
        # hit an undefined name (the returned Data only carries edge_attr)
        edges = torch.zeros(n_edges, 2, dtype=torch.long)
for i, edge in enumerate(nx_g.edges):
u, v = edge
if node_mapping:
u, v = node_mapping[u], node_mapping[v]
edges[i][0], edges[i][1] = u, v
for j, feature in enumerate(edge_features):
try:
edge_attrs[i][j] = nx_g.edges[u, v][feature]
except (TypeError, ValueError, KeyError) as e:
print(
"extract_features edge attr error: ",
e,
feature,
nx_g.edges[edge],
)
if verbose:
delay = edge_attrs[:, self.edge_feature_mapping.index("delay")]
print("delay: min=%d max=%d" % (delay.min().item(), delay.max().item()))
return torch_geometric.data.Data(edge_attr=edge_attrs)
def process(self):
states_count = len(self.raw_paths)
total_states = range(self.running_avg_lookbehind_steps, states_count)
assert len(self.processed_file_names) == len(total_states)
for i in total_states:
# Read transport state at time step t and some previous steps
self.vlog(
"Processing t[%d:%d] (%d/%d, %d states)"
% (
i - self.running_avg_lookbehind_steps,
i,
i,
states_count,
self.running_avg_lookbehind_steps,
)
)
states = [
nx.read_gpickle(raw_path)
for raw_path in self.raw_paths[
i - self.running_avg_lookbehind_steps : i
]
]
# Enforce undirected
assert all([isinstance(s, nx.Graph) for s in states])
combined = self.aggregate(self.nx_net, states)
# Plot combined graph
if self.plot_processing:
self.debug_plot(
i, combined, prefix="combined", size=1, labels=False, opaque=True
)
# Extract important features and convert nx graph to a tg graph
data = self.extract_features(
combined,
node_mapping=self.net_mapping,
edge_features=self.edge_feature_mapping,
node_features=self.node_feature_mapping,
verbose=self.verbose,
)
# Apply filters and transformations
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
torch.save(
data,
os.path.join(
self.processed_dir,
self.processed_file_names[i - self.running_avg_lookbehind_steps],
),
)
class NodeAverageDelayDatasetV1(EdgeAverageDelayDatasetV1):
node_feature_mapping = EdgeAverageDelayDatasetV1.edge_feature_mapping
edge_feature_mapping = EdgeAverageDelayDatasetV1.node_feature_mapping
def extract_features(
self,
nx_g,
edge_features=None,
node_features=None,
verbose=True,
node_mapping=None,
):
"""
Nodes are important here
"""
edge_features = edge_features or []
node_features = node_features or []
# Assume the data is given with delay as edge attributes
n_nodes = nx_g.number_of_edges()
nodes = torch.zeros(n_nodes, len(node_features), dtype=torch.float)
for u, v, data in nx_g.edges(data=True):
for j, feature in enumerate(node_features):
try:
n = self.mapping[(u, v)]
nodes[n][j] = data[feature]
except (TypeError, ValueError, KeyError) as e:
raise
print(
"extract_features node attr error:",
e,
data,
feature,
data[feature],
type(data[feature]),
)
if verbose:
delay = nodes[:, node_features.index("delay")]
print(
"delay: mean=%d min=%d max=%d"
% (delay.mean().item(), delay.min().item(), delay.max().item())
)
return torch_geometric.data.Data(x=nodes)
def process(self):
super().process()
def download(self):
super().download()
def convert_to_tg_net(self, net):
"""
Convert full net to tg and set the mapping
"""
net, mapping = link2node(net, self.mapping)
return self.nx_to_tg(net), mapping
def build_dataset(limit, plot_download, plot_processing, rebuild, reprocess, verbose):
dataset_name = "average-delay-dataset-v1"
base_path = os.path.dirname(os.path.realpath(__file__))
base_dataset_path = os.path.join(base_path, "../../datasets")
assert os.path.exists(base_dataset_path)
dataset_path = os.path.join(base_dataset_path, dataset_name)
try:
print("Loading dataset")
dataset = NodeAverageDelayDatasetV1(
root=dataset_path,
name=dataset_name,
limit=limit,
plot_download=plot_download,
plot_processing=plot_processing,
force_redownload=rebuild,
force_reprocess=reprocess,
verbose=verbose,
)
    except Exception as e:
        print("loading dataset error: ", e)
        raise
| import os.path
import shutil
import statistics
from collections import defaultdict
from datetime import datetime, timedelta
import networkx as nx
import torch
import torch_geometric
from torch_geometric.data import Dataset
import cargonet.preprocessing.datalake.retrieval as retrieval
import cargonet.preprocessing.graphs.tgraph as tgraph
from cargonet.dataset.dataset import RailDataset
from cargonet.utils.link2node import link2node
from cargonet.utils.pdf import concat_pdfs
class EdgeAverageDelayDatasetV1(RailDataset):
node_feature_mapping = ["stationId", "imId", "country"]
edge_feature_mapping = ["delay", "distance", "current"]
def __init__(
self,
root,
name=None,
transform=None,
pre_transform=None,
limit=1,
plot_download=False,
plot_processing=False,
force_reprocess=False,
force_redownload=False,
normalize_net=True,
verbose=True,
lookbehind_steps=3,
lookahead_steps=1,
lookahead=timedelta(hours=1),
lookbehind=timedelta(minutes=10),
interval=timedelta(minutes=10),
batch=timedelta(hours=24),
padding=timedelta(hours=0),
bbox=None,
running_avg_lookbehind_steps=1,
):
self.running_avg_lookbehind_steps = max(1, running_avg_lookbehind_steps)
super().__init__(
root=root,
name=name,
transform=transform,
pre_transform=pre_transform,
limit=limit,
plot_download=plot_download,
plot_processing=plot_processing,
force_reprocess=force_reprocess,
force_redownload=force_redownload,
normalize_net=normalize_net,
verbose=verbose,
lookbehind_steps=lookbehind_steps,
lookahead_steps=lookahead_steps,
lookahead=lookahead,
lookbehind=lookbehind,
interval=interval,
batch=batch,
padding=padding,
bbox=bbox,
)
assert self.undirected
def download(self):
super().download()
@property
def processed_file_names(self):
return [
"processed_%d.pt" % f
for f in range(self.running_avg_lookbehind_steps, len(self.raw_paths))
]
@staticmethod
def aggregate(acc, states):
# Start with the full network and iteratively apply the considered states
_acc = acc.copy()
_acc = _acc.to_undirected()
avg = defaultdict(list)
if False:
for _, data in acc.nodes(data=True):
print("Acc node features:", data.keys())
break
for u, v, data in acc.edges(data=True):
print("Acc edge features:", data.keys())
break
for _, data in states[0].nodes(data=True):
print("State node features:", data.keys())
break
for u, v, data in states[0].edges(data=True):
print("State edge features:", data.keys())
break
for s in states:
s = s.to_undirected()
for u, v, data in s.edges(data=True):
avg[(u, v)].append(data["delay"])
# Apply running averages
for edge, delays in avg.items():
delay = statistics.mean(delays)
try:
_acc.edges[edge]["delay"] = delay / 100
_acc.edges[edge]["current"] = len(delays)
except KeyError:
pass
# print("KEY ERROR!!")
return _acc
def extract_features(
self,
nx_g,
edge_features=None,
node_features=None,
verbose=True,
node_mapping=None,
):
"""
Edges are important here
"""
edge_features = edge_features or []
node_features = node_features or []
n_edges = nx_g.number_of_edges()
        edge_attrs = torch.zeros(n_edges, len(edge_features), dtype=torch.float)
        # index pairs per edge; initialised here so the assignment below does not
        # hit an undefined name (the returned Data only carries edge_attr)
        edges = torch.zeros(n_edges, 2, dtype=torch.long)
for i, edge in enumerate(nx_g.edges):
u, v = edge
if node_mapping:
u, v = node_mapping[u], node_mapping[v]
edges[i][0], edges[i][1] = u, v
for j, feature in enumerate(edge_features):
try:
edge_attrs[i][j] = nx_g.edges[u, v][feature]
except (TypeError, ValueError, KeyError) as e:
print(
"extract_features edge attr error: ",
e,
feature,
nx_g.edges[edge],
)
if verbose:
delay = edge_attrs[:, self.edge_feature_mapping.index("delay")]
print("delay: min=%d max=%d" % (delay.min().item(), delay.max().item()))
return torch_geometric.data.Data(edge_attr=edge_attrs)
def process(self):
states_count = len(self.raw_paths)
total_states = range(self.running_avg_lookbehind_steps, states_count)
assert len(self.processed_file_names) == len(total_states)
for i in total_states:
# Read transport state at time step t and some previous steps
self.vlog(
"Processing t[%d:%d] (%d/%d, %d states)"
% (
i - self.running_avg_lookbehind_steps,
i,
i,
states_count,
self.running_avg_lookbehind_steps,
)
)
states = [
nx.read_gpickle(raw_path)
for raw_path in self.raw_paths[
i - self.running_avg_lookbehind_steps : i
]
]
# Enforce undirected
assert all([isinstance(s, nx.Graph) for s in states])
combined = self.aggregate(self.nx_net, states)
# Plot combined graph
if self.plot_processing:
self.debug_plot(
i, combined, prefix="combined", size=1, labels=False, opaque=True
)
# Extract important features and convert nx graph to a tg graph
data = self.extract_features(
combined,
node_mapping=self.net_mapping,
edge_features=self.edge_feature_mapping,
node_features=self.node_feature_mapping,
verbose=self.verbose,
)
# Apply filters and transformations
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
torch.save(
data,
os.path.join(
self.processed_dir,
self.processed_file_names[i - self.running_avg_lookbehind_steps],
),
)
class NodeAverageDelayDatasetV1(EdgeAverageDelayDatasetV1):
node_feature_mapping = EdgeAverageDelayDatasetV1.edge_feature_mapping
edge_feature_mapping = EdgeAverageDelayDatasetV1.node_feature_mapping
def extract_features(
self,
nx_g,
edge_features=None,
node_features=None,
verbose=True,
node_mapping=None,
):
"""
Nodes are important here
"""
edge_features = edge_features or []
node_features = node_features or []
# Assume the data is given with delay as edge attributes
n_nodes = nx_g.number_of_edges()
nodes = torch.zeros(n_nodes, len(node_features), dtype=torch.float)
for u, v, data in nx_g.edges(data=True):
for j, feature in enumerate(node_features):
try:
n = self.mapping[(u, v)]
nodes[n][j] = data[feature]
except (TypeError, ValueError, KeyError) as e:
raise
print(
"extract_features node attr error:",
e,
data,
feature,
data[feature],
type(data[feature]),
)
if verbose:
delay = nodes[:, node_features.index("delay")]
print(
"delay: mean=%d min=%d max=%d"
% (delay.mean().item(), delay.min().item(), delay.max().item())
)
return torch_geometric.data.Data(x=nodes)
def process(self):
super().process()
def download(self):
super().download()
def convert_to_tg_net(self, net):
"""
Convert full net to tg and set the mapping
"""
net, mapping = link2node(net, self.mapping)
return self.nx_to_tg(net), mapping
def build_dataset(limit, plot_download, plot_processing, rebuild, reprocess, verbose):
dataset_name = "average-delay-dataset-v1"
base_path = os.path.dirname(os.path.realpath(__file__))
base_dataset_path = os.path.join(base_path, "../../datasets")
assert os.path.exists(base_dataset_path)
dataset_path = os.path.join(base_dataset_path, dataset_name)
try:
print("Loading dataset")
dataset = NodeAverageDelayDatasetV1(
root=dataset_path,
name=dataset_name,
limit=limit,
plot_download=plot_download,
plot_processing=plot_processing,
force_redownload=rebuild,
force_reprocess=reprocess,
verbose=verbose,
)
    except Exception as e:
        print("loading dataset error: ", e)
        raise
| en | 0.893031 | # Start with the full network and iteratively apply the considered states # Apply running averages # print("KEY ERROR!!") Edges are important here # Read transport state at time step t and some previous steps # Enforce undirected # Plot combined graph # Extract important features and convert nx graph to a tg graph # Apply filters and transformations Nodes are important here # Assume the data is given with delay as edge attributes Convert full net to tg and set the mapping | 2.021258 | 2 |
model/mosesdecoder/contrib/moses-speedtest/testsuite_common.py | saeedesm/UNMT_AH | 3 | 6632270 | <gh_stars>1-10
"""Common functions of the testsuitce"""
import os
#Clour constants
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
class LogLine:
"""A class to contain logfile line"""
def __init__(self, date, time, revision, testname, real, user, system, branch):
self.date = date
self.time = time
self.revision = revision
self.testname = testname
self.real = real
self.system = system
self.user = user
self.branch = branch
class Result:
"""A class to contain results of benchmarking"""
def __init__(self, testname, previous, current, revision, branch, prevrev, prevbranch):
self.testname = testname
self.previous = previous
self.current = current
self.change = previous - current
self.revision = revision
self.branch = branch
self.prevbranch = prevbranch
self.prevrev = prevrev
#Produce a percentage with fewer digits
self.percentage = float(format(1 - current/previous, '.4f'))
def processLogLine(logline):
"""Parses the log line into a nice datastructure"""
logline = logline.split()
log = LogLine(logline[0], logline[1], logline[2], logline[4],\
float(logline[6]), float(logline[8]), float(logline[10]), logline[12])
return log
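# Illustrative input (label words and values are invented; only the whitespace-
# separated positions 0-2, 4, 6, 8, 10 and 12 are read by the parser):
#
#     line = ('2014-06-01 12:00:01 abc1234 test: basic-test real 10.21 '
#             'user 9.80 system 0.41 branch master')
#     log = processLogLine(line)
#     (log.testname, log.real, log.user, log.system, log.branch)
#     # -> ('basic-test', 10.21, 9.8, 0.41, 'master')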
def getLastTwoLines(filename, logdir):
"""Just a call to tail to get the diff between the last two runs"""
try:
line1, line2 = os.popen("tail -n2 " + logdir + '/' + filename)
except ValueError: #Check for new tests
tempfile = open(logdir + '/' + filename)
line1 = tempfile.readline()
tempfile.close()
return (line1, '\n')
return (line1, line2)
| """Common functions of the testsuitce"""
import os
#Clour constants
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
class LogLine:
"""A class to contain logfile line"""
def __init__(self, date, time, revision, testname, real, user, system, branch):
self.date = date
self.time = time
self.revision = revision
self.testname = testname
self.real = real
self.system = system
self.user = user
self.branch = branch
class Result:
"""A class to contain results of benchmarking"""
def __init__(self, testname, previous, current, revision, branch, prevrev, prevbranch):
self.testname = testname
self.previous = previous
self.current = current
self.change = previous - current
self.revision = revision
self.branch = branch
self.prevbranch = prevbranch
self.prevrev = prevrev
#Produce a percentage with fewer digits
self.percentage = float(format(1 - current/previous, '.4f'))
def processLogLine(logline):
"""Parses the log line into a nice datastructure"""
logline = logline.split()
log = LogLine(logline[0], logline[1], logline[2], logline[4],\
float(logline[6]), float(logline[8]), float(logline[10]), logline[12])
return log
def getLastTwoLines(filename, logdir):
"""Just a call to tail to get the diff between the last two runs"""
try:
line1, line2 = os.popen("tail -n2 " + logdir + '/' + filename)
except ValueError: #Check for new tests
tempfile = open(logdir + '/' + filename)
line1 = tempfile.readline()
tempfile.close()
return (line1, '\n')
return (line1, line2) | en | 0.909571 | Common functions of the testsuitce #Clour constants A class to contain logfile line A class to contain results of benchmarking #Produce a percentage with fewer digits Parses the log line into a nice datastructure Just a call to tail to get the diff between the last two runs #Check for new tests | 2.958709 | 3 |
rplugin/python3/wilder/__init__.py | ratheesh/wilder.nvim | 0 | 6632271 | <filename>rplugin/python3/wilder/__init__.py
import asyncio
import concurrent.futures
import difflib
import fnmatch
import functools
import glob
import importlib
from importlib.util import find_spec
import itertools
import multiprocessing
import os
from pathlib import Path
import pwd
import shutil
import threading
import time
if find_spec('pynvim'):
import pynvim as neovim
else:
import neovim
@neovim.plugin
class Wilder(object):
def __init__(self, nvim):
self.nvim = nvim
self.has_init = False
self.queue = multiprocessing.Queue()
self.events = []
self.lock = threading.Lock()
self.executor = None
self.cached_buffer = {'bufnr': -1, 'undotree_seq_cur': -1, 'buffer': []}
self.run_id = -1
def handle(self, ctx, x, command='resolve'):
self.nvim.call('wilder#' + command, ctx, x)
def echomsg(self, x):
self.nvim.session.threadsafe_call(lambda: self.nvim.command('echomsg "' + x + '"'))
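    # Note on the concurrency scheme used below: every request from Vim carries a
    # monotonically increasing run_id. run_in_background() drops requests older
    # than the newest one seen, cancels all still-pending jobs by setting their
    # threading.Event, and submits the new job to the thread pool. The consumer()
    # thread drains the result queue so that only the result with the highest
    # (run_id, step) is handed back to Neovim via async_call.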
def run_in_background(self, fn, args):
event = threading.Event()
ctx = args[0]
with self.lock:
if ctx['run_id'] < self.run_id:
return
self.run_id = ctx['run_id']
while len(self.events) > 0:
e = self.events.pop(0)
e.set()
self.events.append(event)
self.executor.submit(functools.partial( fn, *([event] + args), ))
def consumer(self):
while True:
args = self.queue.get()
ctx = args[0]
res = args[1]
while not self.queue.empty():
new_args = self.queue.get_nowait()
new_ctx = new_args[0]
if (new_ctx['run_id'] > ctx['run_id'] or
(new_ctx['run_id'] == ctx['run_id'] and new_ctx['step'] > ctx['step'])):
args = new_args
ctx = args[0]
res = args[1]
if len(args) > 2:
command = args[2]
self.nvim.async_call(self.handle, ctx, res, command=command)
else:
self.nvim.async_call(self.handle, ctx, res)
@neovim.function('_wilder_init', sync=True)
def init(self, args):
if self.has_init:
return
self.has_init = True
opts = args[0]
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=opts['num_workers'])
t = threading.Thread(target=self.consumer, daemon=True)
t.start()
@neovim.function('_wilder_python_sleep', sync=False, allow_nested=True)
def sleep(self, args):
self.run_in_background(self.sleep_handler, args)
def sleep_handler(self, event, ctx, t, x):
if event.is_set():
return
time.sleep(t)
self.queue.put((ctx, x,))
@neovim.function('_wilder_python_search', sync=False)
def search(self, args):
if args[2] == "":
self.handle(args[1], [])
return
bufnr = self.nvim.current.buffer.number
undotree_seq_cur = self.nvim.eval('undotree().seq_cur')
if (bufnr != self.cached_buffer['bufnr'] or
undotree_seq_cur != self.cached_buffer['undotree_seq_cur']):
self.cached_buffer = {
'bufnr': bufnr,
'undotree_seq_cur': undotree_seq_cur,
'buffer': list(self.nvim.current.buffer),
}
self.run_in_background(self.search_handler, args + [self.cached_buffer['buffer']])
def search_handler(self, event, ctx, opts, x, buf):
if event.is_set():
return
try:
module_name = opts['engine'] if 'engine' in opts else 're'
max_candidates = opts['max_candidates'] if 'max_candidates' in opts else 300
seen = set()
candidates = []
re = importlib.import_module(module_name)
# re2 does not use re.UNICODE by default
pattern = re.compile(x, re.UNICODE)
for line in buf:
if event.is_set():
return
for match in pattern.finditer(line):
if event.is_set():
return
candidate = match.group()
if not candidate in seen:
seen.add(candidate)
candidates.append(candidate)
if max_candidates > 0 and len(candidates) >= max_candidates:
self.queue.put((ctx, candidates,))
return
self.queue.put((ctx, candidates,))
except Exception as e:
self.queue.put((ctx, 'python_search: ' + str(e), 'reject',))
finally:
with self.lock:
self.events.remove(event)
@neovim.function('_wilder_python_uniq', sync=False, allow_nested=True)
def uniq(self, args):
self.run_in_background(self.uniq_handler, args)
def uniq_handler(self, event, ctx, candidates):
if event.is_set():
return
seen = set()
try:
res = [x for x in candidates if not (x in seen or seen.add(x))]
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_uniq: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort', sync=False, allow_nested=True)
def sort(self, args):
self.run_in_background(self.sort_handler, args)
def sort_handler(self, event, ctx, candidates):
if event.is_set():
return
try:
res = sorted(candidates)
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_sort: ' + str(e), 'reject',))
@neovim.function('_wilder_python_get_file_completion', sync=False)
def get_file_completion(self, args):
if args[2] == 'file_in_path':
path_opt = self.nvim.eval('&path')
directories = path_opt.split(',')
directories += [self.nvim.eval('expand("%:h")')]
elif args[2] == 'shellcmd':
path = os.environ['PATH']
directories = path.split(':')
else:
directories = [self.nvim.eval('getcwd()')]
wildignore_opt = self.nvim.eval('&wildignore')
self.run_in_background(self.get_file_completion_handler, args + [wildignore_opt, directories])
def get_file_completion_handler(self,
event,
ctx,
expand_arg,
expand_type,
has_wildcard,
path_prefix,
wildignore_opt,
directories):
if event.is_set():
return
try:
res = []
wildignore_list = wildignore_opt.split(',')
for directory in directories:
if event.is_set():
return
if not directory:
continue
if has_wildcard:
tail = os.path.basename(expand_arg)
show_hidden = tail.startswith('.')
pattern = ''
wildcard = os.path.join(directory, expand_arg)
wildcard = os.path.expandvars(wildcard)
it = glob.iglob(wildcard, recursive=True)
else:
path = os.path.join(directory, expand_arg)
(head, tail) = os.path.split(path)
show_hidden = tail.startswith('.')
pattern = tail + '*'
try:
it = os.scandir(head)
except FileNotFoundError:
continue
for entry in it:
if event.is_set():
return
try:
if has_wildcard:
entry = Path(entry)
try:
entry = entry.relative_to(directory)
except ValueError:
pass
if entry.name.startswith('.') and not show_hidden:
continue
if expand_type == 'dir' and not entry.is_dir():
continue
ignore = False
for wildignore in wildignore_list:
if fnmatch.fnmatch(entry.name, wildignore):
ignore = True
break
if ignore:
continue
if not has_wildcard and pattern and not fnmatch.fnmatch(entry.name, pattern):
continue
if expand_type == 'shellcmd' and (
not entry.is_file() or not os.access(os.path.join(directory, entry.name), os.X_OK)):
continue
if has_wildcard and Path(entry) == Path(path_prefix):
continue
if entry.is_dir():
res.append((str(entry) if has_wildcard else entry.name) + os.sep)
else:
res.append(str(entry) if has_wildcard else entry.name)
except OSError:
pass
res = sorted(res)
head = os.path.dirname(expand_arg)
if not has_wildcard:
res = list(map(lambda f: os.path.join(head, f) if head else f, res))
if expand_arg == '.':
res.insert(0, '../')
res.insert(0, './')
elif expand_arg == '..':
res.insert(0, '../')
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_get_file_completion: ' + str(e), 'reject',))
def get_basename(self, f):
if f.endswith(os.sep) or f.endswith('/'):
return os.path.basename(f[:-1])
return os.path.basename(f)
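    # User-name completion (for ~user expansion), backed by the pwd database.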
@neovim.function('_wilder_python_get_users', sync=False, allow_nested=True)
def get_users(self, args):
self.run_in_background(self.get_users_handler, args)
def get_users_handler(self, event, ctx, expand_arg, expand_type):
if event.is_set():
return
try:
res = []
for user in pwd.getpwall():
if user.pw_name.startswith(expand_arg):
res.append(user.pw_name)
res = sorted(res)
self.queue.put((ctx, res,))
except Exception as e:
self.queue.put((ctx, 'python_get_users: ' + str(e), 'reject',))
@neovim.function('_wilder_python_filter', sync=False, allow_nested=True)
def filter(self, args):
self.run_in_background(self.filter_handler, args)
def filter_handler(self, event, ctx, pattern, candidates, engine, has_file_args):
if event.is_set():
return
try:
re = importlib.import_module(engine)
# re2 does not use re.UNICODE by default
pattern = re.compile(pattern, re.UNICODE)
res = filter(lambda x: pattern.search(x if not has_file_args else self.get_basename(x)), candidates)
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_filter: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort_difflib', sync=False, allow_nested=True)
def sort_difflib(self, args):
self.run_in_background(self.sort_difflib_handler, args)
def sort_difflib_handler(self, event, ctx, candidates, query, quick=True):
if event.is_set():
return
try:
if quick:
res = sorted(candidates, key=lambda x: -difflib.SequenceMatcher(
None, x, query).quick_ratio())
else:
res = sorted(candidates, key=lambda x: -difflib.SequenceMatcher(
None, x, query).ratio())
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_sort_difflib: ' + str(e), 'reject',))
@neovim.function('_wilder_python_sort_fuzzywuzzy', sync=False, allow_nested=True)
def sort_fuzzywuzzy(self, args):
self.run_in_background(self.sort_fuzzywuzzy_handler, args)
def sort_fuzzywuzzy_handler(self, event, ctx, candidates, query, partial=True):
if event.is_set():
return
try:
fuzzy = importlib.import_module('fuzzywuzzy.fuzz')
if partial:
res = sorted(candidates, key=lambda x: -fuzzy.partial_ratio(x, query))
else:
res = sorted(candidates, key=lambda x: -fuzzy.ratio(x, query))
self.queue.put((ctx, list(res),))
except Exception as e:
self.queue.put((ctx, 'python_sort_fuzzywuzzy: ' + str(e), 'reject',))
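    # Byte-offset spans of the subsequences common to string and query, used for highlighting matches.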
@neovim.function('_wilder_python_common_subsequence_spans', sync=True)
def common_subsequence_spans(self, args):
string = args[0]
query = args[1]
case_sensitive = args[2]
if not case_sensitive:
string = string.upper()
query = query.upper()
result = []
blocks = difflib.SequenceMatcher(None, string, query).get_matching_blocks()
for block in blocks[: -1]:
start = block.a
end = block.a + block.size
byte_start = len(string[: start].encode('utf-8'))
byte_len = len(string[start : end].encode('utf-8'))
result.append([byte_start, byte_len])
return result
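    # Byte-offset spans of the regex capture groups of pattern matched against string.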
@neovim.function('_wilder_python_pcre2_capture_spans', sync=True)
def capture_spans(self, args):
pattern = args[0]
string = args[1]
module_name = args[2]
re = importlib.import_module(module_name)
match = re.match(pattern, string)
if not match or not match.lastindex:
return []
captures = []
for i in range(1, match.lastindex + 1):
start = match.start(i)
end = match.end(i)
if start == -1 or end == -1 or start == end:
continue
byte_start = len(string[: start].encode('utf-8'))
byte_len = len(string[start : end].encode('utf-8'))
captures.append([byte_start, byte_len])
return captures
wg-manager-backend/database/models.py | SH-Daemon/wg-manager | 417 | 6632272 |
import datetime
import sqlalchemy
from sqlalchemy import Integer, Column, DateTime
from sqlalchemy.orm import relationship, backref
from database.database import Base
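# Application user account used for authentication; role controls the access level.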
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
email = Column(sqlalchemy.String, unique=True, index=True)
password = Column(sqlalchemy.String)
username = Column(sqlalchemy.String, unique=True)
full_name = Column(sqlalchemy.String)
role = Column(sqlalchemy.String)
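# API key issued to a user; removed automatically when the user is deleted (ON DELETE CASCADE).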
class UserAPIKey(Base):
__tablename__ = "api_keys"
id = Column(Integer, primary_key=True, autoincrement=True)
key = Column(sqlalchemy.String, unique=True)
user_id = Column(Integer, sqlalchemy.ForeignKey('users.id', ondelete="CASCADE", onupdate="CASCADE"))
user = relationship("User", foreign_keys=[user_id])
created_date = Column(DateTime, default=datetime.datetime.utcnow)
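# A WireGuard server interface with its network settings and rendered configuration.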
class WGServer(Base):
__tablename__ = "server"
id = Column(Integer, primary_key=True, index=True)
interface = Column(sqlalchemy.String, unique=True, index=True)
subnet = Column(sqlalchemy.Integer, nullable=False)
address = Column(sqlalchemy.String, unique=True)
v6_address = Column(sqlalchemy.String, unique=True)
v6_subnet = Column(sqlalchemy.Integer, nullable=False)
listen_port = Column(sqlalchemy.String, unique=True)
private_key = Column(sqlalchemy.String)
public_key = Column(sqlalchemy.String)
endpoint = Column(sqlalchemy.String)
dns = Column(sqlalchemy.String)
allowed_ips = Column(sqlalchemy.String)
keep_alive = Column(sqlalchemy.Integer, default=0)
read_only = Column(sqlalchemy.Integer, default=0)
post_up = Column(sqlalchemy.String)
post_down = Column(sqlalchemy.String)
is_running = Column(sqlalchemy.Boolean)
configuration = Column(sqlalchemy.Text)
peers = relationship("WGPeer", cascade="all, delete", passive_deletes=True, lazy="joined")
class WGPeer(Base):
__tablename__ = "peer"
id = Column(Integer, primary_key=True, index=True)
name = Column(sqlalchemy.String, default="Unnamed")
address = Column(sqlalchemy.String)
v6_address = Column(sqlalchemy.String)
public_key = Column(sqlalchemy.String)
private_key = Column(sqlalchemy.String)
shared_key = Column(sqlalchemy.Text)
dns = Column(sqlalchemy.Text)
allowed_ips = Column(sqlalchemy.String)
keep_alive = Column(sqlalchemy.Integer, default=0)
read_only = Column(sqlalchemy.Integer, default=0)
server_id = Column(Integer, sqlalchemy.ForeignKey('server.id', ondelete="CASCADE", onupdate="CASCADE"))
server = relationship("WGServer", backref=backref("server"))
configuration = Column(sqlalchemy.Text)
charm/priors.py | pacargile/charm | 0 | 6632273 |
import numpy as np
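# Priors for the six model parameters (X, Y, Z and their sigmas): uniform priors
# feed the unit-cube transform below, any other priors are stored separately.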
class priors(object):
"""docstring for priors"""
def __init__(self, inpriordict):
super(priors, self).__init__()
# find uniform priors and put them into a
# dictionary used for the prior transformation
self.priordict = {}
# put any additional priors into a dictionary so that
# they can be applied in the lnprior_* functions
self.additionalpriors = {}
for kk in inpriordict.keys():
for ii in inpriordict[kk].keys():
if ii == 'uniform':
self.priordict[kk] = inpriordict[kk]['uniform']
else:
try:
self.additionalpriors[kk][ii] = inpriordict[kk][ii]
except KeyError:
self.additionalpriors[kk] = {ii:inpriordict[kk][ii]}
def priortrans(self,upars):
# split upars
ux,usig_x,uy,usig_y,uz,usig_z = upars
outarr = []
if 'X' in self.priordict.keys():
x = (
(max(self.priordict['X'])-min(self.priordict['X']))*ux +
min(self.priordict['X'])
)
else:
x = (1e+5 - -1e+5)*ux - 1e+5
if 'sig_X' in self.priordict.keys():
sig_x = (
(max(self.priordict['sig_X'])-min(self.priordict['sig_X']))*usig_x +
min(self.priordict['sig_X'])
)
else:
sig_x = (10000.0 - 0.0)*usig_x + 0.0
if 'Y' in self.priordict.keys():
y = (
(max(self.priordict['Y'])-min(self.priordict['Y']))*uy +
min(self.priordict['Y'])
)
else:
y = (1e+5 - -1e+5)*uy - 1e+5
if 'sig_Y' in self.priordict.keys():
sig_y = (
(max(self.priordict['sig_Y'])-min(self.priordict['sig_Y']))*usig_y +
min(self.priordict['sig_Y'])
)
else:
sig_y = (10000.0 - 0.0)*usig_y + 0.0
if 'Z' in self.priordict.keys():
z = (
(max(self.priordict['Z'])-min(self.priordict['Z']))*uz +
min(self.priordict['Z'])
)
else:
z = (1e+5 - -1e+5)*uz - 1e+5
if 'sig_Z' in self.priordict.keys():
sig_z = (
(max(self.priordict['sig_Z'])-min(self.priordict['sig_Z']))*usig_z +
min(self.priordict['sig_Z'])
)
else:
sig_z = (10000.0 - 0.0)*usig_z + 0.0
pars = [x,sig_x,y,sig_y,z,sig_z]
return pars
user_2.py | irachitrastogi/A-Distributed-memory-Store | 0 | 6632274 |
import socket
import os
import multiprocessing as mp
import time
import json
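# This client's id; requests are sent to the node listening on port 8000 + user_id.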
user_id = 1
#######################################################################################################
def user_write_request(user_id, value_to_be_written):
print("Write request initiated...................................")
port = (8000) + user_id
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(('0.0.0.0', port))
    except socket.error as e:
print("Socket error: ", e)
return
send_write_message = '{"activity":"user_write_request","value_to_be_written":"'+ value_to_be_written+'"}'
client.send(send_write_message)
from_server = client.recv(4096)
rcvd_mssg = from_server
print('Received write message udpate statue from Group man: ', rcvd_mssg)
#node_status[node_id-1] = True
#node_age[node_id-1] = rcvd_mssg["age"]
client.close()
#print('My port is :', port, 'I got the message: ', node_age[node_id-1])
######################################################################################################
def user_read_request(user_id):
print("Read request initiated...................................")
port = (8000) + user_id
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect(('0.0.0.0', port))
    except socket.error as e:
print("Socket error: ", e)
return
send_write_message = '{"activity":"user_read_request"}'
client.send(send_write_message)
from_server = client.recv(4096)
rcvd_mssg = from_server
print('Received read result: ', rcvd_mssg)
#node_status[node_id-1] = True
#node_age[node_id-1] = rcvd_mssg["age"]
client.close()
#print('My port is :', port, 'I got the message: ', node_age[node_id-1])
############################################################################################################
seek_intent = raw_input("Do you want to read / write (r/w) : ")
print(seek_intent)
user_write_value = ""
if seek_intent == "w":
user_write_value = raw_input("Enter alphabet to be written : ")
print(user_write_value)
if seek_intent=="w":
time.sleep(5)
user_write_request(user_id, user_write_value)
if seek_intent=="r":
time.sleep(5)
user_read_request(user_id)
################################################################################################
test_connect.py | sbyount/learning_python_2016 | 0 | 6632275 | from netmiko import ConnectHandler
import logging
logging.basicConfig(filename='test.log', level=logging.DEBUG)
logger = logging.getLogger("my_log")
logger.debug('This message should go to the log file')
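# Netmiko device definitions: SSH for the ASR 1002-X and the 3945, telnet for the 1921.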
cisco_1002x = {
'device_type': 'cisco_ios',
'ip': '10.9.18.38',
'username': 'zg46503',
'password': '*******',
'port' : 22, # optional, defaults to 22
'secret': '', # optional, defaults to ''
'verbose': False, # optional, defaults to False
'global_delay_factor': 4, # Try 2 and then 4
}
cisco_1921 = {
'device_type': 'cisco_ios_telnet', # use telnet instead of ssh
'ip': '10.192.64.190',
'username': 'netsupport',
'password': '<PASSWORD>',
'port' : 23, # optional, defaults to 22
'secret': '', # optional, defaults to ''
'verbose': False, # optional, defaults to False
}
cisco_3945 = {
'device_type': 'cisco_ios',
'ip': '10.192.65.35',
'username': 'netsupport',
'password': '<PASSWORD>',
'port' : 22, # optional, defaults to 22
'secret': '', # optional, defaults to ''
'verbose': False, # optional, defaults to False
}
net_connect = ConnectHandler(**cisco_1002x)
#net_connect.send_command('\n')
output = net_connect.send_command('show ip int brief')
print(output)
extensions/.stubs/clrclasses/__clrclasses__/System/Web/__init__.py | vicwjb/Pycad | 1 | 6632276 |
from __clrclasses__.System import Enum as _n_0_t_0
from __clrclasses__.System import IComparable as _n_0_t_1
from __clrclasses__.System import IFormattable as _n_0_t_2
from __clrclasses__.System import IConvertible as _n_0_t_3
from __clrclasses__.System.Runtime.InteropServices import _Attribute as _n_1_t_0
from __clrclasses__.System.Security import CodeAccessPermission as _n_2_t_0
from __clrclasses__.System.Security import IPermission as _n_2_t_1
from __clrclasses__.System.Security import IStackWalk as _n_2_t_2
from __clrclasses__.System.Security.Permissions import IUnrestrictedPermission as _n_3_t_0
from __clrclasses__.System.Security.Permissions import PermissionState as _n_3_t_1
from __clrclasses__.System.Security.Permissions import CodeAccessSecurityAttribute as _n_3_t_2
from __clrclasses__.System.Security.Permissions import SecurityAction as _n_3_t_3
import typing
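# Auto-generated CLR type stubs for the System.Web namespace.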
class AspNetHostingPermission(_n_2_t_0, _n_2_t_1, _n_2_t_2, _n_3_t_0):
@property
def Level(self) -> AspNetHostingPermissionLevel:"""Level { get; set; } -> AspNetHostingPermissionLevel"""
def __init__(self, level: AspNetHostingPermissionLevel) -> AspNetHostingPermission:...
def __init__(self, state: _n_3_t_1) -> AspNetHostingPermission:...
class AspNetHostingPermissionAttribute(_n_3_t_2, _n_1_t_0):
@property
def Level(self) -> AspNetHostingPermissionLevel:"""Level { get; set; } -> AspNetHostingPermissionLevel"""
def __init__(self, action: _n_3_t_3) -> AspNetHostingPermissionAttribute:...
class AspNetHostingPermissionLevel(_n_0_t_0, _n_0_t_1, _n_0_t_2, _n_0_t_3):
High: int
Low: int
Medium: int
Minimal: int
_None: int
Unrestricted: int
value__: int
employees/urls.py | rcdosado/URO-MIS | 0 | 6632277 | from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^dashboard/$', views.dashboard, name='dashboard'),
url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
url(r'^signup/$', views.SignupView.as_view(), name='signup'),
url('^', include('django.contrib.auth.urls'))
]
meizituspider.py | Stephen-Pierre/meizitu_Spider | 8 | 6632278 | import requests
from bs4 import BeautifulSoup
import lxml
import os
import time
import tkinter as tk
from tkinter.filedialog import askdirectory
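# Request headers reused for every request: a Referer plus a browser User-Agent.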
headers = {'Referer':'https://www.mzitu.com','User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3679.0 Safari/537.36'}
# Build the URL of every listing page in the requested range
def create_page_url_list(start_page, end_page):
url_list = []
for i in range(int(start_page), int(end_page)+1):
url_list.append("https://www.mzitu.com/page/{}/".format(i))
    # Crawl each page via get_chapter_url
for url in url_list:
get_chapter_url(str(url))
# Get the total number of listing pages
def get_max_page():
url = "https://www.mzitu.com/"
response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(response.content, "lxml")
    # Read the total page count from the pagination widget
max_page = soup.select("body > div.main > div.main-content > div.pagination > div > a:nth-child(6)")[0].contents[0]
return max_page
# Get the link of every gallery on a listing page
def get_chapter_url(page_url):
chapter_url = page_url
response = requests.get(url=chapter_url, headers=headers)
soup = BeautifulSoup(response.content, "lxml")
    # Collect the gallery links into a temporary dict of {url: title}
res_dict = {}
res = soup.find_all("div", class_="main-content")[0].find_all("div", class_="postlist")[0].find_all("a", target="_blank")
for i in range(1, len(res), 2):
url = res[i].get("href")
title = res[i].contents[0]
res_dict[url] = title
download_image(res_dict)
# Resolve every image link of each gallery and download the images
def download_image(url_dict):
for url, title in url_dict.items():
        # Create a folder named after the gallery title to store the files
title = str(title)
path = "{0}/{1}".format(path_chosen, title)
if not os.path.exists(path):
os.makedirs(path)
        # Parse each image's download address and download it
response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(response.content, "lxml")
        # Get the number of images in this gallery
max_content_page = int(soup.select("body > div.main > div.content > div.pagenavi > a:nth-child(7) > span")[0].contents[0])
        # Build the URL of every image page
for page in range(1, max_content_page+1):
img_url = url + '/' + str(page)
            # Extract the image's download URL and fetch it
result = requests.get(url=img_url, headers=headers)
soup = BeautifulSoup(result.content, "lxml")
download_url = soup.find_all('img',alt="{}".format(title))[0].get("src")
image = requests.get(url=download_url, headers=headers)
            # Save the image to disk
with open("{0}/{1}.jpg".format(path, page), 'wb') as fp:
fp.write(image.content)
time.sleep(1)
path_chosen = os.getcwd()
# Build the Tkinter GUI
def main():
top = tk.Tk()
top.title("妹子图专用爬虫")
top.geometry("400x300")
    # Prompt the user for the page range to crawl
    # Call get_max_page to get the current maximum page number
cur_max_page = get_max_page()
label1 = tk.Label(top,text = "请输入要爬取的页码:", font = ("宋体", 18))
label1.grid(row=0,sticky = tk.W)
label2 = tk.Label(top, text="(提示:当前共有{}页)".format(cur_max_page))
label2.grid(row=1, sticky = tk.W)
label3 = tk.Label(top,text = "请输入起始页码:", font = ("宋体", 14))
label3.grid(row=2,sticky = tk.W)
v1 = tk.IntVar()
page_area1 = tk.Entry(top,textvariable=v1)
v1.set(1)
page_area1.grid(row = 3, sticky = tk.W)
label4 = tk.Label(top,text = "请输入结束页码:", font = ("宋体", 14))
label4.grid(row=4,sticky = tk.W)
v2 = tk.IntVar()
page_area2 = tk.Entry(top, textvariable=v2)
v2.set(1)
page_area2.grid(row = 6, sticky = tk.W)
    # Callback for choosing the save directory
def selectPath():
global path_chosen
path_ = askdirectory(title = "请选择保存路径")
label5 = tk.Label(top,text = "保存路径:{}".format(path_), font = ("宋体", 12))
label5.grid(row=8,sticky = tk.W)
path_chosen = path_
    # Button for choosing the save path
button0 = tk.Button(top, text="选择保存路径", font=("宋体", 18), command=selectPath)
button0.grid(row = 7,sticky = tk.W)
    # Start button
button1 = tk.Button(top, text="Start", font=("宋体", 18), command=lambda : create_page_url_list(page_area1.get(), page_area2.get()))
button1.grid(row = 9,sticky = tk.W)
top.mainloop()
if __name__ == "__main__":
    main()
tests/utils.py | girish946/supertokens-python | 36 | 6632279 |
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime, timezone
from http.cookies import SimpleCookie
from os import environ, scandir, kill, remove
from shutil import rmtree
from signal import SIGTERM
from subprocess import run, DEVNULL
from time import sleep
from requests.models import Response
from supertokens_python.recipe.emailpassword import EmailPasswordRecipe
from supertokens_python.recipe.emailverification import EmailVerificationRecipe
from supertokens_python.recipe.jwt import JWTRecipe
from supertokens_python.recipe.session import SessionRecipe
from yaml import dump, load, FullLoader
from supertokens_python import Supertokens
from supertokens_python.process_state import ProcessState
from supertokens_python.recipe.thirdparty import ThirdPartyRecipe
from supertokens_python.recipe.thirdpartyemailpassword import ThirdPartyEmailPasswordRecipe
INSTALLATION_PATH = environ['SUPERTOKENS_PATH']
SUPERTOKENS_PROCESS_DIR = INSTALLATION_PATH + '/.started'
LICENSE_FILE_PATH = INSTALLATION_PATH + '/licenseKey'
CONFIG_YAML_FILE_PATH = INSTALLATION_PATH + '/config.yaml'
ORIGINAL_LICENSE_FILE_PATH = INSTALLATION_PATH + '/temp/licenseKey'
ORIGINAL_CONFIG_YAML_FILE_PATH = INSTALLATION_PATH + '/temp/config.yaml'
WEB_SERVER_TEMP_DIR = INSTALLATION_PATH + '/webserver-temp'
API_VERSION_TEST_NON_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.1']
API_VERSION_TEST_NON_SUPPORTED_CV = ['0.1', '0.2', '1.2', '2.0', '3.0']
API_VERSION_TEST_MULTIPLE_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.1']
API_VERSION_TEST_MULTIPLE_SUPPORTED_CV = ['0.1', '0.2', '1.1', '2.1', '3.0']
API_VERSION_TEST_MULTIPLE_SUPPORTED_RESULT = '2.1'
API_VERSION_TEST_SINGLE_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.0']
API_VERSION_TEST_SINGLE_SUPPORTED_CV = ['0.1', '0.2', '1.1', '2.1', '3.0']
API_VERSION_TEST_SINGLE_SUPPORTED_RESULT = '1.1'
API_VERSION_TEST_BASIC_RESULT = ['2.0', '2.1', '2.2', '2.3', '2.9']
SUPPORTED_CORE_DRIVER_INTERFACE_FILE = './coreDriverInterfaceSupported.json'
TEST_ENABLE_ANTI_CSRF_CONFIG_KEY = 'enable_anti_csrf'
TEST_ACCESS_TOKEN_PATH_VALUE = '/test'
TEST_ACCESS_TOKEN_PATH_CONFIG_KEY = 'access_token_path'
TEST_REFRESH_TOKEN_PATH_KEY_VALUE = '/refresh'
TEST_REFRESH_TOKEN_PATH_KEY_VALUE_TEST_DECORATOR = '/refresh'
TEST_REFRESH_TOKEN_PATH_CONFIG_KEY = 'refresh_api_path'
TEST_SESSION_EXPIRED_STATUS_CODE_VALUE = 401
TEST_SESSION_EXPIRED_STATUS_CODE_CONFIG_KEY = 'session_expired_status_code'
TEST_COOKIE_DOMAIN_VALUE = 'test.supertokens.io'
TEST_COOKIE_DOMAIN_CONFIG_KEY = 'cookie_domain'
TEST_ACCESS_TOKEN_MAX_AGE_VALUE = 7200 # seconds
TEST_ACCESS_TOKEN_MAX_AGE_CONFIG_KEY = 'access_token_validity'
TEST_REFRESH_TOKEN_MAX_AGE_VALUE = 720 # minutes
TEST_REFRESH_TOKEN_MAX_AGE_CONFIG_KEY = 'refresh_token_validity'
TEST_COOKIE_SAME_SITE_VALUE = 'Lax'
TEST_COOKIE_SAME_SITE_CONFIG_KEY = 'cookie_same_site'
TEST_COOKIE_SECURE_VALUE = False
TEST_COOKIE_SECURE_CONFIG_KEY = 'cookie_secure'
TEST_DRIVER_CONFIG_COOKIE_DOMAIN = 'supertokens.io'
TEST_DRIVER_CONFIG_COOKIE_SECURE = False
TEST_DRIVER_CONFIG_COOKIE_SAME_SITE = 'lax'
TEST_DRIVER_CONFIG_ACCESS_TOKEN_PATH = '/'
TEST_DRIVER_CONFIG_REFRESH_TOKEN_PATH = '/auth/session/refresh'
ACCESS_CONTROL_EXPOSE_HEADER = 'Access-Control-Expose-Headers'
ACCESS_CONTROL_EXPOSE_HEADER_ANTI_CSRF_ENABLE = 'front-token, id-refresh-token, anti-csrf'
ACCESS_CONTROL_EXPOSE_HEADER_ANTI_CSRF_DISABLE = 'id-refresh-token'
TEST_ID_TOKEN = "<KEY>"
def set_key_value_in_config(key, value):
f = open(CONFIG_YAML_FILE_PATH, 'r')
data = load(f, Loader=FullLoader)
f.close()
data[key] = value
f = open(CONFIG_YAML_FILE_PATH, 'w')
dump(data, f)
f.close()
def drop_key(key):
f = open(CONFIG_YAML_FILE_PATH, 'r')
data = load(f, Loader=FullLoader)
f.close()
data.pop(key)
f = open(CONFIG_YAML_FILE_PATH, 'w')
dump(data, f)
f.close()
def __stop_st(retry=50):
process_ids = __get_list_of_process_ids()
for pid in process_ids:
kill(int(pid), SIGTERM)
process_ids = __get_list_of_process_ids()
if len(process_ids) != 0:
if retry == 0:
            raise Exception('could not stop ST process')
sleep(0.5)
__stop_st(retry - 1)
sleep(1)
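# Starts a SuperTokens core process and waits for its pid file to appear in the .started directory.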
def start_st(host='localhost', port='3567'):
pid_after = pid_before = __get_list_of_process_ids()
run('cd ' + INSTALLATION_PATH + ' && java -Djava.security.egd=file:/dev/urandom -classpath '
'"./core/*:./plugin-interface/*" io.supertokens.Main ./ DEV host='
+ host + ' port=' + str(port) + ' &', shell=True, stdout=DEVNULL)
for _ in range(35):
pid_after = __get_list_of_process_ids()
if len(pid_after) != len(pid_before):
break
sleep(0.5)
if len(pid_after) == len(pid_before):
raise Exception('could not start ST process')
def setup_st():
    try:
        run("cd " + INSTALLATION_PATH + " && cp temp/licenseKey ./licenseKey", shell=True)
    except BaseException:
        pass
    run("cd " + INSTALLATION_PATH + " && cp temp/config.yaml ./config.yaml", shell=True)
def clean_st():
try:
remove(LICENSE_FILE_PATH)
except FileNotFoundError:
pass
try:
remove(CONFIG_YAML_FILE_PATH)
except FileNotFoundError:
pass
try:
rmtree(SUPERTOKENS_PROCESS_DIR)
except FileNotFoundError:
pass
try:
rmtree(WEB_SERVER_TEMP_DIR)
except FileNotFoundError:
pass
def __get_list_of_process_ids():
process_ids = []
try:
processes = scandir(SUPERTOKENS_PROCESS_DIR)
for process in processes:
f = open(SUPERTOKENS_PROCESS_DIR + '/' + process.name, 'r')
process_ids.append(f.read())
f.close()
except FileNotFoundError:
pass
return process_ids
def reset():
__stop_st()
ProcessState.get_instance().reset()
Supertokens.reset()
SessionRecipe.reset()
ThirdPartyEmailPasswordRecipe.reset()
EmailPasswordRecipe.reset()
EmailVerificationRecipe.reset()
ThirdPartyRecipe.reset()
JWTRecipe.reset()
def get_cookie_from_response(response, cookie_name):
cookies = extract_all_cookies(response)
if cookie_name in cookies:
return cookies[cookie_name]
return None
def extract_all_cookies(response: Response):
if response.headers.get('set-cookie') is None:
return {}
cookie_headers = SimpleCookie(
response.headers.get('set-cookie'))
cookies = dict()
for key, morsel in cookie_headers.items():
cookies[key] = {
'value': morsel.value,
'name': key
}
for k, v in morsel.items():
if (k == 'secure' or k == 'httponly') and v == '':
cookies[key][k] = None
elif k == 'samesite':
if len(v) > 0 and v[-1] == ',':
v = v[:-1]
cookies[key][k] = v
else:
cookies[key][k] = v
return cookies
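# Converts an HTTP cookie 'expires' attribute string into a unix timestamp.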
def get_unix_timestamp(expiry):
return int(datetime.strptime(
expiry, '%a, %d %b %Y %H:%M:%S GMT').replace(tzinfo=timezone.utc).timestamp())
def verify_within_5_second_diff(n1, n2):
return -5 <= (n1 - n2) <= 5
def sign_up_request(app, email, password):
return app.post(
url="/auth/signup",
headers={
"Content-Type": "application/json"
},
json={
'formFields':
[{
"id": "password",
"value": password
},
{
"id": "email",
"value": email
}]
})
def email_verify_token_request(
app, accessToken, idRefreshTokenFromCookie, antiCsrf, userId):
return app.post(
url="/auth/user/email/verify/token",
headers={
"Content-Type": "application/json",
'anti-csrf': antiCsrf
},
cookies={
'sAccessToken': accessToken,
'sIdRefreshToken': idRefreshTokenFromCookie,
},
data=str.encode(userId))
| # Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime, timezone
from http.cookies import SimpleCookie
from os import environ, scandir, kill, remove
from shutil import rmtree
from signal import SIGTERM
from subprocess import run, DEVNULL
from time import sleep
from requests.models import Response
from supertokens_python.recipe.emailpassword import EmailPasswordRecipe
from supertokens_python.recipe.emailverification import EmailVerificationRecipe
from supertokens_python.recipe.jwt import JWTRecipe
from supertokens_python.recipe.session import SessionRecipe
from yaml import dump, load, FullLoader
from supertokens_python import Supertokens
from supertokens_python.process_state import ProcessState
from supertokens_python.recipe.thirdparty import ThirdPartyRecipe
from supertokens_python.recipe.thirdpartyemailpassword import ThirdPartyEmailPasswordRecipe
INSTALLATION_PATH = environ['SUPERTOKENS_PATH']
SUPERTOKENS_PROCESS_DIR = INSTALLATION_PATH + '/.started'
LICENSE_FILE_PATH = INSTALLATION_PATH + '/licenseKey'
CONFIG_YAML_FILE_PATH = INSTALLATION_PATH + '/config.yaml'
ORIGINAL_LICENSE_FILE_PATH = INSTALLATION_PATH + '/temp/licenseKey'
ORIGINAL_CONFIG_YAML_FILE_PATH = INSTALLATION_PATH + '/temp/config.yaml'
WEB_SERVER_TEMP_DIR = INSTALLATION_PATH + '/webserver-temp'
API_VERSION_TEST_NON_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.1']
API_VERSION_TEST_NON_SUPPORTED_CV = ['0.1', '0.2', '1.2', '2.0', '3.0']
API_VERSION_TEST_MULTIPLE_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.1']
API_VERSION_TEST_MULTIPLE_SUPPORTED_CV = ['0.1', '0.2', '1.1', '2.1', '3.0']
API_VERSION_TEST_MULTIPLE_SUPPORTED_RESULT = '2.1'
API_VERSION_TEST_SINGLE_SUPPORTED_SV = ['0.0', '1.0', '1.1', '2.0']
API_VERSION_TEST_SINGLE_SUPPORTED_CV = ['0.1', '0.2', '1.1', '2.1', '3.0']
API_VERSION_TEST_SINGLE_SUPPORTED_RESULT = '1.1'
API_VERSION_TEST_BASIC_RESULT = ['2.0', '2.1', '2.2', '2.3', '2.9']
SUPPORTED_CORE_DRIVER_INTERFACE_FILE = './coreDriverInterfaceSupported.json'
TEST_ENABLE_ANTI_CSRF_CONFIG_KEY = 'enable_anti_csrf'
TEST_ACCESS_TOKEN_PATH_VALUE = '/test'
TEST_ACCESS_TOKEN_PATH_CONFIG_KEY = 'access_token_path'
TEST_REFRESH_TOKEN_PATH_KEY_VALUE = '/refresh'
TEST_REFRESH_TOKEN_PATH_KEY_VALUE_TEST_DECORATOR = '/refresh'
TEST_REFRESH_TOKEN_PATH_CONFIG_KEY = 'refresh_api_path'
TEST_SESSION_EXPIRED_STATUS_CODE_VALUE = 401
TEST_SESSION_EXPIRED_STATUS_CODE_CONFIG_KEY = 'session_expired_status_code'
TEST_COOKIE_DOMAIN_VALUE = 'test.supertokens.io'
TEST_COOKIE_DOMAIN_CONFIG_KEY = 'cookie_domain'
TEST_ACCESS_TOKEN_MAX_AGE_VALUE = 7200 # seconds
TEST_ACCESS_TOKEN_MAX_AGE_CONFIG_KEY = 'access_token_validity'
TEST_REFRESH_TOKEN_MAX_AGE_VALUE = 720 # minutes
TEST_REFRESH_TOKEN_MAX_AGE_CONFIG_KEY = 'refresh_token_validity'
TEST_COOKIE_SAME_SITE_VALUE = 'Lax'
TEST_COOKIE_SAME_SITE_CONFIG_KEY = 'cookie_same_site'
TEST_COOKIE_SECURE_VALUE = False
TEST_COOKIE_SECURE_CONFIG_KEY = 'cookie_secure'
TEST_DRIVER_CONFIG_COOKIE_DOMAIN = 'supertokens.io'
TEST_DRIVER_CONFIG_COOKIE_SECURE = False
TEST_DRIVER_CONFIG_COOKIE_SAME_SITE = 'lax'
TEST_DRIVER_CONFIG_ACCESS_TOKEN_PATH = '/'
TEST_DRIVER_CONFIG_REFRESH_TOKEN_PATH = '/auth/session/refresh'
ACCESS_CONTROL_EXPOSE_HEADER = 'Access-Control-Expose-Headers'
ACCESS_CONTROL_EXPOSE_HEADER_ANTI_CSRF_ENABLE = 'front-token, id-refresh-token, anti-csrf'
ACCESS_CONTROL_EXPOSE_HEADER_ANTI_CSRF_DISABLE = 'id-refresh-token'
TEST_ID_TOKEN = "<KEY>"
def set_key_value_in_config(key, value):
f = open(CONFIG_YAML_FILE_PATH, 'r')
data = load(f, Loader=FullLoader)
f.close()
data[key] = value
f = open(CONFIG_YAML_FILE_PATH, 'w')
dump(data, f)
f.close()
def drop_key(key):
f = open(CONFIG_YAML_FILE_PATH, 'r')
data = load(f, Loader=FullLoader)
f.close()
data.pop(key)
f = open(CONFIG_YAML_FILE_PATH, 'w')
dump(data, f)
f.close()
def __stop_st(retry=50):
process_ids = __get_list_of_process_ids()
for pid in process_ids:
kill(int(pid), SIGTERM)
process_ids = __get_list_of_process_ids()
if len(process_ids) != 0:
if retry == 0:
raise Exception('')
sleep(0.5)
__stop_st(retry - 1)
sleep(1)
def start_st(host='localhost', port='3567'):
pid_after = pid_before = __get_list_of_process_ids()
run('cd ' + INSTALLATION_PATH + ' && java -Djava.security.egd=file:/dev/urandom -classpath '
'"./core/*:./plugin-interface/*" io.supertokens.Main ./ DEV host='
+ host + ' port=' + str(port) + ' &', shell=True, stdout=DEVNULL)
for _ in range(35):
pid_after = __get_list_of_process_ids()
if len(pid_after) != len(pid_before):
break
sleep(0.5)
if len(pid_after) == len(pid_before):
raise Exception('could not start ST process')
def setup_st():
    # copy the temp licenseKey (if present) and config.yaml into the installation dir
    try:
        run("cd " + INSTALLATION_PATH +
            " && cp temp/licenseKey ./licenseKey", shell=True)
    except BaseException:
        pass
    run("cd " + INSTALLATION_PATH +
        " && cp temp/config.yaml ./config.yaml", shell=True)
def clean_st():
try:
remove(LICENSE_FILE_PATH)
except FileNotFoundError:
pass
try:
remove(CONFIG_YAML_FILE_PATH)
except FileNotFoundError:
pass
try:
rmtree(SUPERTOKENS_PROCESS_DIR)
except FileNotFoundError:
pass
try:
rmtree(WEB_SERVER_TEMP_DIR)
except FileNotFoundError:
pass
def __get_list_of_process_ids():
process_ids = []
try:
processes = scandir(SUPERTOKENS_PROCESS_DIR)
for process in processes:
f = open(SUPERTOKENS_PROCESS_DIR + '/' + process.name, 'r')
process_ids.append(f.read())
f.close()
except FileNotFoundError:
pass
return process_ids
def reset():
__stop_st()
ProcessState.get_instance().reset()
Supertokens.reset()
SessionRecipe.reset()
ThirdPartyEmailPasswordRecipe.reset()
EmailPasswordRecipe.reset()
EmailVerificationRecipe.reset()
ThirdPartyRecipe.reset()
JWTRecipe.reset()
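# Editor's note (illustrative, not in the original file): in the test suites these
# helpers are usually combined as setup/teardown, e.g. with a hypothetical pytest
# fixture along the lines of:
#
#     @pytest.fixture(autouse=True)
#     def st_core():
#         setup_st()
#         start_st()
#         yield
#         reset()
#         clean_st()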
def get_cookie_from_response(response, cookie_name):
cookies = extract_all_cookies(response)
if cookie_name in cookies:
return cookies[cookie_name]
return None
def extract_all_cookies(response: Response):
if response.headers.get('set-cookie') is None:
return {}
cookie_headers = SimpleCookie(
response.headers.get('set-cookie'))
cookies = dict()
for key, morsel in cookie_headers.items():
cookies[key] = {
'value': morsel.value,
'name': key
}
for k, v in morsel.items():
if (k == 'secure' or k == 'httponly') and v == '':
cookies[key][k] = None
elif k == 'samesite':
if len(v) > 0 and v[-1] == ',':
v = v[:-1]
cookies[key][k] = v
else:
cookies[key][k] = v
return cookies
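# Editor's note (assumed shape, derived from the parsing above): extract_all_cookies
# returns a dict keyed by cookie name, where each entry holds the morsel's 'value'
# and 'name' plus any attributes such as 'path' or 'samesite'; empty 'secure' /
# 'httponly' flags are normalised to None and a trailing comma on 'samesite' is
# stripped, so tests can assert on individual attributes directly.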
def get_unix_timestamp(expiry):
return int(datetime.strptime(
expiry, '%a, %d %b %Y %H:%M:%S GMT').replace(tzinfo=timezone.utc).timestamp())
def verify_within_5_second_diff(n1, n2):
return -5 <= (n1 - n2) <= 5
def sign_up_request(app, email, password):
return app.post(
url="/auth/signup",
headers={
"Content-Type": "application/json"
},
json={
'formFields':
[{
"id": "password",
"value": password
},
{
"id": "email",
"value": email
}]
})
def email_verify_token_request(
app, accessToken, idRefreshTokenFromCookie, antiCsrf, userId):
return app.post(
url="/auth/user/email/verify/token",
headers={
"Content-Type": "application/json",
'anti-csrf': antiCsrf
},
cookies={
'sAccessToken': accessToken,
'sIdRefreshToken': idRefreshTokenFromCookie,
},
data=str.encode(userId)) | en | 0.892619 | # Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved. # # This software is licensed under the Apache License, Version 2.0 (the # "License") as published by the Apache Software Foundation. # # You may not use this file except in compliance with the License. You may # obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # seconds # minutes | 1.509005 | 2 |
tests/base/database/test_helpers.py | vralex/RumbleRunner | 2 | 6632280 | from base import DBHelpers, SessionScope, TelegramUser
from tests.utils import InBotTestCase
class TestDBUtils(InBotTestCase):
def test_select_and_update_by_tg_id(self):
new_user = TelegramUser(tg_id=1, first_name='aaa')
session = SessionScope.session()
session.add(new_user)
user_as_is = DBHelpers.select_and_update_by_tg_id(TelegramUser, 1)
self.assertEqual(user_as_is.tg_id, 1)
self.assertEqual(user_as_is.first_name, 'aaa')
self.assertEqual(user_as_is.last_name, None)
user_updated = DBHelpers.select_and_update_by_tg_id(TelegramUser, 1, first_name='aaa_new', last_name='aaa_last')
self.assertEqual(user_updated.tg_id, 1)
self.assertEqual(user_updated.first_name, 'aaa_new')
self.assertEqual(user_updated.last_name, 'aaa_last')
self.assertEqual(DBHelpers.select_and_update_by_tg_id(TelegramUser, 1).first_name, 'aaa_new')
very_new_user = DBHelpers.select_and_update_by_tg_id(TelegramUser, 15, first_name='aaa_new')
self.assertEqual(very_new_user.tg_id, 15)
self.assertEqual(very_new_user.first_name, 'aaa_new')
self.assertEqual(very_new_user.last_name, None)
| from base import DBHelpers, SessionScope, TelegramUser
from tests.utils import InBotTestCase
class TestDBUtils(InBotTestCase):
def test_select_and_update_by_tg_id(self):
new_user = TelegramUser(tg_id=1, first_name='aaa')
session = SessionScope.session()
session.add(new_user)
user_as_is = DBHelpers.select_and_update_by_tg_id(TelegramUser, 1)
self.assertEqual(user_as_is.tg_id, 1)
self.assertEqual(user_as_is.first_name, 'aaa')
self.assertEqual(user_as_is.last_name, None)
user_updated = DBHelpers.select_and_update_by_tg_id(TelegramUser, 1, first_name='aaa_new', last_name='aaa_last')
self.assertEqual(user_updated.tg_id, 1)
self.assertEqual(user_updated.first_name, 'aaa_new')
self.assertEqual(user_updated.last_name, 'aaa_last')
self.assertEqual(DBHelpers.select_and_update_by_tg_id(TelegramUser, 1).first_name, 'aaa_new')
very_new_user = DBHelpers.select_and_update_by_tg_id(TelegramUser, 15, first_name='aaa_new')
self.assertEqual(very_new_user.tg_id, 15)
self.assertEqual(very_new_user.first_name, 'aaa_new')
self.assertEqual(very_new_user.last_name, None)
| none | 1 | 2.29226 | 2 |
|
python_modules/dagster-graphql/dagster_graphql_tests/test_multiprocessing.py | david-alexander-white/dagster | 1 | 6632281 | import csv
import os
import time
from collections import OrderedDict
from copy import deepcopy
from dagster_graphql.implementation.pipeline_execution_manager import (
QueueingSubprocessExecutionManager,
SubprocessExecutionManager,
)
from dagster import (
ExecutionTargetHandle,
Field,
InputDefinition,
Int,
Materialization,
OutputDefinition,
Path,
String,
as_dagster_type,
composite_solid,
input_hydration_config,
lambda_solid,
output_materialization_config,
pipeline,
solid,
)
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.events import DagsterEventType
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.utils import make_new_run_id
from dagster.utils import file_relative_path, safe_tempfile_path
class PoorMansDataFrame_(list):
pass
@input_hydration_config(Path)
def df_input_schema(_context, path):
with open(path, 'r') as fd:
return PoorMansDataFrame_(
[OrderedDict(sorted(x.items(), key=lambda x: x[0])) for x in csv.DictReader(fd)]
)
@output_materialization_config(Path)
def df_output_schema(_context, path, value):
with open(path, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=value[0].keys())
writer.writeheader()
writer.writerows(rowdicts=value)
return Materialization.file(path)
PoorMansDataFrame = as_dagster_type(
PoorMansDataFrame_,
input_hydration_config=df_input_schema,
output_materialization_config=df_output_schema,
)
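# Editor's note (not in the original test): registering the type this way is what lets
# the tests below pass a bare CSV path in environment_dict -- df_input_schema reads the
# file into PoorMansDataFrame_ rows, and df_output_schema can materialize a result back
# to CSV.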
def get_events_of_type(events, event_type):
return [
event
for event in events
if event.is_dagster_event and event.dagster_event.event_type == event_type
]
def test_running():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'passing_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=passing_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, passing_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
events = instance.all_logs(run_id)
assert events
process_start_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_START)
assert len(process_start_events) == 1
process_started_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_STARTED)
assert len(process_started_events) == 1
process_exited_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_EXITED)
assert len(process_exited_events) == 1
def test_failing():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'failing_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=failing_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, failing_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
assert instance.all_logs(run_id)
def test_execution_crash():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'crashy_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=crashy_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, crashy_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
last_log = instance.all_logs(run_id)[-1]
assert last_log.message.startswith(
'Exception: Pipeline execution process for {run_id} unexpectedly exited\n'.format(
run_id=run_id
)
)
@lambda_solid(
input_defs=[InputDefinition('num', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def sum_solid(num):
sum_df = deepcopy(num)
for x in sum_df:
x['sum'] = x['num1'] + x['num2']
return PoorMansDataFrame(sum_df)
@lambda_solid(
input_defs=[InputDefinition('sum_df', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def error_solid(sum_df): # pylint: disable=W0613
raise Exception('foo')
@lambda_solid(
input_defs=[InputDefinition('sum_df', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def crashy_solid(sum_df): # pylint: disable=W0613
os._exit(1) # pylint: disable=W0212
@pipeline
def passing_pipeline():
return sum_solid()
@pipeline
def failing_pipeline():
return error_solid(sum_solid())
@pipeline
def crashy_pipeline():
crashy_solid(sum_solid())
@solid(config={'foo': Field(String)})
def node_a(context):
return context.solid_config['foo']
@solid(config={'bar': Int})
def node_b(context, input_):
return input_ * context.solid_config['bar']
@composite_solid
def composite_with_nested_config_solid():
return node_b(node_a())
@pipeline
def composite_pipeline():
return composite_with_nested_config_solid()
@composite_solid(
config_fn=lambda _, cfg: {
'node_a': {'config': {'foo': cfg['foo']}},
'node_b': {'config': {'bar': cfg['bar']}},
},
config={'foo': Field(String), 'bar': Int},
)
def composite_with_nested_config_solid_and_config_mapping():
return node_b(node_a())
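# Editor's note (not in the original test): the config_fn above maps the composite's
# flat {'foo': ..., 'bar': ...} config onto the nested node_a/node_b solid configs,
# which is what test_multiprocessing_execution_for_composite_solid_with_config_mapping
# exercises further down.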
@pipeline
def composite_pipeline_with_config_mapping():
return composite_with_nested_config_solid_and_config_mapping()
def test_multiprocessing_execution_for_composite_solid():
environment_dict = {
'solids': {
'composite_with_nested_config_solid': {
'solids': {'node_a': {'config': {'foo': 'baz'}}, 'node_b': {'config': {'bar': 3}}}
}
}
}
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'composite_pipeline')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
environment_dict = {
'solids': {
'composite_with_nested_config_solid': {
'solids': {'node_a': {'config': {'foo': 'baz'}}, 'node_b': {'config': {'bar': 3}}}
}
},
'execution': {'multiprocess': {}},
'storage': {'filesystem': {}},
}
run_id = make_new_run_id()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
def test_multiprocessing_execution_for_composite_solid_with_config_mapping():
environment_dict = {
'solids': {
'composite_with_nested_config_solid_and_config_mapping': {
'config': {'foo': 'baz', 'bar': 3}
}
}
}
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(
__file__, 'composite_pipeline_with_config_mapping'
)
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline_with_config_mapping.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(
handle, composite_pipeline_with_config_mapping, pipeline_run, instance
)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
environment_dict = {
'solids': {
'composite_with_nested_config_solid_and_config_mapping': {
'config': {'foo': 'baz', 'bar': 3}
}
},
'execution': {'multiprocess': {}},
'storage': {'filesystem': {}},
}
run_id = make_new_run_id()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
@solid(config={'file': Field(Path)})
def loop(context):
with open(context.solid_config['file'], 'w') as ff:
ff.write('yup')
while True:
time.sleep(0.1)
@pipeline
def infinite_loop_pipeline():
loop()
def test_has_run_query_and_terminate():
run_id_one = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
instance = DagsterInstance.local_temp()
with safe_tempfile_path() as path:
pipeline_run = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': path}}}},
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, pipeline_run, instance)
while not os.path.exists(path):
time.sleep(0.1)
assert os.path.exists(path)
assert execution_manager.is_process_running(run_id_one)
assert execution_manager.terminate(run_id_one)
assert instance.get_run_by_id(run_id_one).is_finished
assert not execution_manager.is_process_running(run_id_one)
assert not execution_manager.terminate(run_id_one)
assert not os.path.exists(path)
def test_two_runs_running():
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
with safe_tempfile_path() as file_one, safe_tempfile_path() as file_two:
instance = DagsterInstance.local_temp()
execution_manager = SubprocessExecutionManager(instance)
pipeline_run_one = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': file_one}}}},
)
)
execution_manager.execute_pipeline(
handle, infinite_loop_pipeline, pipeline_run_one, instance
)
pipeline_run_two = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_two,
environment_dict={'solids': {'loop': {'config': {'file': file_two}}}},
)
)
execution_manager.execute_pipeline(
handle, infinite_loop_pipeline, pipeline_run_two, instance
)
# ensure both runs have begun execution
        while not os.path.exists(file_one) or not os.path.exists(file_two):
time.sleep(0.1)
assert execution_manager.is_process_running(run_id_one)
assert execution_manager.is_process_running(run_id_two)
assert execution_manager.terminate(run_id_one)
assert not execution_manager.is_process_running(run_id_one)
assert execution_manager.is_process_running(run_id_two)
assert execution_manager.terminate(run_id_two)
assert not execution_manager.is_process_running(run_id_one)
assert not execution_manager.is_process_running(run_id_two)
def test_max_concurrency_zero():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
with safe_tempfile_path() as filepath:
instance = DagsterInstance.local_temp()
execution_manager = QueueingSubprocessExecutionManager(instance, max_concurrent_runs=0)
pipeline_run = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id,
environment_dict={'solids': {'loop': {'config': {'file': filepath}}}},
)
)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, pipeline_run, instance)
assert not execution_manager.is_active(run_id)
assert not os.path.exists(filepath)
def test_max_concurrency_one():
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
with safe_tempfile_path() as file_one, safe_tempfile_path() as file_two:
instance = DagsterInstance.local_temp()
execution_manager = QueueingSubprocessExecutionManager(instance, max_concurrent_runs=1)
run_one = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': file_one}}}},
)
)
run_two = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_two,
environment_dict={'solids': {'loop': {'config': {'file': file_two}}}},
)
)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, run_one, instance)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, run_two, instance)
while not os.path.exists(file_one):
execution_manager.check()
time.sleep(0.1)
assert execution_manager.is_active(run_id_one)
assert not execution_manager.is_active(run_id_two)
assert not os.path.exists(file_two)
assert execution_manager.terminate(run_id_one)
while not os.path.exists(file_two):
execution_manager.check()
time.sleep(0.1)
assert not execution_manager.is_active(run_id_one)
assert execution_manager.is_active(run_id_two)
assert execution_manager.terminate(run_id_two)
| import csv
import os
import time
from collections import OrderedDict
from copy import deepcopy
from dagster_graphql.implementation.pipeline_execution_manager import (
QueueingSubprocessExecutionManager,
SubprocessExecutionManager,
)
from dagster import (
ExecutionTargetHandle,
Field,
InputDefinition,
Int,
Materialization,
OutputDefinition,
Path,
String,
as_dagster_type,
composite_solid,
input_hydration_config,
lambda_solid,
output_materialization_config,
pipeline,
solid,
)
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.events import DagsterEventType
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.utils import make_new_run_id
from dagster.utils import file_relative_path, safe_tempfile_path
class PoorMansDataFrame_(list):
pass
@input_hydration_config(Path)
def df_input_schema(_context, path):
with open(path, 'r') as fd:
return PoorMansDataFrame_(
[OrderedDict(sorted(x.items(), key=lambda x: x[0])) for x in csv.DictReader(fd)]
)
@output_materialization_config(Path)
def df_output_schema(_context, path, value):
with open(path, 'w') as fd:
writer = csv.DictWriter(fd, fieldnames=value[0].keys())
writer.writeheader()
writer.writerows(rowdicts=value)
return Materialization.file(path)
PoorMansDataFrame = as_dagster_type(
PoorMansDataFrame_,
input_hydration_config=df_input_schema,
output_materialization_config=df_output_schema,
)
def get_events_of_type(events, event_type):
return [
event
for event in events
if event.is_dagster_event and event.dagster_event.event_type == event_type
]
def test_running():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'passing_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=passing_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, passing_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
events = instance.all_logs(run_id)
assert events
process_start_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_START)
assert len(process_start_events) == 1
process_started_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_STARTED)
assert len(process_started_events) == 1
process_exited_events = get_events_of_type(events, DagsterEventType.PIPELINE_PROCESS_EXITED)
assert len(process_exited_events) == 1
def test_failing():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'failing_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=failing_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, failing_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
assert instance.all_logs(run_id)
def test_execution_crash():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'crashy_pipeline')
environment_dict = {
'solids': {'sum_solid': {'inputs': {'num': file_relative_path(__file__, 'data/num.csv')}}}
}
selector = ExecutionSelector('csv_hello_world')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=crashy_pipeline.name,
run_id=run_id,
selector=selector,
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, crashy_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
last_log = instance.all_logs(run_id)[-1]
assert last_log.message.startswith(
'Exception: Pipeline execution process for {run_id} unexpectedly exited\n'.format(
run_id=run_id
)
)
@lambda_solid(
input_defs=[InputDefinition('num', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def sum_solid(num):
sum_df = deepcopy(num)
for x in sum_df:
x['sum'] = x['num1'] + x['num2']
return PoorMansDataFrame(sum_df)
@lambda_solid(
input_defs=[InputDefinition('sum_df', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def error_solid(sum_df): # pylint: disable=W0613
raise Exception('foo')
@lambda_solid(
input_defs=[InputDefinition('sum_df', PoorMansDataFrame)],
output_def=OutputDefinition(PoorMansDataFrame),
)
def crashy_solid(sum_df): # pylint: disable=W0613
os._exit(1) # pylint: disable=W0212
@pipeline
def passing_pipeline():
return sum_solid()
@pipeline
def failing_pipeline():
return error_solid(sum_solid())
@pipeline
def crashy_pipeline():
crashy_solid(sum_solid())
@solid(config={'foo': Field(String)})
def node_a(context):
return context.solid_config['foo']
@solid(config={'bar': Int})
def node_b(context, input_):
return input_ * context.solid_config['bar']
@composite_solid
def composite_with_nested_config_solid():
return node_b(node_a())
@pipeline
def composite_pipeline():
return composite_with_nested_config_solid()
@composite_solid(
config_fn=lambda _, cfg: {
'node_a': {'config': {'foo': cfg['foo']}},
'node_b': {'config': {'bar': cfg['bar']}},
},
config={'foo': Field(String), 'bar': Int},
)
def composite_with_nested_config_solid_and_config_mapping():
return node_b(node_a())
@pipeline
def composite_pipeline_with_config_mapping():
return composite_with_nested_config_solid_and_config_mapping()
def test_multiprocessing_execution_for_composite_solid():
environment_dict = {
'solids': {
'composite_with_nested_config_solid': {
'solids': {'node_a': {'config': {'foo': 'baz'}}, 'node_b': {'config': {'bar': 3}}}
}
}
}
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'composite_pipeline')
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
environment_dict = {
'solids': {
'composite_with_nested_config_solid': {
'solids': {'node_a': {'config': {'foo': 'baz'}}, 'node_b': {'config': {'bar': 3}}}
}
},
'execution': {'multiprocess': {}},
'storage': {'filesystem': {}},
}
run_id = make_new_run_id()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
def test_multiprocessing_execution_for_composite_solid_with_config_mapping():
environment_dict = {
'solids': {
'composite_with_nested_config_solid_and_config_mapping': {
'config': {'foo': 'baz', 'bar': 3}
}
}
}
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(
__file__, 'composite_pipeline_with_config_mapping'
)
instance = DagsterInstance.local_temp()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline_with_config_mapping.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(
handle, composite_pipeline_with_config_mapping, pipeline_run, instance
)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
environment_dict = {
'solids': {
'composite_with_nested_config_solid_and_config_mapping': {
'config': {'foo': 'baz', 'bar': 3}
}
},
'execution': {'multiprocess': {}},
'storage': {'filesystem': {}},
}
run_id = make_new_run_id()
pipeline_run = instance.create_run(
PipelineRun(
pipeline_name=composite_pipeline.name,
run_id=run_id,
selector=ExecutionSelector('nonce'),
environment_dict=environment_dict,
mode='default',
reexecution_config=None,
step_keys_to_execute=None,
tags=None,
status=PipelineRunStatus.NOT_STARTED,
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, composite_pipeline, pipeline_run, instance)
execution_manager.join()
assert instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
@solid(config={'file': Field(Path)})
def loop(context):
with open(context.solid_config['file'], 'w') as ff:
ff.write('yup')
while True:
time.sleep(0.1)
@pipeline
def infinite_loop_pipeline():
loop()
def test_has_run_query_and_terminate():
run_id_one = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
instance = DagsterInstance.local_temp()
with safe_tempfile_path() as path:
pipeline_run = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': path}}}},
)
)
execution_manager = SubprocessExecutionManager(instance)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, pipeline_run, instance)
while not os.path.exists(path):
time.sleep(0.1)
assert os.path.exists(path)
assert execution_manager.is_process_running(run_id_one)
assert execution_manager.terminate(run_id_one)
assert instance.get_run_by_id(run_id_one).is_finished
assert not execution_manager.is_process_running(run_id_one)
assert not execution_manager.terminate(run_id_one)
assert not os.path.exists(path)
def test_two_runs_running():
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
with safe_tempfile_path() as file_one, safe_tempfile_path() as file_two:
instance = DagsterInstance.local_temp()
execution_manager = SubprocessExecutionManager(instance)
pipeline_run_one = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': file_one}}}},
)
)
execution_manager.execute_pipeline(
handle, infinite_loop_pipeline, pipeline_run_one, instance
)
pipeline_run_two = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_two,
environment_dict={'solids': {'loop': {'config': {'file': file_two}}}},
)
)
execution_manager.execute_pipeline(
handle, infinite_loop_pipeline, pipeline_run_two, instance
)
# ensure both runs have begun execution
while not os.path.exists(file_one) and not os.path.exists(file_two):
time.sleep(0.1)
assert execution_manager.is_process_running(run_id_one)
assert execution_manager.is_process_running(run_id_two)
assert execution_manager.terminate(run_id_one)
assert not execution_manager.is_process_running(run_id_one)
assert execution_manager.is_process_running(run_id_two)
assert execution_manager.terminate(run_id_two)
assert not execution_manager.is_process_running(run_id_one)
assert not execution_manager.is_process_running(run_id_two)
def test_max_concurrency_zero():
run_id = make_new_run_id()
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
with safe_tempfile_path() as filepath:
instance = DagsterInstance.local_temp()
execution_manager = QueueingSubprocessExecutionManager(instance, max_concurrent_runs=0)
pipeline_run = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id,
environment_dict={'solids': {'loop': {'config': {'file': filepath}}}},
)
)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, pipeline_run, instance)
assert not execution_manager.is_active(run_id)
assert not os.path.exists(filepath)
def test_max_concurrency_one():
handle = ExecutionTargetHandle.for_pipeline_python_file(__file__, 'infinite_loop_pipeline')
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
with safe_tempfile_path() as file_one, safe_tempfile_path() as file_two:
instance = DagsterInstance.local_temp()
execution_manager = QueueingSubprocessExecutionManager(instance, max_concurrent_runs=1)
run_one = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_one,
environment_dict={'solids': {'loop': {'config': {'file': file_one}}}},
)
)
run_two = instance.create_run(
PipelineRun.create_empty_run(
pipeline_name=infinite_loop_pipeline.name,
run_id=run_id_two,
environment_dict={'solids': {'loop': {'config': {'file': file_two}}}},
)
)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, run_one, instance)
execution_manager.execute_pipeline(handle, infinite_loop_pipeline, run_two, instance)
while not os.path.exists(file_one):
execution_manager.check()
time.sleep(0.1)
assert execution_manager.is_active(run_id_one)
assert not execution_manager.is_active(run_id_two)
assert not os.path.exists(file_two)
assert execution_manager.terminate(run_id_one)
while not os.path.exists(file_two):
execution_manager.check()
time.sleep(0.1)
assert not execution_manager.is_active(run_id_one)
assert execution_manager.is_active(run_id_two)
assert execution_manager.terminate(run_id_two)
| en | 0.559351 | # pylint: disable=W0613 # pylint: disable=W0613 # pylint: disable=W0212 # ensure both runs have begun execution | 2.265943 | 2 |
src/gpt2/train_model.py | azadyasar/GPT2 | 0 | 6632282 | import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from gpt2.utils import fusing
from gpt2.modeling import Transformer
from gpt2.data import Dataset, VocabSP, VocabYTTM, TokenizedCorpus, MTCorpus
from gpt2.training import TrainConfig, TrainingSpec, Trainer
from typing import Tuple, Iterator, Dict
class GPT2TrainingSpec(TrainingSpec):
def __init__(self, train_corpus: str, eval_corpus: str, vocab_path: str,
seq_len: int, layers: int, heads: int, dims: int, rate: int,
dropout: float, base_lr: float, wd_rate: float,
total_steps: int, use_grad_ckpt: bool, is_sentencepiece: bool = True,
finetune_nmt: bool = False):
self.train_corpus = train_corpus
self.eval_corpus = eval_corpus
self.vocab_path = vocab_path
self.seq_len = seq_len
self.layers = layers
self.heads = heads
self.dims = dims
self.rate = rate
self.dropout = dropout
self.base_lr = base_lr
self.wd_rate = wd_rate
self.total_steps = total_steps
self.use_grad_ckpt = use_grad_ckpt
self.is_sentencepiece = is_sentencepiece
self.finetune_nmt = finetune_nmt
def initialize(self):
if self.is_sentencepiece:
self.vocab = VocabSP(tokenizer_path=self.vocab_path)
else:
self.vocab = VocabYTTM(tokenizer_path=self.vocab_path)
self.criterion = nn.CrossEntropyLoss(ignore_index=self.vocab.pad_idx,
reduction='mean')
def prepare_datasets(self) -> Tuple[Dataset, Dataset]:
if not self.finetune_nmt:
train_dataset = TokenizedCorpus(corpus_path=self.train_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
eval_dataset = TokenizedCorpus(corpus_path=self.eval_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
else:
train_dataset = MTCorpus(corpus_path=self.train_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
eval_dataset = MTCorpus(corpus_path=self.eval_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
return train_dataset, eval_dataset
def construct_model(self) -> nn.Module:
return Transformer(layers=self.layers, pad_idx=self.vocab.pad_idx,
words=len(self.vocab), seq_len=self.seq_len,
heads=self.heads, dims=self.dims, rate=self.rate,
dropout=self.dropout, bidirectional=False)
def create_optimizer(self, params: Iterator[nn.Parameter]
) -> Tuple[optim.Optimizer,
optim.lr_scheduler._LRScheduler]:
optimizer = fusing.Adam(
params, lr=self.base_lr, weight_decay=self.wd_rate)
scheduler = optim.lr_scheduler.LambdaLR(
optimizer, lambda step: 1 - step / self.total_steps)
return optimizer, scheduler
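    # Editor's note (not in the original source): the LambdaLR above multiplies the
    # base learning rate by (1 - step / total_steps), i.e. a linear decay from
    # base_lr down to zero over the configured number of training steps.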
def train_objective(self, data: Dict[str, torch.Tensor], model: nn.Module
) -> Dict[str, torch.Tensor]:
logits = model(data['input'], use_grad_ckpt=self.use_grad_ckpt)
loss = self.criterion(logits.transpose(1, 2), data['output'])
return {'loss': loss}
def eval_objective(self, data: Dict[str, torch.Tensor], model: nn.Module
) -> Dict[str, torch.Tensor]:
logits, _ = model(data['input'], past=None)
loss = self.criterion(logits.transpose(1, 2), data['output'])
return {'loss': loss}
def train_gpt2_model(args: argparse.Namespace):
spec = GPT2TrainingSpec(
train_corpus=args.train_corpus, eval_corpus=args.eval_corpus,
vocab_path=args.vocab_path, seq_len=args.seq_len, layers=args.layers,
heads=args.heads, dims=args.dims, rate=args.rate, dropout=args.dropout,
base_lr=args.base_lr, wd_rate=args.wd_rate,
total_steps=args.total_steps, use_grad_ckpt=args.use_grad_ckpt,
        is_sentencepiece=args.is_sp == 1,
        finetune_nmt=args.ft_nmt)
config = TrainConfig(
batch_train=args.batch_train, batch_eval=args.batch_eval,
total_steps=args.total_steps, eval_steps=args.eval_steps,
eval_size=args.eval_size, save_steps=args.save_steps,
save_model_path=args.save_model_path, save_checkpoint_path=args.save_checkpoint_path,
description='Train GPT-2 model', log_format='train/loss: {train_loss:.4f}, eval/loss: {eval_loss:.4f}, lr: {lr:.5f}',
use_amp=args.use_amp, gpus=args.gpus)
Trainer(spec, config).train(from_checkpoint=args.from_checkpoint,
from_pretrained=args.from_pretrained)
def add_subparser(subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser('train', help='train GPT-2 model')
group = parser.add_argument_group('Corpus and vocabulary')
group.add_argument('--train_corpus', required=True,
help='training corpus file path')
group.add_argument('--eval_corpus', required=True,
help='evaluation corpus file path')
group.add_argument('--vocab_path', required=True,
help='vocabulary file path')
group.add_argument('--is_sp', type=int,
help='is tokenizer a sentencepiece model')
group = parser.add_argument_group('Model configurations')
group.add_argument('--seq_len', default=64, type=int,
help='maximum sequence length')
group.add_argument('--layers', default=12, type=int,
help='number of transformer layers')
group.add_argument('--heads', default=16, type=int,
help='number of multi-heads in attention layer')
group.add_argument('--dims', default=784, type=int,
help='dimension of representation in each layer')
group.add_argument('--rate', default=4, type=int,
help='increase rate of dimensionality in bottleneck')
group.add_argument('--dropout', default=0.25, type=float,
help='probability that each element is dropped')
group = parser.add_argument_group('Training and evaluation')
group.add_argument('--batch_train', default=64, type=int,
help='number of training batch size')
group.add_argument('--batch_eval', default=64, type=int,
help='number of evaluation batch size')
group.add_argument('--base_lr', default=1e-4, type=float,
help='default learning rate')
group.add_argument('--wd_rate', default=1e-2, type=float,
help='weight decay rate')
group.add_argument('--total_steps', default=250000, type=int,
help='number of total training steps')
group.add_argument('--eval_steps', default=500, type=int,
help='period to evaluate model and record metrics')
group.add_argument('--eval_size', default=10, type=int,
help="number of batches to use when evaluating")
group.add_argument('--save_steps', default=1000, type=int,
help='period to save training state to checkpoint')
group = parser.add_argument_group('Saving and restoring')
group.add_argument('--save_model_path', default='model.pth',
help='save trained model weights to the file')
group.add_argument('--save_checkpoint_path', default='checkpoint.pth',
help='save training state to the checkpoint file')
group.add_argument('--from_checkpoint', default=None,
help='load last training state from checkpoint file')
group.add_argument('--from_pretrained', default=None,
help='initialize parameters from pretrained model')
group = parser.add_argument_group('Extensions')
group.add_argument('--ft_nmt', action='store_true',
help='fine-tune for neural machine translation')
group.add_argument('--use_amp', action='store_true',
help='use automatic mixed-precision in training')
group.add_argument('--use_grad_ckpt', action='store_true',
help='use gradient checkpointing in transformer layers')
group.add_argument('--gpus', default=None, type=int,
help='number of gpu devices to use in training')
parser.set_defaults(func=train_gpt2_model)
| import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from gpt2.utils import fusing
from gpt2.modeling import Transformer
from gpt2.data import Dataset, VocabSP, VocabYTTM, TokenizedCorpus, MTCorpus
from gpt2.training import TrainConfig, TrainingSpec, Trainer
from typing import Tuple, Iterator, Dict
class GPT2TrainingSpec(TrainingSpec):
def __init__(self, train_corpus: str, eval_corpus: str, vocab_path: str,
seq_len: int, layers: int, heads: int, dims: int, rate: int,
dropout: float, base_lr: float, wd_rate: float,
total_steps: int, use_grad_ckpt: bool, is_sentencepiece: bool = True,
finetune_nmt: bool = False):
self.train_corpus = train_corpus
self.eval_corpus = eval_corpus
self.vocab_path = vocab_path
self.seq_len = seq_len
self.layers = layers
self.heads = heads
self.dims = dims
self.rate = rate
self.dropout = dropout
self.base_lr = base_lr
self.wd_rate = wd_rate
self.total_steps = total_steps
self.use_grad_ckpt = use_grad_ckpt
self.is_sentencepiece = is_sentencepiece
self.finetune_nmt = finetune_nmt
def initialize(self):
if self.is_sentencepiece:
self.vocab = VocabSP(tokenizer_path=self.vocab_path)
else:
self.vocab = VocabYTTM(tokenizer_path=self.vocab_path)
self.criterion = nn.CrossEntropyLoss(ignore_index=self.vocab.pad_idx,
reduction='mean')
def prepare_datasets(self) -> Tuple[Dataset, Dataset]:
if not self.finetune_nmt:
train_dataset = TokenizedCorpus(corpus_path=self.train_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
eval_dataset = TokenizedCorpus(corpus_path=self.eval_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
else:
train_dataset = MTCorpus(corpus_path=self.train_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
eval_dataset = MTCorpus(corpus_path=self.eval_corpus,
vocab=self.vocab,
seq_len=self.seq_len)
return train_dataset, eval_dataset
def construct_model(self) -> nn.Module:
return Transformer(layers=self.layers, pad_idx=self.vocab.pad_idx,
words=len(self.vocab), seq_len=self.seq_len,
heads=self.heads, dims=self.dims, rate=self.rate,
dropout=self.dropout, bidirectional=False)
def create_optimizer(self, params: Iterator[nn.Parameter]
) -> Tuple[optim.Optimizer,
optim.lr_scheduler._LRScheduler]:
optimizer = fusing.Adam(
params, lr=self.base_lr, weight_decay=self.wd_rate)
scheduler = optim.lr_scheduler.LambdaLR(
optimizer, lambda step: 1 - step / self.total_steps)
return optimizer, scheduler
def train_objective(self, data: Dict[str, torch.Tensor], model: nn.Module
) -> Dict[str, torch.Tensor]:
logits = model(data['input'], use_grad_ckpt=self.use_grad_ckpt)
loss = self.criterion(logits.transpose(1, 2), data['output'])
return {'loss': loss}
def eval_objective(self, data: Dict[str, torch.Tensor], model: nn.Module
) -> Dict[str, torch.Tensor]:
logits, _ = model(data['input'], past=None)
loss = self.criterion(logits.transpose(1, 2), data['output'])
return {'loss': loss}
def train_gpt2_model(args: argparse.Namespace):
spec = GPT2TrainingSpec(
train_corpus=args.train_corpus, eval_corpus=args.eval_corpus,
vocab_path=args.vocab_path, seq_len=args.seq_len, layers=args.layers,
heads=args.heads, dims=args.dims, rate=args.rate, dropout=args.dropout,
base_lr=args.base_lr, wd_rate=args.wd_rate,
total_steps=args.total_steps, use_grad_ckpt=args.use_grad_ckpt,
is_sentencepiece=args.is_sp == 1)
config = TrainConfig(
batch_train=args.batch_train, batch_eval=args.batch_eval,
total_steps=args.total_steps, eval_steps=args.eval_steps,
eval_size=args.eval_size, save_steps=args.save_steps,
save_model_path=args.save_model_path, save_checkpoint_path=args.save_checkpoint_path,
description='Train GPT-2 model', log_format='train/loss: {train_loss:.4f}, eval/loss: {eval_loss:.4f}, lr: {lr:.5f}',
use_amp=args.use_amp, gpus=args.gpus)
Trainer(spec, config).train(from_checkpoint=args.from_checkpoint,
from_pretrained=args.from_pretrained)
def add_subparser(subparsers: argparse._SubParsersAction):
parser = subparsers.add_parser('train', help='train GPT-2 model')
group = parser.add_argument_group('Corpus and vocabulary')
group.add_argument('--train_corpus', required=True,
help='training corpus file path')
group.add_argument('--eval_corpus', required=True,
help='evaluation corpus file path')
group.add_argument('--vocab_path', required=True,
help='vocabulary file path')
group.add_argument('--is_sp', type=int,
help='is tokenizer a sentencepiece model')
group = parser.add_argument_group('Model configurations')
group.add_argument('--seq_len', default=64, type=int,
help='maximum sequence length')
group.add_argument('--layers', default=12, type=int,
help='number of transformer layers')
group.add_argument('--heads', default=16, type=int,
help='number of multi-heads in attention layer')
group.add_argument('--dims', default=784, type=int,
help='dimension of representation in each layer')
group.add_argument('--rate', default=4, type=int,
help='increase rate of dimensionality in bottleneck')
group.add_argument('--dropout', default=0.25, type=float,
help='probability that each element is dropped')
group = parser.add_argument_group('Training and evaluation')
group.add_argument('--batch_train', default=64, type=int,
help='number of training batch size')
group.add_argument('--batch_eval', default=64, type=int,
help='number of evaluation batch size')
group.add_argument('--base_lr', default=1e-4, type=float,
help='default learning rate')
group.add_argument('--wd_rate', default=1e-2, type=float,
help='weight decay rate')
group.add_argument('--total_steps', default=250000, type=int,
help='number of total training steps')
group.add_argument('--eval_steps', default=500, type=int,
help='period to evaluate model and record metrics')
group.add_argument('--eval_size', default=10, type=int,
help="number of batches to use when evaluating")
group.add_argument('--save_steps', default=1000, type=int,
help='period to save training state to checkpoint')
group = parser.add_argument_group('Saving and restoring')
group.add_argument('--save_model_path', default='model.pth',
help='save trained model weights to the file')
group.add_argument('--save_checkpoint_path', default='checkpoint.pth',
help='save training state to the checkpoint file')
group.add_argument('--from_checkpoint', default=None,
help='load last training state from checkpoint file')
group.add_argument('--from_pretrained', default=None,
help='initialize parameters from pretrained model')
group = parser.add_argument_group('Extensions')
group.add_argument('--ft_nmt', action='store_true',
help='fine-tune for neural machine translation')
group.add_argument('--use_amp', action='store_true',
help='use automatic mixed-precision in training')
group.add_argument('--use_grad_ckpt', action='store_true',
help='use gradient checkpointing in transformer layers')
group.add_argument('--gpus', default=None, type=int,
help='number of gpu devices to use in training')
parser.set_defaults(func=train_gpt2_model) | none | 1 | 2.281202 | 2 |
|
setup.py | theNded/pymagsac | 59 | 6632283 | #! /usr/bin/env python3
import os
import re
import sys
import sysconfig
import platform
import subprocess
from distutils.version import LooseVersion
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
from shutil import copyfile, copymode
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
# Copy *_test file to tests directory
#test_bin = os.path.join(self.build_temp, 'python_cpp_example_test')
#self.copy_test_file(test_bin)
print() # Add an empty line for cleaner output
def copy_test_file(self, src_file):
'''
Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
        By default, messages like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
'''
# Create directory if needed
dest_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'tests', 'bin')
if dest_dir != "" and not os.path.exists(dest_dir):
print("creating directory {}".format(dest_dir))
os.makedirs(dest_dir)
# Copy file
dest_file = os.path.join(dest_dir, os.path.basename(src_file))
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
setup(
name='pymagsac',
version='0.2',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
description='MAGSAC: RANSAC for F and H',
long_description='',
packages=find_packages('src'),
package_dir={'':'src'},
ext_modules=[CMakeExtension('pymagsac/pymagsac')],
cmdclass=dict(build_ext=CMakeBuild),
#test_suite='tests',
zip_safe=False,
)
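# Editor's note (illustrative, not part of the original setup.py): running
#   pip install .
# in this directory invokes CMakeBuild.build_extension, which configures and builds
# the CMake project and places the resulting pymagsac extension module in the
# package's build output directory.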
| #! /usr/bin/env python3
import os
import re
import sys
import sysconfig
import platform
import subprocess
from distutils.version import LooseVersion
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
from shutil import copyfile, copymode
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
# Copy *_test file to tests directory
#test_bin = os.path.join(self.build_temp, 'python_cpp_example_test')
#self.copy_test_file(test_bin)
print() # Add an empty line for cleaner output
def copy_test_file(self, src_file):
'''
Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
By default, message like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
'''
# Create directory if needed
dest_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'tests', 'bin')
if dest_dir != "" and not os.path.exists(dest_dir):
print("creating directory {}".format(dest_dir))
os.makedirs(dest_dir)
# Copy file
dest_file = os.path.join(dest_dir, os.path.basename(src_file))
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
setup(
name='pymagsac',
version='0.2',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
description='MAGSAC: RANSAC for F and H',
long_description='',
packages=find_packages('src'),
package_dir={'':'src'},
ext_modules=[CMakeExtension('pymagsac/pymagsac')],
cmdclass=dict(build_ext=CMakeBuild),
#test_suite='tests',
zip_safe=False,
)
| en | 0.443409 | #! /usr/bin/env python3 # Copy *_test file to tests directory #test_bin = os.path.join(self.build_temp, 'python_cpp_example_test') #self.copy_test_file(test_bin) # Add an empty line for cleaner output Copy ``src_file`` to ``dest_file`` ensuring parent directory exists. By default, message like `creating directory /path/to/package` and `copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build. # Create directory if needed # Copy file #test_suite='tests', | 1.97349 | 2 |
core/entities/jello.py | TheArchives/Nexus | 1 | 6632284 | # iCraft is Copyright 2010 both
#
# The Archives team:
# <<NAME>> <EMAIL> AKA "Adam01"
# <<NAME>> <EMAIL> AKA "Aera"
# <<NAME>> <EMAIL> AKA "revenant"
# <<NAME>> <EMAIL> AKA "gdude2002"
#
# And,
#
# The iCraft team:
# <<NAME>> <EMAIL> AKA "tehcid"
# <<NAME>> <EMAIL> AKA "gothfox"
# <<NAME>> <EMAIL> AKA "AndrewPH"
# <<NAME>> <EMAIL> AKA "PixelEater"
# <<NAME>> <EMAIL> AKA "Varriount"
# <<NAME>> <EMAIL> AKA "iKJames"
# <<NAME>> <EMAIL> AKA "erronjason"
# <<NAME>> <EMAIL> AKA "sk8rjwd"
# <<NAME>> <EMAIL> AKA "destroyerx1"
# <<NAME>> <EMAIL> AKA "Fooblock"
# <<NAME>> <EMAIL> AKA "NotMeh"
# <<NAME>> <EMAIL> AKA "eugo"
# <<NAME>> <EMAIL> AKA "Saanix"
# <<NAME>> <EMAIL> AKA "ntfwc"
# <<NAME>> <EMAIL> AKA "Dwarfy"
# <<NAME>> <EMAIL> AKA "goober"
# <<NAME>> <EMAIL> AKA "willempiee"
#
# Disclaimer: Parts of this code may have been contributed by the end-users.
#
# iCraft is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# Or, send a letter to Creative Commons, 171 2nd Street,
# Suite 300, San Francisco, California, 94105, USA.
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(x,y-1,z)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = (x,y-1,z)
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
closestposition = (0,0)
closestclient = None
closestdistance = None
for entry in userpositionlist:
client = entry[0]
var_pos = entry[1]
i,j,k = var_pos
distance = ((i-x)**2+(j-y)**2+(k-z)**2)**0.5
if closestdistance == None:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
else:
if distance < closestdistance:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
if closestdistance < 2:
sx,sy,sz,sh = world.spawn
closestclient.teleportTo(sx,sy,sz,sh)
self.client.sendPlainWorldMessage("&d%s has been jellofied." % closestclient.username)
i,k = closestposition
distance = ((i-x)**2+(k-z)**2)**0.5
if distance != 0:
target = [int((i-x)/(distance/1.75)) + x,y,int((k-z)/(distance/1.75)) + z]
i,j,k = target
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
var_cango = True
target[1] = target[1] + 1
j = target[1]
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
| # iCraft is Copyright 2010 both
#
# The Archives team:
# <<NAME>> <EMAIL> AKA "Adam01"
# <<NAME>> <EMAIL> AKA "Aera"
# <<NAME>> <EMAIL> AKA "revenant"
# <<NAME>> <EMAIL> AKA "gdude2002"
#
# And,
#
# The iCraft team:
# <<NAME>> <EMAIL> AKA "tehcid"
# <<NAME>> <EMAIL> AKA "gothfox"
# <<NAME>> <EMAIL> AKA "AndrewPH"
# <<NAME>> <EMAIL> AKA "PixelEater"
# <<NAME>> <EMAIL> AKA "Varriount"
# <<NAME>> <EMAIL> AKA "iKJames"
# <<NAME>> <EMAIL> AKA "erronjason"
# <<NAME>> <EMAIL> AKA "sk8rjwd"
# <<NAME>> <EMAIL> AKA "destroyerx1"
# <<NAME>> <EMAIL> AKA "Fooblock"
# <<NAME>> <EMAIL> AKA "NotMeh"
# <<NAME>> <EMAIL> AKA "eugo"
# <<NAME>> <EMAIL> AKA "Saanix"
# <<NAME>> <EMAIL> AKA "ntfwc"
# <<NAME>> <EMAIL> AKA "Dwarfy"
# <<NAME>> <EMAIL> AKA "goober"
# <<NAME>> <EMAIL> AKA "willempiee"
#
# Disclaimer: Parts of this code may have been contributed by the end-users.
#
# iCraft is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# Or, send a letter to Creative Commons, 171 2nd Street,
# Suite 300, San Francisco, California, 94105, USA.
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(x,y-1,z)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = (x,y-1,z)
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
closestposition = (0,0)
closestclient = None
closestdistance = None
for entry in userpositionlist:
client = entry[0]
var_pos = entry[1]
i,j,k = var_pos
distance = ((i-x)**2+(j-y)**2+(k-z)**2)**0.5
if closestdistance == None:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
else:
if distance < closestdistance:
closestdistance = distance
closestclient = client
closestposition = (var_pos[0],var_pos[2])
if closestdistance < 2:
sx,sy,sz,sh = world.spawn
closestclient.teleportTo(sx,sy,sz,sh)
self.client.sendPlainWorldMessage("&d%s has been jellofied." % closestclient.username)
i,k = closestposition
distance = ((i-x)**2+(k-z)**2)**0.5
if distance != 0:
target = [int((i-x)/(distance/1.75)) + x,y,int((k-z)/(distance/1.75)) + z]
i,j,k = target
var_cango = True
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
else:
var_cango = True
target[1] = target[1] + 1
j = target[1]
try:
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)])
if blocktocheck != 0:
var_cango = False
blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)])
if blocktocheck != 0:
var_cango = False
except:
var_cango = False
if var_cango:
block = '\x00'
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
var_position = target
x,y,z = var_position
block = chr(11)
world[x, y, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world)
self.client.sendBlock(x, y, z, block)
block = chr(9)
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
| en | 0.423812 | # iCraft is Copyright 2010 both # # The Archives team: # <<NAME>> <EMAIL> AKA "Adam01" # <<NAME>> <EMAIL> AKA "Aera" # <<NAME>> <EMAIL> AKA "revenant" # <<NAME>> <EMAIL> AKA "gdude2002" # # And, # # The iCraft team: # <<NAME>> <EMAIL> AKA "tehcid" # <<NAME>> <EMAIL> AKA "gothfox" # <<NAME>> <EMAIL> AKA "AndrewPH" # <<NAME>> <EMAIL> AKA "PixelEater" # <<NAME>> <EMAIL> AKA "Varriount" # <<NAME>> <EMAIL> AKA "iKJames" # <<NAME>> <EMAIL> AKA "erronjason" # <<NAME>> <EMAIL> AKA "sk8rjwd" # <<NAME>> <EMAIL> AKA "destroyerx1" # <<NAME>> <EMAIL> AKA "Fooblock" # <<NAME>> <EMAIL> AKA "NotMeh" # <<NAME>> <EMAIL> AKA "eugo" # <<NAME>> <EMAIL> AKA "Saanix" # <<NAME>> <EMAIL> AKA "ntfwc" # <<NAME>> <EMAIL> AKA "Dwarfy" # <<NAME>> <EMAIL> AKA "goober" # <<NAME>> <EMAIL> AKA "willempiee" # # Disclaimer: Parts of this code may have been contributed by the end-users. # # iCraft is licensed under the Creative Commons # Attribution-NonCommercial-ShareAlike 3.0 Unported License. # To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ # Or, send a letter to Creative Commons, 171 2nd Street, # Suite 300, San Francisco, California, 94105, USA. | 1.399855 | 1 |
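The jello fragment above repeats the same three calls (assign into world, queue a TASK_BLOCKSET, send the block to the client) for every cell it touches, first to erase the old two-block body and then to redraw it one step closer to the nearest player. A helper along these lines could factor that out; world, self.client and TASK_BLOCKSET are assumed from the surrounding plugin, and the block ids are simply the ones the fragment already uses:

def set_column(client, world, pos, lower_block, upper_block):
    """Write a two-block column at pos, mirroring the update pattern used above."""
    x, y, z = pos
    for dy, block in ((0, lower_block), (1, upper_block)):
        world[x, y + dy, z] = block
        client.queueTask(TASK_BLOCKSET, (x, y + dy, z, block), world=world)
        client.sendBlock(x, y + dy, z, block)

# Erasing the old body and drawing the new one would then collapse to:
#   set_column(self.client, world, old_pos, '\x00', '\x00')
#   set_column(self.client, world, new_pos, chr(11), chr(9))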
leetcode/606.py | sputnikW/algorithm | 0 | 6632285 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def tree2str(self, t: TreeNode) -> str:
if t is None:
return ''
# return the required string
def preOrderTraver(node):
if node.left is None and node.right is None:
return str(node.val)
leftStr = '()' if node.left is None else '(' + preOrderTraver(node.left) + ')'
rightStr = '' if node.right is None else '(' + preOrderTraver(node.right) + ')'
return str(node.val) + leftStr + rightStr
return preOrderTraver(t) | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def tree2str(self, t: TreeNode) -> str:
if t is None:
return ''
# return the required string
def preOrderTraver(node):
if node.left is None and node.right is None:
return str(node.val)
leftStr = '()' if node.left is None else '(' + preOrderTraver(node.left) + ')'
rightStr = '' if node.right is None else '(' + preOrderTraver(node.right) + ')'
return str(node.val) + leftStr + rightStr
return preOrderTraver(t) | en | 0.530494 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None # return the required string | 3.817049 | 4 |
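A quick way to see what tree2str produces is to build LeetCode 606's first example tree, [1,2,3,4], by hand. The TreeNode below mirrors the commented-out definition at the top of the file; note that only a missing left child is kept as an empty "()", a missing right child adds nothing:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left = TreeNode(4)
print(Solution().tree2str(root))  # prints "1(2(4))(3)"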
world/petitions/admin.py | stesla/arxcode | 5 | 6632286 | <reponame>stesla/arxcode
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import BrokeredSale, PurchasedAmount, PetitionParticipation, Petition, PetitionPost
class PurchasedAmountInline(admin.TabularInline):
"""Inline for purchased amounts"""
model = PurchasedAmount
extra = 0
raw_id_fields = ('buyer',)
class BrokeredSaleAdmin(admin.ModelAdmin):
"""Admin for BrokeredSale"""
list_display = ('id', 'owner', 'sale_type', 'crafting_material_type', 'price', 'amount')
list_filter = ('sale_type',)
search_fields = ('id', 'owner__player__username', 'crafting_material_type__name')
inlines = (PurchasedAmountInline,)
raw_id_fields = ('owner', 'crafting_material_type',)
class PetitionParticipantInline(admin.TabularInline):
"""Inline for participation in petitions"""
model = PetitionParticipation
extra = 0
raw_id_fields = ('dompc',)
class PetitionPostInline(admin.TabularInline):
"""Inline for posts in petitions"""
model = PetitionPost
extra = 0
raw_id_fields = ('dompc',)
class PetitionAdmin(admin.ModelAdmin):
"""Admin for petitions"""
list_display = ('id', 'topic', 'description', 'organization')
search_fields = ('id', 'topic', 'description', 'organization__name')
raw_id_fields = ('organization',)
inlines = (PetitionParticipantInline, PetitionPostInline)
admin.site.register(BrokeredSale, BrokeredSaleAdmin)
admin.site.register(Petition, PetitionAdmin)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import BrokeredSale, PurchasedAmount, PetitionParticipation, Petition, PetitionPost
class PurchasedAmountInline(admin.TabularInline):
"""Inline for purchased amounts"""
model = PurchasedAmount
extra = 0
raw_id_fields = ('buyer',)
class BrokeredSaleAdmin(admin.ModelAdmin):
"""Admin for BrokeredSale"""
list_display = ('id', 'owner', 'sale_type', 'crafting_material_type', 'price', 'amount')
list_filter = ('sale_type',)
search_fields = ('id', 'owner__player__username', 'crafting_material_type__name')
inlines = (PurchasedAmountInline,)
raw_id_fields = ('owner', 'crafting_material_type',)
class PetitionParticipantInline(admin.TabularInline):
"""Inline for participation in petitions"""
model = PetitionParticipation
extra = 0
raw_id_fields = ('dompc',)
class PetitionPostInline(admin.TabularInline):
"""Inline for posts in petitions"""
model = PetitionPost
extra = 0
raw_id_fields = ('dompc',)
class PetitionAdmin(admin.ModelAdmin):
"""Admin for petitions"""
list_display = ('id', 'topic', 'description', 'organization')
search_fields = ('id', 'topic', 'description', 'organization__name')
raw_id_fields = ('organization',)
inlines = (PetitionParticipantInline, PetitionPostInline)
admin.site.register(BrokeredSale, BrokeredSaleAdmin)
admin.site.register(Petition, PetitionAdmin) | en | 0.851879 | # -*- coding: utf-8 -*- Inline for purchased amounts Admin for BrokeredSale Inline for participation in petitions Inline for posts in petitions Admin for petitions | 1.878689 | 2 |
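The two admin.site.register(...) calls at the bottom of this admin module can equivalently be written with Django's @admin.register decorator, which keeps each registration next to its ModelAdmin. A sketch of the same wiring in that style (bodies abbreviated, they would stay as above):

from django.contrib import admin
from .models import BrokeredSale, Petition

@admin.register(BrokeredSale)
class BrokeredSaleAdmin(admin.ModelAdmin):
    list_display = ('id', 'owner', 'sale_type', 'crafting_material_type', 'price', 'amount')

@admin.register(Petition)
class PetitionAdmin(admin.ModelAdmin):
    list_display = ('id', 'topic', 'description', 'organization')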
uptimer/__init__.py | izmirli/uptimer | 0 | 6632287 | """Top-level package for UpTimer."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| """Top-level package for UpTimer."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| en | 0.529205 | Top-level package for UpTimer. <NAME> | 0.925718 | 1 |
merge-k-sorted-lists/merge-k-sorted-lists.py | JeremyTsaii/LeetHub | 1 | 6632288 | <filename>merge-k-sorted-lists/merge-k-sorted-lists.py<gh_stars>1-10
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
arr = []
for link in lists:
while(link):
arr.append(link.val)
link = link.next
arr.sort()
head = ListNode(0)
cur = head
for val in arr:
cur.next = ListNode(val)
cur = cur.next
return head.next
| <filename>merge-k-sorted-lists/merge-k-sorted-lists.py<gh_stars>1-10
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
arr = []
for link in lists:
while(link):
arr.append(link.val)
link = link.next
arr.sort()
head = ListNode(0)
cur = head
for val in arr:
cur.next = ListNode(val)
cur = cur.next
return head.next
| en | 0.342808 | # Definition for singly-linked list. # class ListNode(object): # def __init__(self, val=0, next=None): # self.val = val # self.next = next :type lists: List[ListNode] :rtype: ListNode | 3.601638 | 4 |
slackbot/plugins/hello.py | damdev/dolarbot | 0 | 6632289 | #coding: UTF-8
import re
from slackbot.bot import respond_to
from slackbot.bot import listen_to
from slackbot.bot import default_reply
import requests
def bbva(i):
try:
b = requests.get('https://hb.bbv.com.ar/fnet/mod/inversiones/NL-dolareuro.jsp')
aa = b.text.replace('\t', '').replace('\n', '')
return re.findall('[0-9]*,[0-9]*', aa)[i]
except:
return 'error!'
def lala(i):
try:
b = requests.get('https://banco.santanderrio.com.ar/exec/cotizacion/index.jsp')
aa = b.text.replace('\t', '').replace('\n', '')
return re.findall('\$ [0-9]*,[0-9]*', aa)[i]
except:
return 'error!'
@respond_to('venta', re.IGNORECASE)
def hello_reply(message):
message.reply('SANTANDER V:' + lala(0) + '| BBVA V:' + bbva(1))
@respond_to('dolar', re.IGNORECASE)
def dolar_reply(message):
message.reply('SANTANDER C:' + lala(1) + '| BBVA C:' + bbva(2))
@default_reply
def my_default_handler(message):
message.reply('Es todo un tema.') | #coding: UTF-8
import re
from slackbot.bot import respond_to
from slackbot.bot import listen_to
from slackbot.bot import default_reply
import requests
def bbva(i):
try:
b = requests.get('https://hb.bbv.com.ar/fnet/mod/inversiones/NL-dolareuro.jsp')
aa = b.text.replace('\t', '').replace('\n', '')
return re.findall('[0-9]*,[0-9]*', aa)[i]
except:
return 'error!'
def lala(i):
try:
b = requests.get('https://banco.santanderrio.com.ar/exec/cotizacion/index.jsp')
aa = b.text.replace('\t', '').replace('\n', '')
return re.findall('\$ [0-9]*,[0-9]*', aa)[i]
except:
return 'error!'
@respond_to('venta', re.IGNORECASE)
def hello_reply(message):
message.reply('SANTANDER V:' + lala(0) + '| BBVA V:' + bbva(1))
@respond_to('dolar', re.IGNORECASE)
def dolar_reply(message):
message.reply('SANTANDER C:' + lala(1) + '| BBVA C:' + bbva(2))
@default_reply
def my_default_handler(message):
message.reply('Es todo un tema.') | ru | 0.189046 | #coding: UTF-8 | 2.499282 | 2 |
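Both bbva() and lala() follow the same fetch-then-regex pattern, and the bare except hides every failure (network error, layout change, missing match) behind the string 'error!'. A slightly more defensive shared helper might look like this; the URL and regex in the usage comment are the ones the plugin already uses, while the timeout and logging are additions:

import logging
import re
import requests

log = logging.getLogger(__name__)

def fetch_rate(url, pattern, index):
    """Fetch a bank page and return the index-th price matching pattern, or None on failure."""
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
        text = resp.text.replace('\t', '').replace('\n', '')
        return re.findall(pattern, text)[index]
    except (requests.RequestException, IndexError) as exc:
        log.warning("rate lookup failed for %s: %s", url, exc)
        return None

# e.g. fetch_rate('https://hb.bbv.com.ar/fnet/mod/inversiones/NL-dolareuro.jsp', '[0-9]*,[0-9]*', 1)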
src/foolscap/logging/filter.py | rbalint/foolscap | 0 | 6632290 | <reponame>rbalint/foolscap
from twisted.python import usage
import sys, os, bz2, time
from foolscap.logging import log, flogfile
from foolscap.util import move_into_place
class FilterOptions(usage.Options):
stdout = sys.stdout
stderr = sys.stderr
synopsis = "Usage: flogtool filter [options] OLDFILE NEWFILE"
optParameters = [
["after", None, None, "include events after timestamp (seconds since epoch)"],
["before", None, None, "include events before timestamp"],
["strip-facility", None, None, "remove events with the given facility prefix"],
["above", None, None, "include events at the given severity level or above"],
["from", None, None, "include events from the given tubid prefix"],
]
optFlags = [
["verbose", "v", "emit event numbers during processing (useful to isolate an unloadable event pickle"],
]
def parseArgs(self, oldfile, newfile=None):
self.oldfile = oldfile
self.newfile = newfile
if newfile is None:
self.newfile = oldfile
def opt_after(self, arg):
self['after'] = int(arg)
def opt_before(self, arg):
self['before'] = int(arg)
def opt_above(self, arg):
try:
self['above'] = int(arg)
except ValueError:
levelmap = {"NOISY": log.NOISY,
"OPERATIONAL": log.OPERATIONAL,
"UNUSUAL": log.UNUSUAL,
"INFREQUENT": log.INFREQUENT,
"CURIOUS": log.CURIOUS,
"WEIRD": log.WEIRD,
"SCARY": log.SCARY,
"BAD": log.BAD,
}
self['above'] = levelmap[arg]
class Filter:
def run(self, options):
stdout = options.stdout
newfilename = options.newfile
if options.newfile == options.oldfile:
print >>stdout, "modifying event file in place"
newfilename = newfilename + ".tmp"
if options.newfile.endswith(".bz2"):
newfile = bz2.BZ2File(newfilename, "w")
else:
newfile = open(newfilename, "wb")
newfile.write(flogfile.MAGIC)
after = options['after']
if after is not None:
print >>stdout, " --after: removing events before %s" % time.ctime(after)
before = options['before']
if before is not None:
print >>stdout, " --before: removing events after %s" % time.ctime(before)
above = options['above']
if above:
print >>stdout, " --above: removing events below level %d" % above
from_tubid = options['from']
if from_tubid:
print >>stdout, " --from: retaining events only from tubid prefix %s" % from_tubid
strip_facility = options['strip-facility']
if strip_facility is not None:
print >>stdout, "--strip-facility: removing events for %s and children" % strip_facility
total = 0
copied = 0
for e in flogfile.get_events(options.oldfile):
if options['verbose']:
if "d" in e:
print >>stdout, e['d']['num']
else:
print >>stdout, "HEADER"
total += 1
if "d" in e:
if before is not None and e['d']['time'] >= before:
continue
if after is not None and e['d']['time'] <= after:
continue
if above is not None and e['d']['level'] < above:
continue
if from_tubid is not None and not e['from'].startswith(from_tubid):
continue
if (strip_facility is not None
and e['d'].get('facility', "").startswith(strip_facility)):
continue
copied += 1
flogfile.serialize_raw_wrapper(newfile, e)
newfile.close()
if options.newfile == options.oldfile:
if sys.platform == "win32":
# Win32 can't do an atomic rename to an existing file.
try:
os.unlink(options.newfile)
except OSError:
pass
move_into_place(newfilename, options.newfile)
print >>stdout, "copied %d of %d events into new file" % (copied, total)
| from twisted.python import usage
import sys, os, bz2, time
from foolscap.logging import log, flogfile
from foolscap.util import move_into_place
class FilterOptions(usage.Options):
stdout = sys.stdout
stderr = sys.stderr
synopsis = "Usage: flogtool filter [options] OLDFILE NEWFILE"
optParameters = [
["after", None, None, "include events after timestamp (seconds since epoch)"],
["before", None, None, "include events before timestamp"],
["strip-facility", None, None, "remove events with the given facility prefix"],
["above", None, None, "include events at the given severity level or above"],
["from", None, None, "include events from the given tubid prefix"],
]
optFlags = [
["verbose", "v", "emit event numbers during processing (useful to isolate an unloadable event pickle"],
]
def parseArgs(self, oldfile, newfile=None):
self.oldfile = oldfile
self.newfile = newfile
if newfile is None:
self.newfile = oldfile
def opt_after(self, arg):
self['after'] = int(arg)
def opt_before(self, arg):
self['before'] = int(arg)
def opt_above(self, arg):
try:
self['above'] = int(arg)
except ValueError:
levelmap = {"NOISY": log.NOISY,
"OPERATIONAL": log.OPERATIONAL,
"UNUSUAL": log.UNUSUAL,
"INFREQUENT": log.INFREQUENT,
"CURIOUS": log.CURIOUS,
"WEIRD": log.WEIRD,
"SCARY": log.SCARY,
"BAD": log.BAD,
}
self['above'] = levelmap[arg]
class Filter:
def run(self, options):
stdout = options.stdout
newfilename = options.newfile
if options.newfile == options.oldfile:
print >>stdout, "modifying event file in place"
newfilename = newfilename + ".tmp"
if options.newfile.endswith(".bz2"):
newfile = bz2.BZ2File(newfilename, "w")
else:
newfile = open(newfilename, "wb")
newfile.write(flogfile.MAGIC)
after = options['after']
if after is not None:
print >>stdout, " --after: removing events before %s" % time.ctime(after)
before = options['before']
if before is not None:
print >>stdout, " --before: removing events after %s" % time.ctime(before)
above = options['above']
if above:
print >>stdout, " --above: removing events below level %d" % above
from_tubid = options['from']
if from_tubid:
print >>stdout, " --from: retaining events only from tubid prefix %s" % from_tubid
strip_facility = options['strip-facility']
if strip_facility is not None:
print >>stdout, "--strip-facility: removing events for %s and children" % strip_facility
total = 0
copied = 0
for e in flogfile.get_events(options.oldfile):
if options['verbose']:
if "d" in e:
print >>stdout, e['d']['num']
else:
print >>stdout, "HEADER"
total += 1
if "d" in e:
if before is not None and e['d']['time'] >= before:
continue
if after is not None and e['d']['time'] <= after:
continue
if above is not None and e['d']['level'] < above:
continue
if from_tubid is not None and not e['from'].startswith(from_tubid):
continue
if (strip_facility is not None
and e['d'].get('facility', "").startswith(strip_facility)):
continue
copied += 1
flogfile.serialize_raw_wrapper(newfile, e)
newfile.close()
if options.newfile == options.oldfile:
if sys.platform == "win32":
# Win32 can't do an atomic rename to an existing file.
try:
os.unlink(options.newfile)
except OSError:
pass
move_into_place(newfilename, options.newfile)
print >>stdout, "copied %d of %d events into new file" % (copied, total) | en | 0.853129 | # Win32 can't do an atomic rename to an existing file. | 2.23722 | 2 |
webapps/ab-testing-results/backend.py | dataiku/dss-plugin-ab-testing | 0 | 6632291 | <filename>webapps/ab-testing-results/backend.py
from flask import request
import dataiku
from distutils.util import strtobool
import json
import traceback
import numpy as np
import simplejson
from dataiku.customwebapp import get_webapp_config
from results.ab_calculator import compute_Z_score, compute_p_value
from results.statistics_helper import read_statistics
from dku_tools import get_output_folder
from helpers import save_parameters, check_int
config = get_webapp_config()
project_key = dataiku.default_project_key()
client = dataiku.api_client()
def convert_numpy_int64_to_int(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
@app.route('/ab_calculator', methods=['POST'])
def analyse_results():
try:
form_data = json.loads(request.data)
check_int(form_data.get("size_A"), 'size A')
check_int(form_data.get("size_B"), 'size B')
size_A = form_data.get("size_A")
size_B = form_data.get("size_B")
CR_A = float(form_data.get("success_rate_A"))/100
CR_B = float(form_data.get("success_rate_B"))/100
if (CR_A > 1) or (CR_B > 1):
raise ValueError('Success rate must be between 0-100%')
two_tailed = strtobool(form_data.get("tail"))
Z_score = round(compute_Z_score(size_A, size_B, CR_A, CR_B), 3)
p_value = round(compute_p_value(Z_score, two_tailed), 3)
return simplejson.dumps({"Z_score": Z_score, "p_value": p_value}, ignore_nan=True, default=convert_numpy_int64_to_int)
except:
return traceback.format_exc(), 500
@app.route("/statistics", methods=["POST"])
def get_statistics():
try:
dataset_name = json.loads(request.data).get("dataset_name")
column_name = json.loads(request.data).get("column_name")
if dataset_name:
dataset = dataiku.Dataset(dataset_name)
df = dataset.get_dataframe()
else:
raise ValueError("Statistics dataset is missing, specify it in the settings or edit sizes and success rates manually.")
if column_name:
response = read_statistics(df, column_name)
return response
else:
raise ValueError(
"AB group column name is missing, specify it in the settings or edit sizes and success rates manually.")
except:
return traceback.format_exc(), 500
@app.route('/write_parameters', methods=['POST'])
def save():
try:
output_folder = get_output_folder(config, client, project_key)
data = json.loads(request.data)
fields_to_save = ["size_A", "size_B", "success_rate_A", "success_rate_B", "uplift", "p_value", "z_score"]
save_parameters(data, output_folder, fields_to_save)
return json.dumps({"status": "Parameters saved"})
except:
return traceback.format_exc(), 500 | <filename>webapps/ab-testing-results/backend.py
from flask import request
import dataiku
from distutils.util import strtobool
import json
import traceback
import numpy as np
import simplejson
from dataiku.customwebapp import get_webapp_config
from results.ab_calculator import compute_Z_score, compute_p_value
from results.statistics_helper import read_statistics
from dku_tools import get_output_folder
from helpers import save_parameters, check_int
config = get_webapp_config()
project_key = dataiku.default_project_key()
client = dataiku.api_client()
def convert_numpy_int64_to_int(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
@app.route('/ab_calculator', methods=['POST'])
def analyse_results():
try:
form_data = json.loads(request.data)
check_int(form_data.get("size_A"), 'size A')
check_int(form_data.get("size_B"), 'size B')
size_A = form_data.get("size_A")
size_B = form_data.get("size_B")
CR_A = float(form_data.get("success_rate_A"))/100
CR_B = float(form_data.get("success_rate_B"))/100
if (CR_A > 1) or (CR_B > 1):
raise ValueError('Success rate must be between 0-100%')
two_tailed = strtobool(form_data.get("tail"))
Z_score = round(compute_Z_score(size_A, size_B, CR_A, CR_B), 3)
p_value = round(compute_p_value(Z_score, two_tailed), 3)
return simplejson.dumps({"Z_score": Z_score, "p_value": p_value}, ignore_nan=True, default=convert_numpy_int64_to_int)
except:
return traceback.format_exc(), 500
@app.route("/statistics", methods=["POST"])
def get_statistics():
try:
dataset_name = json.loads(request.data).get("dataset_name")
column_name = json.loads(request.data).get("column_name")
if dataset_name:
dataset = dataiku.Dataset(dataset_name)
df = dataset.get_dataframe()
else:
raise ValueError("Statistics dataset is missing, specify it in the settings or edit sizes and success rates manually.")
if column_name:
response = read_statistics(df, column_name)
return response
else:
raise ValueError(
"AB group column name is missing, specify it in the settings or edit sizes and success rates manually.")
except:
return traceback.format_exc(), 500
@app.route('/write_parameters', methods=['POST'])
def save():
try:
output_folder = get_output_folder(config, client, project_key)
data = json.loads(request.data)
fields_to_save = ["size_A", "size_B", "success_rate_A", "success_rate_B", "uplift", "p_value", "z_score"]
save_parameters(data, output_folder, fields_to_save)
return json.dumps({"status": "Parameters saved"})
except:
return traceback.format_exc(), 500 | none | 1 | 2.372554 | 2 |
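compute_Z_score and compute_p_value come from the plugin's results.ab_calculator module, which is not shown here. For orientation, the pooled two-proportion z-test that such helpers typically implement can be written in a few lines; this is a generic sketch, not the plugin's actual code:

import math

def z_score(n_a, n_b, cr_a, cr_b):
    """Pooled two-proportion z statistic for conversion rates cr_a, cr_b with sample sizes n_a, n_b."""
    p_pool = (cr_a * n_a + cr_b * n_b) / (n_a + n_b)
    se = math.sqrt(p_pool * (1 - p_pool) * (1.0 / n_a + 1.0 / n_b))
    return (cr_b - cr_a) / se

def p_value(z, two_tailed=True):
    tail = 1 - 0.5 * (1 + math.erf(abs(z) / math.sqrt(2)))  # upper-tail area of the standard normal
    return 2 * tail if two_tailed else tail

z = z_score(1000, 1000, 0.10, 0.12)
print(round(z, 3), round(p_value(z), 3))  # roughly 1.43 and 0.15 for this example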
|
setup.py | sfairchild/moduler | 0 | 6632292 | from setuptools import setup
setup(
name='Moduler',
version='0.0.2',
author='<NAME>',
author_email='<EMAIL>',
packages=['moduler'],
scripts=['bin/moduler'],
url='#TODO Change this',
license='MIT',
description='Used to checkout packages into a directory',
long_description=open('README.txt').read(),
install_requires=[
"PyYAML >= 5.3.1",
"pytest",
],
zip_safe=False
)
| from setuptools import setup
setup(
name='Moduler',
version='0.0.2',
author='<NAME>',
author_email='<EMAIL>',
packages=['moduler'],
scripts=['bin/moduler'],
url='#TODO Change this',
license='MIT',
description='Used to checkout packages into a directory',
long_description=open('README.txt').read(),
install_requires=[
"PyYAML >= 5.3.1",
"pytest",
],
zip_safe=False
)
| none | 1 | 1.162031 | 1 |
|
penv/features.py | ywchiu/penv | 20 | 6632293 |
import numpy as np
from scipy.special import binom
from tensortrade.feed import Stream
def fracdiff(s: Stream[float], d: float, window: int) -> Stream[float]:
c = np.tile([1.0, -1.0], -(-window // 2))[:window]
w = c*binom(d, np.arange(window))
w = w[::-1]
frac = s.rolling(window=window, min_periods=window).agg(lambda v: np.dot(w.T, v))
return frac
def macd(s: Stream[float], fast: int, slow: int, signal: int) -> Stream[float]:
fm = s.ewm(span=fast, adjust=False).mean()
sm = s.ewm(span=slow, adjust=False).mean()
md = fm - sm
signal = md - md.ewm(span=signal, adjust=False).mean()
return signal
def rsi(s: Stream[float], period: float, use_multiplier: bool = True) -> Stream[float]:
r = s.diff()
upside = r.clamp_min(0).abs()
downside = r.clamp_max(0).abs()
rs = upside.ewm(alpha=1 / period).mean() / downside.ewm(alpha=1 / period).mean()
v = 1 - (1 + rs)**-1
return 100*v if use_multiplier else v
|
import numpy as np
from scipy.special import binom
from tensortrade.feed import Stream
def fracdiff(s: Stream[float], d: float, window: int) -> Stream[float]:
c = np.tile([1.0, -1.0], -(-window // 2))[:window]
w = c*binom(d, np.arange(window))
w = w[::-1]
frac = s.rolling(window=window, min_periods=window).agg(lambda v: np.dot(w.T, v))
return frac
def macd(s: Stream[float], fast: int, slow: int, signal: int) -> Stream[float]:
fm = s.ewm(span=fast, adjust=False).mean()
sm = s.ewm(span=slow, adjust=False).mean()
md = fm - sm
signal = md - md.ewm(span=signal, adjust=False).mean()
return signal
def rsi(s: Stream[float], period: float, use_multiplier: bool = True) -> Stream[float]:
r = s.diff()
upside = r.clamp_min(0).abs()
downside = r.clamp_max(0).abs()
rs = upside.ewm(alpha=1 / period).mean() / downside.ewm(alpha=1 / period).mean()
v = 1 - (1 + rs)**-1
return 100*v if use_multiplier else v
| none | 1 | 2.193373 | 2 |
|
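fracdiff above builds a window of fractional-differencing weights (alternating-sign binomial coefficients, reversed so the most recent observation carries weight 1) and applies them with a rolling dot product. The same weight construction in plain NumPy, outside the tensortrade Stream wrapper, makes the arithmetic easy to inspect; the prices array is made up for illustration:

import numpy as np
from scipy.special import binom

def fracdiff_weights(d, window):
    # Identical construction to fracdiff(): alternating signs times binomial coefficients, reversed.
    c = np.tile([1.0, -1.0], -(-window // 2))[:window]
    return (c * binom(d, np.arange(window)))[::-1]

w = fracdiff_weights(d=0.5, window=5)
prices = np.array([100.0, 101.0, 103.0, 102.0, 105.0])
print(np.dot(w, prices))  # fractionally differenced value for the latest 5-observation window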
robosuite/controllers/base_controller.py | StanfordVL/Lasersuite | 5 | 6632294 | <filename>robosuite/controllers/base_controller.py
import abc
from collections.abc import Iterable
import numpy as np
import mujoco_py
class Controller(object, metaclass=abc.ABCMeta):
"""
General controller interface.
Requires reference to mujoco sim object, eef_name of specific robot, relevant joint_indexes to that robot, and
whether an initial_joint is used for nullspace torques or not
Args:
sim (MjSim): Simulator instance this controller will pull robot state updates from
eef_name (str): Name of controlled robot arm's end effector (from robot XML)
joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:
"joints" : list of indexes to relevant robot joints
"qpos" : list of indexes to relevant robot joint positions
"qvel" : list of indexes to relevant robot joint velocities
"""
def __init__(self,
sim,
eef_name,
joint_indexes,
):
# Attributes for scaling / clipping inputs to outputs
self.action_scale = None
self.action_input_transform = None
self.action_output_transform = None
# Private property attributes
self._control_dim = None
self._output_min = None
self._output_max = None
self._input_min = None
self._input_max = None
# mujoco simulator state
self.sim = sim
self.model_timestep = self.sim.model.opt.timestep
self.eef_name = eef_name
self.joint_index = joint_indexes["joints"]
self.qpos_index = joint_indexes["qpos"]
self.qvel_index = joint_indexes["qvel"]
# robot states
self.ee_pos = None
self.ee_ori_mat = None
self.ee_pos_vel = None
self.ee_ori_vel = None
self.joint_pos = None
self.joint_vel = None
# dynamics and kinematics
self.J_pos = None
self.J_ori = None
self.J_full = None
self.mass_matrix = None
# Joint dimension
self.joint_dim = len(joint_indexes["joints"])
# Torques being outputted by the controller
self.torques = None
# Move forward one timestep to propagate updates before taking first update
self.sim.forward()
# Initialize controller by updating internal state and setting the initial joint, pos, and ori
self.update()
self.initial_joint = self.joint_pos
self.initial_ee_pos = self.ee_pos
self.initial_ee_ori_mat = self.ee_ori_mat
@abc.abstractmethod
def run_controller(self):
"""
Abstract method that should be implemented in all subclass controllers
Converts a given action into torques (pre gravity compensation) to be executed on the robot
"""
raise NotImplementedError
def scale_action(self, action):
"""
Scale the action based on max and min of action
"""
if self.action_scale is None:
self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min)
self.action_output_transform = (self.output_max + self.output_min) / 2.0
self.action_input_transform = (self.input_max + self.input_min) / 2.0
action = np.clip(action, self.input_min, self.input_max)
transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform
return transformed_action
def update(self):
"""
Updates the state of the robot arm, including end effector pose / orientation / velocity, joint pos/vel,
jacobian, and mass matrix
"""
self.ee_pos = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id(self.eef_name)])
self.ee_ori_mat = np.array(self.sim.data.body_xmat[self.sim.model.body_name2id(self.eef_name)].reshape([3, 3]))
self.ee_pos_vel = np.array(self.sim.data.body_xvelp[self.sim.model.body_name2id(self.eef_name)])
self.ee_ori_vel = np.array(self.sim.data.body_xvelr[self.sim.model.body_name2id(self.eef_name)])
self.joint_pos = np.array(self.sim.data.qpos[self.qpos_index])
self.joint_vel = np.array(self.sim.data.qvel[self.qvel_index])
self.J_pos = np.array(self.sim.data.get_body_jacp(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_ori = np.array(self.sim.data.get_body_jacr(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_full = np.array(np.vstack([self.J_pos, self.J_ori]))
mass_matrix = np.ndarray(shape=(len(self.sim.data.qvel) ** 2,), dtype=np.float64, order='C')
mujoco_py.cymj._mj_fullM(self.sim.model, mass_matrix, self.sim.data.qM)
mass_matrix = np.reshape(mass_matrix, (len(self.sim.data.qvel), len(self.sim.data.qvel)))
self.mass_matrix = mass_matrix[self.joint_index, :][:, self.joint_index]
def update_base_pose(self, base_pos, base_ori):
"""
Optional function to implement in subclass controllers that will take in @base_pos and @base_ori and update
internal configuration to account for changes in the respective states. Useful for controllers e.g. IK, which
is based on pybullet and requires knowledge of simulator state deviations between pybullet and mujoco
Args:
@base_pos (3-tuple): x,y,z position of robot base in mujoco world coordinates
@base_ori (4-tuple): x,y,z,w orientation or robot base in mujoco world coordinates
"""
pass
def update_initial_joints(self, initial_joints):
"""
Updates the internal attribute self.initial_joints. This is useful for updating changes in controller-specific
behavior, such as with OSC where self.initial_joints is used for determine nullspace actions
This function can also be extended by subclassed controllers for additional controller-specific updates
Args:
initial_joints (array): Array of joint position values to update the initial joints
"""
self.initial_joint = np.array(initial_joints)
@property
def input_min(self):
"""Returns input minimum below which an inputted action will be clipped"""
return self._input_min
@input_min.setter
def input_min(self, input_min):
"""Sets the minimum input"""
self._input_min = np.array(input_min) if isinstance(input_min, Iterable) \
else np.array([input_min]*self.control_dim)
@property
def input_max(self):
"""Returns input maximum above which an inputted action will be clipped"""
return self._input_max
@input_max.setter
def input_max(self, input_max):
"""Sets the maximum input"""
self._input_max = np.array(input_max) if isinstance(input_max, Iterable) \
else np.array([input_max]*self.control_dim)
@property
def output_min(self):
"""Returns output minimum which defines lower end of scaling range when scaling an input action"""
return self._output_min
@output_min.setter
def output_min(self, output_min):
"""Set the minimum output"""
self._output_min = np.array(output_min) if isinstance(output_min, Iterable) \
else np.array([output_min]*self.control_dim)
@property
def output_max(self):
"""Returns output maximum which defines upper end of scaling range when scaling an input action"""
return self._output_max
@output_max.setter
def output_max(self, output_max):
"""Set the maximum output"""
self._output_max = np.array(output_max) if isinstance(output_max, Iterable) \
else np.array([output_max]*self.control_dim)
@property
def control_dim(self):
"""Returns the control dimension for this controller (specifies size of action space)"""
return self._control_dim
@control_dim.setter
def control_dim(self, control_dim):
"""Sets the control dimension for this controller"""
self._control_dim = control_dim
@property
def torque_compensation(self):
"""Returns gravity compensation torques for the robot arm"""
return self.sim.data.qfrc_bias[self.qvel_index]
@property
def name(self):
"""Returns the name of this controller"""
raise NotImplementedError
| <filename>robosuite/controllers/base_controller.py
import abc
from collections.abc import Iterable
import numpy as np
import mujoco_py
class Controller(object, metaclass=abc.ABCMeta):
"""
General controller interface.
Requires reference to mujoco sim object, eef_name of specific robot, relevant joint_indexes to that robot, and
whether an initial_joint is used for nullspace torques or not
Args:
sim (MjSim): Simulator instance this controller will pull robot state updates from
eef_name (str): Name of controlled robot arm's end effector (from robot XML)
joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:
"joints" : list of indexes to relevant robot joints
"qpos" : list of indexes to relevant robot joint positions
"qvel" : list of indexes to relevant robot joint velocities
"""
def __init__(self,
sim,
eef_name,
joint_indexes,
):
# Attributes for scaling / clipping inputs to outputs
self.action_scale = None
self.action_input_transform = None
self.action_output_transform = None
# Private property attributes
self._control_dim = None
self._output_min = None
self._output_max = None
self._input_min = None
self._input_max = None
# mujoco simulator state
self.sim = sim
self.model_timestep = self.sim.model.opt.timestep
self.eef_name = eef_name
self.joint_index = joint_indexes["joints"]
self.qpos_index = joint_indexes["qpos"]
self.qvel_index = joint_indexes["qvel"]
# robot states
self.ee_pos = None
self.ee_ori_mat = None
self.ee_pos_vel = None
self.ee_ori_vel = None
self.joint_pos = None
self.joint_vel = None
# dynamics and kinematics
self.J_pos = None
self.J_ori = None
self.J_full = None
self.mass_matrix = None
# Joint dimension
self.joint_dim = len(joint_indexes["joints"])
# Torques being outputted by the controller
self.torques = None
# Move forward one timestep to propagate updates before taking first update
self.sim.forward()
# Initialize controller by updating internal state and setting the initial joint, pos, and ori
self.update()
self.initial_joint = self.joint_pos
self.initial_ee_pos = self.ee_pos
self.initial_ee_ori_mat = self.ee_ori_mat
@abc.abstractmethod
def run_controller(self):
"""
Abstract method that should be implemented in all subclass controllers
Converts a given action into torques (pre gravity compensation) to be executed on the robot
"""
raise NotImplementedError
def scale_action(self, action):
"""
Scale the action based on max and min of action
"""
if self.action_scale is None:
self.action_scale = abs(self.output_max - self.output_min) / abs(self.input_max - self.input_min)
self.action_output_transform = (self.output_max + self.output_min) / 2.0
self.action_input_transform = (self.input_max + self.input_min) / 2.0
action = np.clip(action, self.input_min, self.input_max)
transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform
return transformed_action
def update(self):
"""
Updates the state of the robot arm, including end effector pose / orientation / velocity, joint pos/vel,
jacobian, and mass matrix
"""
self.ee_pos = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id(self.eef_name)])
self.ee_ori_mat = np.array(self.sim.data.body_xmat[self.sim.model.body_name2id(self.eef_name)].reshape([3, 3]))
self.ee_pos_vel = np.array(self.sim.data.body_xvelp[self.sim.model.body_name2id(self.eef_name)])
self.ee_ori_vel = np.array(self.sim.data.body_xvelr[self.sim.model.body_name2id(self.eef_name)])
self.joint_pos = np.array(self.sim.data.qpos[self.qpos_index])
self.joint_vel = np.array(self.sim.data.qvel[self.qvel_index])
self.J_pos = np.array(self.sim.data.get_body_jacp(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_ori = np.array(self.sim.data.get_body_jacr(self.eef_name).reshape((3, -1))[:, self.qvel_index])
self.J_full = np.array(np.vstack([self.J_pos, self.J_ori]))
mass_matrix = np.ndarray(shape=(len(self.sim.data.qvel) ** 2,), dtype=np.float64, order='C')
mujoco_py.cymj._mj_fullM(self.sim.model, mass_matrix, self.sim.data.qM)
mass_matrix = np.reshape(mass_matrix, (len(self.sim.data.qvel), len(self.sim.data.qvel)))
self.mass_matrix = mass_matrix[self.joint_index, :][:, self.joint_index]
def update_base_pose(self, base_pos, base_ori):
"""
Optional function to implement in subclass controllers that will take in @base_pos and @base_ori and update
internal configuration to account for changes in the respective states. Useful for controllers e.g. IK, which
is based on pybullet and requires knowledge of simulator state deviations between pybullet and mujoco
Args:
@base_pos (3-tuple): x,y,z position of robot base in mujoco world coordinates
@base_ori (4-tuple): x,y,z,w orientation or robot base in mujoco world coordinates
"""
pass
def update_initial_joints(self, initial_joints):
"""
Updates the internal attribute self.initial_joints. This is useful for updating changes in controller-specific
behavior, such as with OSC where self.initial_joints is used for determine nullspace actions
This function can also be extended by subclassed controllers for additional controller-specific updates
Args:
initial_joints (array): Array of joint position values to update the initial joints
"""
self.initial_joint = np.array(initial_joints)
@property
def input_min(self):
"""Returns input minimum below which an inputted action will be clipped"""
return self._input_min
@input_min.setter
def input_min(self, input_min):
"""Sets the minimum input"""
self._input_min = np.array(input_min) if isinstance(input_min, Iterable) \
else np.array([input_min]*self.control_dim)
@property
def input_max(self):
"""Returns input maximum above which an inputted action will be clipped"""
return self._input_max
@input_max.setter
def input_max(self, input_max):
"""Sets the maximum input"""
self._input_max = np.array(input_max) if isinstance(input_max, Iterable) \
else np.array([input_max]*self.control_dim)
@property
def output_min(self):
"""Returns output minimum which defines lower end of scaling range when scaling an input action"""
return self._output_min
@output_min.setter
def output_min(self, output_min):
"""Set the minimum output"""
self._output_min = np.array(output_min) if isinstance(output_min, Iterable) \
else np.array([output_min]*self.control_dim)
@property
def output_max(self):
"""Returns output maximum which defines upper end of scaling range when scaling an input action"""
return self._output_max
@output_max.setter
def output_max(self, output_max):
"""Set the maximum output"""
self._output_max = np.array(output_max) if isinstance(output_max, Iterable) \
else np.array([output_max]*self.control_dim)
@property
def control_dim(self):
"""Returns the control dimension for this controller (specifies size of action space)"""
return self._control_dim
@control_dim.setter
def control_dim(self, control_dim):
"""Sets the control dimension for this controller"""
self._control_dim = control_dim
@property
def torque_compensation(self):
"""Returns gravity compensation torques for the robot arm"""
return self.sim.data.qfrc_bias[self.qvel_index]
@property
def name(self):
"""Returns the name of this controller"""
raise NotImplementedError
| en | 0.728926 | General controller interface. Requires reference to mujoco sim object, eef_name of specific robot, relevant joint_indexes to that robot, and whether an initial_joint is used for nullspace torques or not Args: sim (MjSim): Simulator instance this controller will pull robot state updates from eef_name (str): Name of controlled robot arm's end effector (from robot XML) joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely: "joints" : list of indexes to relevant robot joints "qpos" : list of indexes to relevant robot joint positions "qvel" : list of indexes to relevant robot joint velocities # Attributes for scaling / clipping inputs to outputs # Private property attributes # mujoco simulator state # robot states # dynamics and kinematics # Joint dimension # Torques being outputted by the controller # Move forward one timestep to propagate updates before taking first update # Initialize controller by updating internal state and setting the initial joint, pos, and ori Abstract method that should be implemented in all subclass controllers Converts a given action into torques (pre gravity compensation) to be executed on the robot Scale the action based on max and min of action Updates the state of the robot arm, including end effector pose / orientation / velocity, joint pos/vel, jacobian, and mass matrix Optional function to implement in subclass controllers that will take in @base_pos and @base_ori and update internal configuration to account for changes in the respective states. Useful for controllers e.g. IK, which is based on pybullet and requires knowledge of simulator state deviations between pybullet and mujoco Args: @base_pos (3-tuple): x,y,z position of robot base in mujoco world coordinates @base_ori (4-tuple): x,y,z,w orientation or robot base in mujoco world coordinates Updates the internal attribute self.initial_joints. This is useful for updating changes in controller-specific behavior, such as with OSC where self.initial_joints is used for determine nullspace actions This function can also be extended by subclassed controllers for additional controller-specific updates Args: initial_joints (array): Array of joint position values to update the initial joints Returns input minimum below which an inputted action will be clipped Sets the minimum input Returns input maximum above which an inputted action will be clipped Sets the maximum input Returns output minimum which defines lower end of scaling range when scaling an input action Set the minimum output Returns output maximum which defines upper end of scaling range when scaling an input action Set the maximum output Returns the control dimension for this controller (specifies size of action space) Sets the control dimension for this controller Returns gravity compensation torques for the robot arm Returns the name of this controller | 2.919991 | 3 |
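The abstract base above leaves only run_controller() and the name property to subclasses, with scale_action(), update() and torque_compensation doing the shared work. A deliberately minimal subclass, sketched against that interface only (it reuses the module's numpy import and is not one of the controllers that ships with robosuite):

class JointTorqueController(Controller):
    """Toy controller: scale the action to torque limits and add gravity compensation."""
    def __init__(self, sim, eef_name, joint_indexes, torque_limits=(-1.0, 1.0)):
        super().__init__(sim, eef_name, joint_indexes)
        self.control_dim = self.joint_dim          # one action entry per joint
        self.input_min, self.input_max = -1.0, 1.0
        self.output_min, self.output_max = torque_limits
        self.goal_torque = np.zeros(self.joint_dim)

    def set_goal(self, action):
        self.goal_torque = self.scale_action(action)

    def run_controller(self):
        self.update()
        self.torques = self.goal_torque + self.torque_compensation
        return self.torques

    @property
    def name(self):
        return "JOINT_TORQUE"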
scripts/experiments.py | gijskant/phdthesis-experiments | 0 | 6632295 | <reponame>gijskant/phdthesis-experiments
#! /usr/bin/env python
# :noTabs=true:
# (c) Copyright (c) 2017 <NAME>
# (c) This file is distributed under the MIT License,
# (c) see the file LICENSE.
"""
experiments.py
Brief: Prepares and runs experiments based on json experiments file.
Author: <NAME> <<EMAIL>>
"""
import os
import sys
import json
from tools import *
"""
Generate files required for the experiments.
"""
def prepare_experiments(config, experiments):
tools = ToolRegistry(config).tools
for experiment in experiments:
experiment_type = experiment['type']
if (experiment_type == 'pbes'):
mcrl2 = tools['mcrl2']
mcrl2.prepare(experiment)
else:
print >> sys.stderr, 'Type not supported:', experiment_type
"""
List the experiments.
"""
def list_experiments(config, experiments):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.print_list(experiments)
"""
Run the experiments.
"""
def run_experiment(config, experiments, index):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.run(experiments, index)
"""
Analyse the results.
"""
def analyse_results(config, experiments):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.analyse(experiments)
"""
Read experiment data from a JSON file.
"""
def read_experiments(json_filename):
json_file = open(json_filename, 'r')
experiments = json.load(json_file)['data']
json_file.close()
return experiments
def read_config(json_filename):
json_file = open(json_filename, 'r')
config = json.load(json_file)
json_file.close()
return config
def usage():
command = os.path.basename(sys.argv[0])
return """Usage: {0} <config.json> <experiments.json> <prepare|list|run|analyse> [index]
For the command 'run', the [index] option is required.""".format(command)
def main():
if len(sys.argv) <= 3:
print >> sys.stderr, usage()
sys.exit(1)
print >> sys.stderr, os.path.basename(sys.argv[0])
config_filename = sys.argv[1]
experiments_filename = sys.argv[2]
action = sys.argv[3]
print >> sys.stderr, 'Config file: ', config_filename
config = read_config(config_filename)
print >> sys.stderr, 'Experiments file: ', experiments_filename
experiments = read_experiments(experiments_filename)
print >> sys.stderr, 'Experiments:', len(experiments)
print >> sys.stderr, 'Action: ', action
if action == 'run':
if len(sys.argv) <= 4:
print >> sys.stderr, usage()
sys.exit(1)
index = int(sys.argv[4])
run_experiment(config, experiments, index)
elif action == 'list':
list_experiments(config, experiments)
elif action == 'prepare':
prepare_experiments(config, experiments)
elif action == 'analyse':
analyse_results(config, experiments)
else:
print >> sys.stderr, usage()
sys.exit(1)
print >> sys.stderr, ''
print >> sys.stderr, 'Done.'
if __name__ == '__main__':
main()
| #! /usr/bin/env python
# :noTabs=true:
# (c) Copyright (c) 2017 <NAME>
# (c) This file is distributed under the MIT License,
# (c) see the file LICENSE.
"""
experiments.py
Brief: Prepares and runs experiments based on json experiments file.
Author: <NAME> <<EMAIL>>
"""
import os
import sys
import json
from tools import *
"""
Generate files required for the experiments.
"""
def prepare_experiments(config, experiments):
tools = ToolRegistry(config).tools
for experiment in experiments:
experiment_type = experiment['type']
if (experiment_type == 'pbes'):
mcrl2 = tools['mcrl2']
mcrl2.prepare(experiment)
else:
print >> sys.stderr, 'Type not supported:', experiment_type
"""
List the experiments.
"""
def list_experiments(config, experiments):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.print_list(experiments)
"""
Run the experiments.
"""
def run_experiment(config, experiments, index):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.run(experiments, index)
"""
Analyse the results.
"""
def analyse_results(config, experiments):
tools = ToolRegistry(config).tools
ltsmin = tools['ltsmin']
ltsmin.analyse(experiments)
"""
Read experiment data from a JSON file.
"""
def read_experiments(json_filename):
json_file = open(json_filename, 'r')
experiments = json.load(json_file)['data']
json_file.close()
return experiments
def read_config(json_filename):
json_file = open(json_filename, 'r')
config = json.load(json_file)
json_file.close()
return config
def usage():
command = os.path.basename(sys.argv[0])
return """Usage: {0} <config.json> <experiments.json> <prepare|list|run|analyse> [index]
For the command 'run', the [index] option is required.""".format(command)
def main():
if len(sys.argv) <= 3:
print >> sys.stderr, usage()
sys.exit(1)
print >> sys.stderr, os.path.basename(sys.argv[0])
config_filename = sys.argv[1]
experiments_filename = sys.argv[2]
action = sys.argv[3]
print >> sys.stderr, 'Config file: ', config_filename
config = read_config(config_filename)
print >> sys.stderr, 'Experiments file: ', experiments_filename
experiments = read_experiments(experiments_filename)
print >> sys.stderr, 'Experiments:', len(experiments)
print >> sys.stderr, 'Action: ', action
if action == 'run':
if len(sys.argv) <= 4:
print >> sys.stderr, usage()
sys.exit(1)
index = int(sys.argv[4])
run_experiment(config, experiments, index)
elif action == 'list':
list_experiments(config, experiments)
elif action == 'prepare':
prepare_experiments(config, experiments)
elif action == 'analyse':
analyse_results(config, experiments)
else:
print >> sys.stderr, usage()
sys.exit(1)
print >> sys.stderr, ''
print >> sys.stderr, 'Done.'
if __name__ == '__main__':
main() | en | 0.76206 | #! /usr/bin/env python # :noTabs=true: # (c) Copyright (c) 2017 <NAME> # (c) This file is distributed under the MIT License, # (c) see the file LICENSE. experiments.py Brief: Prepares and runs experiments based on json experiments file. Author: <NAME> <<EMAIL>> Generate files required for the experiments. List the experiments. Run the experiments. Analyse the results. Read experiment data from a JSON file. Usage: {0} <config.json> <experiments.json> <prepare|list|run|analyse> [index] For the command 'run', the [index] option is required. | 2.761049 | 3 |
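experiments.py expects two JSON files on the command line: an experiments file whose top-level 'data' list holds one entry per experiment (with at least a 'type' field, 'pbes' being the only type handled above), and a config file whose layout is whatever ToolRegistry expects (that class is not shown, so the 'tools' structure below is purely illustrative):

import json

with open("experiments.json", "w") as f:
    json.dump({"data": [{"type": "pbes", "name": "example-model"}]}, f, indent=2)

with open("config.json", "w") as f:
    json.dump({"tools": {"mcrl2": {"path": "/opt/mcrl2"}, "ltsmin": {"path": "/opt/ltsmin"}}}, f, indent=2)

# then, for example:  python experiments.py config.json experiments.json prepare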
handler.py | ilhan-mstf/tdd-sample-app | 0 | 6632296 | import json
import logging
import re
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def hello_endpoint(event, context):
logger.info(event)
body = {
"message": "Hello world!"
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
def sum_endpoint(event, context):
logger.info(event)
# Default response
body = {
"error": "Please provide at least one number e.g. numbers=1,2,3"
}
if 'body' in event and event['body'] is not None:
# Regex that matches "numbers=1,2,33"
r = re.compile("^numbers=([-+]?([1-9]\\d*|0),?)+$")
# Get numbers from post data
numbers = list(filter(r.match, event['body'].split('&')))
if len(numbers) > 0:
numbers = numbers[0].replace('numbers=', '')
numbers = numbers.split(',')
body = {
"sum": sum(map(int, numbers))
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
    return response
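# Minimal local check for sum_endpoint (a sketch: the event dict below mimics an
# API Gateway POST with a form-encoded body, which is an assumption rather than
# part of the original handlers):
if __name__ == "__main__":
    fake_event = {"body": "numbers=1,2,33"}
    print(sum_endpoint(fake_event, None))  # the response body should contain {"sum": 36}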
albumentations/augmentations/geometric/resize.py | code-kage/albumentations | 3 | 6632297
import cv2
import random
from . import functional as F
from ...core.transforms_interface import DualTransform, to_tuple
__all__ = ["RandomScale", "LongestMaxSize", "SmallestMaxSize", "Resize"]
class RandomScale(DualTransform):
"""Randomly resize the input. Output image size is different from the input image size.
Args:
scale_limit ((float, float) or float): scaling factor range. If scale_limit is a single float value, the
range will be (1 - scale_limit, 1 + scale_limit). Default: (0.9, 1.1).
interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
Default: cv2.INTER_LINEAR.
p (float): probability of applying the transform. Default: 0.5.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def __init__(self, scale_limit=0.1, interpolation=cv2.INTER_LINEAR, always_apply=False, p=0.5):
super(RandomScale, self).__init__(always_apply, p)
self.scale_limit = to_tuple(scale_limit, bias=1.0)
self.interpolation = interpolation
def get_params(self):
return {"scale": random.uniform(self.scale_limit[0], self.scale_limit[1])}
def apply(self, img, scale=0, interpolation=cv2.INTER_LINEAR, **params):
return F.scale(img, scale, interpolation)
def apply_to_bbox(self, bbox, **params):
# Bounding box coordinates are scale invariant
return bbox
def apply_to_keypoint(self, keypoint, scale=0, **params):
return F.keypoint_scale(keypoint, scale, scale)
def get_transform_init_args(self):
return {"interpolation": self.interpolation, "scale_limit": to_tuple(self.scale_limit, bias=-1.0)}
class LongestMaxSize(DualTransform):
"""Rescale an image so that maximum side is equal to max_size, keeping the aspect ratio of the initial image.
Args:
max_size (int): maximum size of the image after the transformation.
interpolation (OpenCV flag): interpolation method. Default: cv2.INTER_LINEAR.
p (float): probability of applying the transform. Default: 1.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def __init__(self, max_size=1024, interpolation=cv2.INTER_LINEAR, always_apply=False, p=1):
super(LongestMaxSize, self).__init__(always_apply, p)
self.interpolation = interpolation
self.max_size = max_size
def apply(self, img, interpolation=cv2.INTER_LINEAR, **params):
return F.longest_max_size(img, max_size=self.max_size, interpolation=interpolation)
def apply_to_bbox(self, bbox, **params):
# Bounding box coordinates are scale invariant
return bbox
def apply_to_keypoint(self, keypoint, **params):
height = params["rows"]
width = params["cols"]
scale = self.max_size / max([height, width])
return F.keypoint_scale(keypoint, scale, scale)
def get_transform_init_args_names(self):
return ("max_size", "interpolation")
class SmallestMaxSize(DualTransform):
"""Rescale an image so that minimum side is equal to max_size, keeping the aspect ratio of the initial image.
Args:
max_size (int): maximum size of smallest side of the image after the transformation.
interpolation (OpenCV flag): interpolation method. Default: cv2.INTER_LINEAR.
p (float): probability of applying the transform. Default: 1.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def __init__(self, max_size=1024, interpolation=cv2.INTER_LINEAR, always_apply=False, p=1):
super(SmallestMaxSize, self).__init__(always_apply, p)
self.interpolation = interpolation
self.max_size = max_size
def apply(self, img, interpolation=cv2.INTER_LINEAR, **params):
return F.smallest_max_size(img, max_size=self.max_size, interpolation=interpolation)
def apply_to_bbox(self, bbox, **params):
return bbox
def apply_to_keypoint(self, keypoint, **params):
height = params["rows"]
width = params["cols"]
scale = self.max_size / min([height, width])
return F.keypoint_scale(keypoint, scale, scale)
def get_transform_init_args_names(self):
return ("max_size", "interpolation")
class Resize(DualTransform):
"""Resize the input to the given height and width.
Args:
height (int): desired height of the output.
width (int): desired width of the output.
interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
Default: cv2.INTER_LINEAR.
p (float): probability of applying the transform. Default: 1.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def __init__(self, height, width, interpolation=cv2.INTER_LINEAR, always_apply=False, p=1):
super(Resize, self).__init__(always_apply, p)
self.height = height
self.width = width
self.interpolation = interpolation
def apply(self, img, interpolation=cv2.INTER_LINEAR, **params):
return F.resize(img, height=self.height, width=self.width, interpolation=interpolation)
def apply_to_bbox(self, bbox, **params):
# Bounding box coordinates are scale invariant
return bbox
def apply_to_keypoint(self, keypoint, **params):
height = params["rows"]
width = params["cols"]
scale_x = self.width / width
scale_y = self.height / height
return F.keypoint_scale(keypoint, scale_x, scale_y)
def get_transform_init_args_names(self):
return ("height", "width", "interpolation")
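# Minimal usage sketch (illustrative, not part of the original module): every
# transform defined above is callable on named targets and returns a dict of
# results; the input shape below is arbitrary.
if __name__ == "__main__":
    import numpy as np

    dummy = np.zeros((480, 640, 3), dtype=np.uint8)
    resized = Resize(height=256, width=256, p=1)(image=dummy)["image"]
    print(resized.shape)  # expected: (256, 256, 3)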
| import cv2
bin/AlarmScriptController.py | ToshihitoKon/crystal-signal-lite | 19 | 6632298
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import threading
import time
import subprocess
from os.path import isfile, join
from os import listdir
import json
# - - - - - - - - - - - - - - - - - -
# - ALARM SCRIPT CONTROLLER CLASS -
# - - - - - - - - - - - - - - - - - -
class AlarmScriptController:
def __init__(self):
        pass
def executeAlarmScript(self):
path = "/var/lib/crystal-signal/scripts/"
settings = self.getScriptSettings()
availableScriptNames = self.getScriptNames()
scriptName = settings['dropdown5']
        if scriptName != "---" and scriptName in availableScriptNames:
try:
txt = path + scriptName
subprocess.Popen(txt)
except:
print 'cannot open', scriptName
def getScriptSettings(self):
path = "/var/lib/crystal-signal/ScriptSettings.json"
if not isfile(path):
buttonSettingsInit = {'dropdown1': "---",
'dropdown2': "---",
'dropdown3': "---",
'dropdown4': "---",
'dropdown5': "---"}
with open(path, 'w+') as outfile:
json.dump(buttonSettingsInit, outfile)
with open(path) as data:
return json.load(data)
def getScriptNames(self):
path = "/var/lib/crystal-signal/scripts"
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
return onlyfiles
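# Minimal usage sketch (assumes the /var/lib/crystal-signal paths exist on the
# device; illustrative only, not part of the original class):
if __name__ == '__main__':
    controller = AlarmScriptController()
    print controller.getScriptNames()
    controller.executeAlarmScript()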
# - - - - - - - - - - - - - - - -
# - - - - - - MEMO - - - - - - -
# - - - - - - - - - - - - - - - -
| #!/usr/bin/python
src/signalalign/scripts/empire.py | kishwarshafin/signalAlign | 5 | 6632299
#!/usr/bin/env python
"""Run signal-to-reference alignments
"""
import pandas as pd
import glob
from .signalAlignLib import *
from .variantCallingLib import get_alignments_labels_and_mask
from .alignmentAnalysisLib import CallMethylation
from multiprocessing import Process, Queue, current_process, Manager
from signalalign.utils.fileHandlers import FolderHandler
from argparse import ArgumentParser
from random import shuffle
STEP = 12
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--file_directory', '-d', action='store',
dest='files_dir', required=True, type=str, default=None,
help="directory with MinION fast5 reads to align")
parser.add_argument('--ref', '-r', action='store',
dest='ref', required=True, type=str,
help="reference sequence to align to, in FASTA")
parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
required=False, type=str, default=None,
help="input HMM for template events, if you don't want the default")
parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
required=False, type=str, default=None,
help="input HMM for complement events, if you don't want the default")
parser.add_argument('--templateHDP', '-tH', action='store', dest='templateHDP', default=None,
help="template serialized HDP file")
parser.add_argument('--complementHDP', '-cH', action='store', dest='complementHDP', default=None,
help="complement serialized HDP file")
parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
help="Specify degenerate nucleotide options: "
"variant -> {ACGT}, twoWay -> {CE} threeWay -> {CEO}")
parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
default="threeState", help="decide which model to use, threeState by default")
parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
default=None, help="posterior match probability threshold, Default: 0.01")
parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
required=False, default=None, help="number of diagonals to expand around each anchor")
parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
required=False, default=None, help='amount to remove from an anchor constraint')
parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
required=False, default=None, help="tab separated table with regions to align to")
parser.add_argument('---un-banded', '-ub', action='store_false', dest='banded',
default=True, help='flag, turn off banding')
parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
default=4, type=int, help="number of jobs to run concurrently")
parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
default=500, type=int, help="maximum number of reads to align")
parser.add_argument('--cycles', dest='cycles', default=1, required=False, type=int)
parser.add_argument('--output_location', '-o', action='store', dest='out',
required=True, type=str, default=None,
help="directory to put the alignments")
args = parser.parse_args()
return args
def get_first_sequence(input_fasta):
input_sequence = ""
for header, comment, sequence in read_fasta(input_fasta):
input_sequence += sequence
break
return input_sequence
def make_degenerate_reference_iterator(input_sequence, step, block_size=1):
"""
input_sequence: string, input nucleotide sequence
block_size: not implemented
step: number of bases between degenerate characters
:return (subbed sequence, complement subbed sequence)
"""
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
for s in range(0, step):
positions = range(s, len(input_sequence), step)
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
yield ''.join(t_seq), ''.join(c_seq)
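# Worked example of the masking pattern above: with step=3, the register-0 pair
# masks positions 0 and 3 of "ACGTAC", giving "XCGXAC" on the template strand;
# register 1 masks positions 1 and 4, and so on.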
def write_degenerate_reference_set(input_fasta, out_path, step):
# get the first sequence from the FASTA
seq = ""
for header, comment, sequence in read_fasta(input_fasta):
seq += sequence
break
length = len(seq)
for i, s in enumerate(make_degenerate_reference_iterator(input_sequence=seq, step=step)):
with open(out_path + "forward_sub{i}.txt".format(i=i), 'w') as f:
f.write("{seq}".format(seq=s[0]))
with open(out_path + "backward_sub{i}.txt".format(i=i), 'w') as f:
f.write("{seq}".format(seq=s[1]))
return True, length
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def run_methyl_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def load_data(file_path):
data = pd.read_table(file_path,
usecols=(0, 1, 2, 3, 4, 5, 6),
names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
dtype={'site': np.int64,
'strand': np.str,
'pC': np.float64,
'pmC': np.float64,
'phmC': np.float64,
'read': np.str,
})
return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
return [probs[3], probs[2], probs[1], probs[0]]
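# e.g. rc_probs([0.1, 0.2, 0.3, 0.4]) -> [0.4, 0.3, 0.2, 0.1]: the A/T and C/G
# probabilities swap places, matching a call made on the reverse-complemented strand.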
def update_reference(data, reference_sequence, register, min_depth=0, get_sites=False):
d = load_data(data)
ref = list(reference_sequence)
# todo remove this
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv.{}".format(register)) and read['strand'] == 't') or
                    (read['read'].endswith(".backward.tsv.{}".format(register)) and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
called_base = marginal_prob.map(lambda p: p / sum(marginal_prob)).argmax()[1]
if called_base != ref[site]:
print("Changing {orig} to {new} at {site}".format(orig=ref[site], new=called_base, site=site))
if get_sites is False:
ref[site] = called_base
else:
add_to_candidates(site)
if get_sites is True:
return candidate_sites
else:
return ''.join(ref)
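# Worked example for the combination above: if the summed forward marginal at a
# site is [pA, pC, pG, pT] = [1, 5, 0, 0] and the backward marginal is [0, 0, 4, 2],
# rc_probs flips the backward strand to [2, 4, 0, 0]; the combined vector
# [3, 9, 0, 0] favours 'pC', so the called base is 'C'.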
def main(args):
# parse args
args = parse_args()
command_line = " ".join(sys.argv[:])
print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)
start_message = """
# Starting Empire Error-Correction
# Aligning files from: {fileDir}
# Aligning to reference: {reference}
# Aligning maximum of {nbFiles} files
# Using model: {model}
# Using banding: {banding}
# Aligning to regions in: {regions}
# Non-default template HMM: {inThmm}
# Non-default complement HMM: {inChmm}
# Template HDP: {tHdp}
# Complement HDP: {cHdp}
""".format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files, banding=args.banded,
inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
tHdp=args.templateHDP, cHdp=args.complementHDP)
print(start_message, file=sys.stdout)
if not os.path.isfile(args.ref):
print("Did not find valid reference file", file=sys.stderr)
sys.exit(1)
temp_folder = FolderHandler()
temp_dir_path = temp_folder.open_folder(args.out + "tempFiles_errorCorrection")
reference_sequence = args.ref
for cycle in range(0, args.cycles):
check, reference_sequence_length = write_degenerate_reference_set(input_fasta=reference_sequence,
out_path=temp_dir_path, step=STEP)
assert check, "Problem making degenerate reference sequence set"
# index the reference for bwa
print("signalAlign - indexing reference", file=sys.stderr)
bwa_ref_index = get_bwa_index(reference_sequence, temp_dir_path)
print("signalAlign - indexing reference, done", file=sys.stderr)
# setup workers for multiprocessing
workers = args.nb_jobs
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
# list of alignment files
fast5s = [x for x in os.listdir(args.files_dir) if x.endswith(".fast5")]
# take only some
if args.nb_files < len(fast5s):
shuffle(fast5s)
fast5s = fast5s[:args.nb_files]
for fast5 in fast5s:
alignment_args = {
"forward_reference": None,
"backward_reference": None,
"path_to_EC_refs": temp_dir_path,
"destination": temp_dir_path,
"stateMachineType": args.stateMachineType,
"bwa_index": bwa_ref_index,
"in_templateHmm": args.in_T_Hmm,
"in_complementHmm": args.in_C_Hmm,
"in_templateHdp": args.templateHDP,
"in_complementHdp": args.complementHDP,
"banded": args.banded,
"sparse_output": True,
"in_fast5": args.files_dir + fast5,
"threshold": args.threshold,
"diagonal_expansion": args.diag_expansion,
"constraint_trim": args.constraint_trim,
"target_regions": None,
"degenerate": degenerate_enum(args.degenerate),
}
#alignment = SignalAlignment(**alignment_args)
#alignment.run()
work_queue.put(alignment_args)
for w in range(workers):
p = Process(target=aligner, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
print("\n# signalAlign - finished alignments\n", file=sys.stderr)
print("\n# signalAlign - finished alignments\n", file=sys.stdout)
# working sequence is a string, that has the reference we're going to update this cycle
working_sequence = get_first_sequence(reference_sequence)
# register is the relative position that is being N-ed:
# ACGTAGACAATA --> NCGTAGNCAATA = register 0
# ACGTAGACAATA --> ANGTAGANAATA = register 1 ...
for register in range(0, STEP):
print("# Starting Variant Calling, register: {}...".format(register), file=sys.stdout, end='\n')
print("# Starting Variant Calling, register: {}...".format(register), file=sys.stderr, end='')
# cull the alignment files for this register
alns, forward_mask = get_alignments_labels_and_mask(
path_to_alignments=temp_dir_path + "*.tsv.{}".format(register),
max=args.nb_files,
suffix=".{}".format(register)
)
# this is the list of positions that we're going to look at, based on this register
degenerate_positions = {'forward': list(range(register, reference_sequence_length, STEP)),
'backward': list(range(register, reference_sequence_length, STEP))
}
# place to put the marginal probs
variant_call_file = temp_folder.add_file_path("variants.{cycle}.{reg}.calls".format(cycle=cycle,
reg=register))
# arguments for multiprocessing
for aln, forward_bool in zip(alns, forward_mask):
call_methyl_args = {
"sequence": None,
"alignment_file": aln,
"forward": forward_bool,
"out_file": variant_call_file,
"positions": degenerate_positions,
"degenerate_type": degenerate_enum(args.degenerate),
}
#c = CallMethylation(**call_methyl_args)
#c.write()
work_queue.put(call_methyl_args)
for w in range(workers):
p = Process(target=run_methyl_caller, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
# this is where the per-register update happens
working_sequence = update_reference(variant_call_file, working_sequence, register,
min_depth=0, get_sites=False)
# remove alignments for this register
for f in glob.glob(temp_dir_path + "*.tsv.{}".format(register)):
os.remove(f)
print("done", file=sys.stdout, end="\n")
print("done", file=sys.stderr, end="\n")
# add a file for this cycle
ref_path = temp_folder.add_file_path("iteration.{cycle}.fa".format(cycle=cycle))
# write it to a file
write_fasta("iteration.{cycle}.fa".format(cycle=cycle), working_sequence, open(ref_path, 'w'))
# update the path to the reference for the next cycle
reference_sequence = ref_path
return
if __name__ == "__main__":
sys.exit(main(sys.argv))
| #!/usr/bin/env python
"""Run signal-to-reference alignments
"""
import pandas as pd
import glob
from .signalAlignLib import *
from .variantCallingLib import get_alignments_labels_and_mask
from .alignmentAnalysisLib import CallMethylation
from multiprocessing import Process, Queue, current_process, Manager
from signalalign.utils.fileHandlers import FolderHandler
from argparse import ArgumentParser
from random import shuffle
STEP = 12
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--file_directory', '-d', action='store',
dest='files_dir', required=True, type=str, default=None,
help="directory with MinION fast5 reads to align")
parser.add_argument('--ref', '-r', action='store',
dest='ref', required=True, type=str,
help="reference sequence to align to, in FASTA")
parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
required=False, type=str, default=None,
help="input HMM for template events, if you don't want the default")
parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
required=False, type=str, default=None,
help="input HMM for complement events, if you don't want the default")
parser.add_argument('--templateHDP', '-tH', action='store', dest='templateHDP', default=None,
help="template serialized HDP file")
parser.add_argument('--complementHDP', '-cH', action='store', dest='complementHDP', default=None,
help="complement serialized HDP file")
parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
help="Specify degenerate nucleotide options: "
"variant -> {ACGT}, twoWay -> {CE} threeWay -> {CEO}")
parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
default="threeState", help="decide which model to use, threeState by default")
parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
default=None, help="posterior match probability threshold, Default: 0.01")
parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
required=False, default=None, help="number of diagonals to expand around each anchor")
parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
required=False, default=None, help='amount to remove from an anchor constraint')
parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
required=False, default=None, help="tab separated table with regions to align to")
parser.add_argument('---un-banded', '-ub', action='store_false', dest='banded',
default=True, help='flag, turn off banding')
parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
default=4, type=int, help="number of jobs to run concurrently")
parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
default=500, type=int, help="maximum number of reads to align")
parser.add_argument('--cycles', dest='cycles', default=1, required=False, type=int)
parser.add_argument('--output_location', '-o', action='store', dest='out',
required=True, type=str, default=None,
help="directory to put the alignments")
args = parser.parse_args()
return args
def get_first_sequence(input_fasta):
input_sequence = ""
for header, comment, sequence in read_fasta(input_fasta):
input_sequence += sequence
break
return input_sequence
def make_degenerate_reference_iterator(input_sequence, step, block_size=1):
"""
input_sequence: string, input nucleotide sequence
out_path: string, path to directory to put new sequences with substituted degenerate characters
block_size: not implemented
step: number of bases between degenerate characters
:return (subbed sequence, complement subbed sequence)
"""
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
for s in range(0, step):
positions = range(s, len(input_sequence), step)
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
yield ''.join(t_seq), ''.join(c_seq)
def write_degenerate_reference_set(input_fasta, out_path, step):
# get the first sequence from the FASTA
seq = ""
for header, comment, sequence in read_fasta(input_fasta):
seq += sequence
break
length = len(seq)
for i, s in enumerate(make_degenerate_reference_iterator(input_sequence=seq, step=step)):
with open(out_path + "forward_sub{i}.txt".format(i=i), 'w') as f:
f.write("{seq}".format(seq=s[0]))
with open(out_path + "backward_sub{i}.txt".format(i=i), 'w') as f:
f.write("{seq}".format(seq=s[1]))
return True, length
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
except Exception as e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def run_methyl_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
except Exception as e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def load_data(file_path):
data = pd.read_table(file_path,
usecols=(0, 1, 2, 3, 4, 5, 6),
names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
dtype={'site': np.int64,
'strand': np.str,
'pC': np.float64,
'pmC': np.float64,
'phmC': np.float64,
'read': np.str,
})
return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
return [probs[3], probs[2], probs[1], probs[0]]
def update_reference(data, reference_sequence, register, min_depth=0, get_sites=False):
d = load_data(data)
ref = list(reference_sequence)
# todo remove this
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv.{}".format(register)) and read['strand'] == 't') or
(read['read'].endswith(".backward.tsv".format(register)) and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
called_base = marginal_prob.map(lambda p: p / sum(marginal_prob)).argmax()[1]
if called_base != ref[site]:
print("Changing {orig} to {new} at {site}".format(orig=ref[site], new=called_base, site=site))
if get_sites is False:
ref[site] = called_base
else:
add_to_candidates(site)
if get_sites is True:
return candidate_sites
else:
return ''.join(ref)
def main(args):
# parse args
args = parse_args()
command_line = " ".join(sys.argv[:])
print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)
start_message = """
# Starting Empire Error-Correction
# Aligning files from: {fileDir}
# Aligning to reference: {reference}
# Aligning maximum of {nbFiles} files
# Using model: {model}
# Using banding: {banding}
# Aligning to regions in: {regions}
# Non-default template HMM: {inThmm}
# Non-default complement HMM: {inChmm}
# Template HDP: {tHdp}
# Complement HDP: {cHdp}
""".format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files, banding=args.banded,
inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
tHdp=args.templateHDP, cHdp=args.complementHDP)
print(start_message, file=sys.stdout)
if not os.path.isfile(args.ref):
print("Did not find valid reference file", file=sys.stderr)
sys.exit(1)
temp_folder = FolderHandler()
temp_dir_path = temp_folder.open_folder(args.out + "tempFiles_errorCorrection")
reference_sequence = args.ref
for cycle in range(0, args.cycles):
check, reference_sequence_length = write_degenerate_reference_set(input_fasta=reference_sequence,
out_path=temp_dir_path, step=STEP)
assert check, "Problem making degenerate reference sequence set"
# index the reference for bwa
print("signalAlign - indexing reference", file=sys.stderr)
bwa_ref_index = get_bwa_index(reference_sequence, temp_dir_path)
print("signalAlign - indexing reference, done", file=sys.stderr)
# setup workers for multiprocessing
workers = args.nb_jobs
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
# list of alignment files
fast5s = [x for x in os.listdir(args.files_dir) if x.endswith(".fast5")]
# take only some
if args.nb_files < len(fast5s):
shuffle(fast5s)
fast5s = fast5s[:args.nb_files]
for fast5 in fast5s:
alignment_args = {
"forward_reference": None,
"backward_reference": None,
"path_to_EC_refs": temp_dir_path,
"destination": temp_dir_path,
"stateMachineType": args.stateMachineType,
"bwa_index": bwa_ref_index,
"in_templateHmm": args.in_T_Hmm,
"in_complementHmm": args.in_C_Hmm,
"in_templateHdp": args.templateHDP,
"in_complementHdp": args.complementHDP,
"banded": args.banded,
"sparse_output": True,
"in_fast5": args.files_dir + fast5,
"threshold": args.threshold,
"diagonal_expansion": args.diag_expansion,
"constraint_trim": args.constraint_trim,
"target_regions": None,
"degenerate": degenerate_enum(args.degenerate),
}
#alignment = SignalAlignment(**alignment_args)
#alignment.run()
work_queue.put(alignment_args)
for w in range(workers):
p = Process(target=aligner, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
print("\n# signalAlign - finished alignments\n", file=sys.stderr)
print("\n# signalAlign - finished alignments\n", file=sys.stdout)
# working sequence is a string, that has the reference we're going to update this cycle
working_sequence = get_first_sequence(reference_sequence)
# register is the relative position that is being N-ed:
# ACGTAGACAATA --> NCGTAGNCAATA = register 0
# ACGTAGACAATA --> ANGTAGANAATA = register 1 ...
for register in range(0, STEP):
print("# Starting Variant Calling, register: {}...".format(register), file=sys.stdout, end='\n')
print("# Starting Variant Calling, register: {}...".format(register), file=sys.stderr, end='')
# cull the alignment files for this register
alns, forward_mask = get_alignments_labels_and_mask(
path_to_alignments=temp_dir_path + "*.tsv.{}".format(register),
max=args.nb_files,
suffix=".{}".format(register)
)
# this is the list of positions that we're going to look at, based on this register
degenerate_positions = {'forward': list(range(register, reference_sequence_length, STEP)),
'backward': list(range(register, reference_sequence_length, STEP))
}
# place to put the marginal probs
variant_call_file = temp_folder.add_file_path("variants.{cycle}.{reg}.calls".format(cycle=cycle,
reg=register))
# arguments for multiprocessing
for aln, forward_bool in zip(alns, forward_mask):
call_methyl_args = {
"sequence": None,
"alignment_file": aln,
"forward": forward_bool,
"out_file": variant_call_file,
"positions": degenerate_positions,
"degenerate_type": degenerate_enum(args.degenerate),
}
#c = CallMethylation(**call_methyl_args)
#c.write()
work_queue.put(call_methyl_args)
for w in range(workers):
p = Process(target=run_methyl_caller, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
# this is where the per-register update happens
working_sequence = update_reference(variant_call_file, working_sequence, register,
min_depth=0, get_sites=False)
# remove alignments for this register
for f in glob.glob(temp_dir_path + "*.tsv.{}".format(register)):
os.remove(f)
print("done", file=sys.stdout, end="\n")
print("done", file=sys.stderr, end="\n")
# add a file for this cycle
ref_path = temp_folder.add_file_path("iteration.{cycle}.fa".format(cycle=cycle))
# write it to a file
write_fasta("iteration.{cycle}.fa".format(cycle=cycle), working_sequence, open(ref_path, 'w'))
# update the path to the reference for the next cycle
reference_sequence = ref_path
return
if __name__ == "__main__":
sys.exit(main(sys.argv))
| en | 0.771292 | #!/usr/bin/env python Run signal-to-reference alignments input_sequence: string, input nucleotide sequence out_path: string, path to directory to put new sequences with substituted degenerate characters block_size: not implemented step: number of bases between degenerate characters :return (subbed sequence, complement subbed sequence) # get the first sequence from the FASTA # todo remove this # parse args # Starting Empire Error-Correction # Aligning files from: {fileDir} # Aligning to reference: {reference} # Aligning maximum of {nbFiles} files # Using model: {model} # Using banding: {banding} # Aligning to regions in: {regions} # Non-default template HMM: {inThmm} # Non-default complement HMM: {inChmm} # Template HDP: {tHdp} # Complement HDP: {cHdp} # index the reference for bwa # setup workers for multiprocessing # list of alignment files # take only some #alignment = SignalAlignment(**alignment_args) #alignment.run() # signalAlign - finished alignments\n", file=sys.stderr) # signalAlign - finished alignments\n", file=sys.stdout) # working sequence is a string, that has the reference we're going to update this cycle # register is the relative position that is being N-ed: # ACGTAGACAATA --> NCGTAGNCAATA = register 0 # ACGTAGACAATA --> ANGTAGANAATA = register 1 ... # cull the alignment files for this register # this is the list of positions that we're going to look at, based on this register # place to put the marginal probs # arguments for multiprocessing #c = CallMethylation(**call_methyl_args) #c.write() # this is where the per-register update happens # remove alignments for this register # add a file for this cycle # write it to a file # update the path to the reference for the next cycle | 2.333734 | 2 |
pong/zadania/pong_z7.py | bladerunner666/pyt101 | 0 | 6632300
# coding=utf-8
import pygame
import pygame.locals
class Board(object):
"""
    Game board. Responsible for drawing the game window.
"""
def __init__(self, width, height):
"""
        Game board constructor. Prepares the game window.
:param width:
:param height:
"""
self.surface = pygame.display.set_mode((width, height), 0, 32)
pygame.display.set_caption('Simple Pong')
def draw(self, *args):
"""
        Draws the game window
        :param args: list of objects to be drawn
"""
background = (230, 255, 255)
self.surface.fill(background)
for drawable in args:
drawable.draw_on(self.surface)
        # only at this point does the actual drawing in the game window happen;
        # before this we were only deciding what should be drawn and how
pygame.display.update()
class PongGame(object):
"""
    Ties all the elements of the game together.
"""
def __init__(self, width, height):
pygame.init()
self.board = Board(width, height)
        # the clock we will use to control how fast consecutive
        # game frames are drawn
self.fps_clock = pygame.time.Clock()
self.ball = Ball(width=20, height=20, x=width/2, y=height/2)
self.player1 = Racket(width=80, height=20, x=width/2 - 40, y=height - 40)
self.player2 = Racket(width=80, height=20, x=width/2 - 40, y=20, color=(0, 0, 0))
self.ai = Ai(self.player2, self.ball)
        self.judge = Judge(self.board, self.ball, self.player1, self.player2)
def run(self):
"""
        Main loop of the program
"""
while not self.handle_events():
            # keep looping until we receive the signal to quit
self.ball.move(self.board, self.player1, self.player2)
self.board.draw(
self.ball,
self.player1,
self.player2,
self.judge,
)
self.ai.move()
self.fps_clock.tick(30)
def handle_events(self):
"""
        Handles system events; here we interpret e.g. mouse movement
        :return True if pygame reported the quit-game event
"""
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
pygame.quit()
return True
if event.type == pygame.locals.MOUSEMOTION:
                # the mouse controls the first player's movement
x, y = event.pos
self.player1.move(x)
class Drawable(object):
"""
    Base class for drawable objects
"""
def __init__(self, width, height, x, y, color=(0, 255, 0)):
self.width = width
self.height = height
self.color = color
self.surface = pygame.Surface([width, height], pygame.SRCALPHA, 32).convert_alpha()
self.rect = self.surface.get_rect(x=x, y=y)
def draw_on(self, surface):
surface.blit(self.surface, self.rect)
class Ball(Drawable):
"""
    The ball; it controls its own speed and direction of movement.
"""
def __init__(self, width, height, x, y, color=(255, 0, 0), x_speed=3, y_speed=3):
super(Ball, self).__init__(width, height, x, y, color)
pygame.draw.ellipse(self.surface, self.color, [0, 0, self.width, self.height])
self.x_speed = x_speed
self.y_speed = y_speed
self.start_x = x
self.start_y = y
def bounce_y(self):
"""
        Reverses the velocity vector along the Y axis
"""
self.y_speed *= -1
def bounce_x(self):
"""
        Reverses the velocity vector along the X axis
"""
self.x_speed *= -1
def reset(self):
"""
        Puts the ball back at its starting position and reverses the velocity vector along the Y axis
"""
self.rect.x, self.rect.y = self.start_x, self.start_y
self.bounce_y()
def move(self, board, *args):
"""
        Moves the ball by its velocity vector
"""
self.rect.x += self.x_speed
self.rect.y += self.y_speed
if self.rect.x < 0 or self.rect.x > board.surface.get_width():
self.bounce_x()
if self.rect.y < 0 or self.rect.y > board.surface.get_height():
self.bounce_y()
for racket in args:
if self.rect.colliderect(racket.rect):
self.bounce_y()
class Racket(Drawable):
"""
    The racket; it moves along the X axis with a speed limit.
"""
def __init__(self, width, height, x, y, color=(0, 255, 0), max_speed=10):
super(Racket, self).__init__(width, height, x, y, color)
self.max_speed = max_speed
self.surface.fill(color)
def move(self, x):
"""
        Moves the racket to the requested position.
"""
delta = x - self.rect.x
if abs(delta) > self.max_speed:
delta = self.max_speed if delta > 0 else -self.max_speed
self.rect.x += delta
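        # Worked example of the clamp above: with max_speed=10, asking a racket at
        # x=100 to move to x=150 limits the step to +10, so it ends up at x=110.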
class Ai(object):
"""
    The opponent; it steers its racket by watching the ball.
"""
def __init__(self, racket, ball):
self.ball = ball
self.racket = racket
def move(self):
x = self.ball.rect.centerx
self.racket.move(x)
class Judge(object):
"""
    The game judge (scorekeeper)
"""
def __init__(self, board, ball, *args):
self.ball = ball
self.board = board
self.rackets = args
self.score = [0, 0]
        # before rendering any text we have to initialise PyGame's font machinery
pygame.font.init()
font_path = pygame.font.match_font('arial')
self.font = pygame.font.Font(font_path, 64)
def update_score(self, board_height):
"""
        Awards points when needed and puts the ball back at its starting position.
"""
if self.ball.rect.y < 0:
self.score[0] += 1
self.ball.reset()
elif self.ball.rect.y > board_height:
self.score[1] += 1
self.ball.reset()
def draw_text(self, surface, text, x, y):
"""
        Draws the given text at the given position
"""
text = self.font.render(text, True, (150, 150, 150))
rect = text.get_rect()
rect.center = x, y
surface.blit(text, rect)
def draw_on(self, surface):
"""
        Updates and draws the score
"""
height = self.board.surface.get_height()
self.update_score(height)
width = self.board.surface.get_width()
self.draw_text(surface, "Player: {}".format(self.score[0]), width/2, height * 0.3)
self.draw_text(surface, "Computer: {}".format(self.score[1]), width/2, height * 0.7)
# This part should always be at the very end of the module (this file is a module);
# we want to start the game only after all the classes have been declared.
if __name__ == "__main__":
game = PongGame(800, 400)
game.run()
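# Running this module directly (python pong_z7.py) opens an 800x400 window; the
# bottom paddle follows the mouse and closing the window quits the game.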
| # coding=utf-8
import pygame
import pygame.locals
class Board(object):
"""
Plansza do gry. Odpowiada za rysowanie okna gry.
"""
def __init__(self, width, height):
"""
Konstruktor planszy do gry. Przygotowuje okienko gry.
:param width:
:param height:
"""
self.surface = pygame.display.set_mode((width, height), 0, 32)
pygame.display.set_caption('Simple Pong')
def draw(self, *args):
"""
Rysuje okno gry
:param args: lista obiektów do narysowania
"""
background = (230, 255, 255)
self.surface.fill(background)
for drawable in args:
drawable.draw_on(self.surface)
# dopiero w tym miejscu następuje fatyczne rysowanie
# w oknie gry, wcześniej tylko ustalaliśmy co i jak ma zostać narysowane
pygame.display.update()
class PongGame(object):
"""
Łączy wszystkie elementy gry w całość.
"""
def __init__(self, width, height):
pygame.init()
self.board = Board(width, height)
# zegar którego użyjemy do kontrolowania szybkości rysowania
# kolejnych klatek gry
self.fps_clock = pygame.time.Clock()
self.ball = Ball(width=20, height=20, x=width/2, y=height/2)
self.player1 = Racket(width=80, height=20, x=width/2 - 40, y=height - 40)
self.player2 = Racket(width=80, height=20, x=width/2 - 40, y=20, color=(0, 0, 0))
self.ai = Ai(self.player2, self.ball)
self.judge = Judge(self.board, self.ball, self.player2, self.ball)
def run(self):
"""
Główna pętla programu
"""
while not self.handle_events():
# działaj w pętli do momentu otrzymania sygnału do wyjścia
self.ball.move(self.board, self.player1, self.player2)
self.board.draw(
self.ball,
self.player1,
self.player2,
self.judge,
)
self.ai.move()
self.fps_clock.tick(30)
def handle_events(self):
"""
Obsługa zdarzeń systemowych, tutaj zinterpretujemy np. ruchy myszką
:return True jeżeli pygame przekazał zdarzenie wyjścia z gry
"""
for event in pygame.event.get():
if event.type == pygame.locals.QUIT:
pygame.quit()
return True
if event.type == pygame.locals.MOUSEMOTION:
# myszka steruje ruchem pierwszego gracza
x, y = event.pos
self.player1.move(x)
class Drawable(object):
"""
Klasa bazowa dla rysowanych obiektów
"""
def __init__(self, width, height, x, y, color=(0, 255, 0)):
self.width = width
self.height = height
self.color = color
self.surface = pygame.Surface([width, height], pygame.SRCALPHA, 32).convert_alpha()
self.rect = self.surface.get_rect(x=x, y=y)
def draw_on(self, surface):
surface.blit(self.surface, self.rect)
class Ball(Drawable):
"""
Piłeczka, sama kontroluje swoją prędkość i kierunek poruszania się.
"""
def __init__(self, width, height, x, y, color=(255, 0, 0), x_speed=3, y_speed=3):
super(Ball, self).__init__(width, height, x, y, color)
pygame.draw.ellipse(self.surface, self.color, [0, 0, self.width, self.height])
self.x_speed = x_speed
self.y_speed = y_speed
self.start_x = x
self.start_y = y
def bounce_y(self):
"""
Odwraca wektor prędkości w osi Y
"""
self.y_speed *= -1
def bounce_x(self):
"""
Odwraca wektor prędkości w osi X
"""
self.x_speed *= -1
def reset(self):
"""
Ustawia piłeczkę w położeniu początkowym i odwraca wektor prędkości w osi Y
"""
self.rect.x, self.rect.y = self.start_x, self.start_y
self.bounce_y()
def move(self, board, *args):
"""
Przesuwa piłeczkę o wektor prędkości
"""
self.rect.x += self.x_speed
self.rect.y += self.y_speed
if self.rect.x < 0 or self.rect.x > board.surface.get_width():
self.bounce_x()
if self.rect.y < 0 or self.rect.y > board.surface.get_height():
self.bounce_y()
for racket in args:
if self.rect.colliderect(racket.rect):
self.bounce_y()
class Racket(Drawable):
"""
Rakietka, porusza się w osi X z ograniczeniem prędkości.
"""
def __init__(self, width, height, x, y, color=(0, 255, 0), max_speed=10):
super(Racket, self).__init__(width, height, x, y, color)
self.max_speed = max_speed
self.surface.fill(color)
def move(self, x):
"""
Przesuwa rakietkę w wyznaczone miejsce.
"""
delta = x - self.rect.x
if abs(delta) > self.max_speed:
delta = self.max_speed if delta > 0 else -self.max_speed
self.rect.x += delta
class Ai(object):
"""
Przeciwnik, steruje swoją rakietką na podstawie obserwacji piłeczki.
"""
def __init__(self, racket, ball):
self.ball = ball
self.racket = racket
def move(self):
x = self.ball.rect.centerx
self.racket.move(x)
class Judge(object):
"""
Sędzia gry
"""
def __init__(self, board, ball, *args):
self.ball = ball
self.board = board
self.rackets = args
self.score = [0, 0]
# Przed pisaniem tekstów, musimy zainicjować mechanizmy wyboru fontów PyGame
pygame.font.init()
font_path = pygame.font.match_font('arial')
self.font = pygame.font.Font(font_path, 64)
def update_score(self, board_height):
"""
Jeśli trzeba przydziela punkty i ustawia piłeczkę w początkowym położeniu.
"""
if self.ball.rect.y < 0:
self.score[0] += 1
self.ball.reset()
elif self.ball.rect.y > board_height:
self.score[1] += 1
self.ball.reset()
def draw_text(self, surface, text, x, y):
"""
Rysuje wskazany tekst we wskazanym miejscu
"""
text = self.font.render(text, True, (150, 150, 150))
rect = text.get_rect()
rect.center = x, y
surface.blit(text, rect)
def draw_on(self, surface):
"""
Aktualizuje i rysuje wyniki
"""
height = self.board.surface.get_height()
self.update_score(height)
width = self.board.surface.get_width()
self.draw_text(surface, "Player: {}".format(self.score[0]), width/2, height * 0.3)
self.draw_text(surface, "Computer: {}".format(self.score[1]), width/2, height * 0.7)
# Ta część powinna być zawsze na końcu modułu (ten plik jest modułem)
# chcemy uruchomić naszą grę dopiero po tym jak wszystkie klasy zostaną zadeklarowane
if __name__ == "__main__":
game = PongGame(800, 400)
game.run()
| pl | 0.998966 | # coding=utf-8 Plansza do gry. Odpowiada za rysowanie okna gry. Konstruktor planszy do gry. Przygotowuje okienko gry. :param width: :param height: Rysuje okno gry :param args: lista obiektów do narysowania # dopiero w tym miejscu następuje fatyczne rysowanie # w oknie gry, wcześniej tylko ustalaliśmy co i jak ma zostać narysowane Łączy wszystkie elementy gry w całość. # zegar którego użyjemy do kontrolowania szybkości rysowania # kolejnych klatek gry Główna pętla programu # działaj w pętli do momentu otrzymania sygnału do wyjścia Obsługa zdarzeń systemowych, tutaj zinterpretujemy np. ruchy myszką :return True jeżeli pygame przekazał zdarzenie wyjścia z gry # myszka steruje ruchem pierwszego gracza Klasa bazowa dla rysowanych obiektów Piłeczka, sama kontroluje swoją prędkość i kierunek poruszania się. Odwraca wektor prędkości w osi Y Odwraca wektor prędkości w osi X Ustawia piłeczkę w położeniu początkowym i odwraca wektor prędkości w osi Y Przesuwa piłeczkę o wektor prędkości Rakietka, porusza się w osi X z ograniczeniem prędkości. Przesuwa rakietkę w wyznaczone miejsce. Przeciwnik, steruje swoją rakietką na podstawie obserwacji piłeczki. Sędzia gry # Przed pisaniem tekstów, musimy zainicjować mechanizmy wyboru fontów PyGame Jeśli trzeba przydziela punkty i ustawia piłeczkę w początkowym położeniu. Rysuje wskazany tekst we wskazanym miejscu Aktualizuje i rysuje wyniki # Ta część powinna być zawsze na końcu modułu (ten plik jest modułem) # chcemy uruchomić naszą grę dopiero po tym jak wszystkie klasy zostaną zadeklarowane | 3.435099 | 3 |
Recursion.py | LeoYuanjieLi/PythonExercise | 0 | 6632301
def make_empty(seq):
"""
>>> make_empty([1, 2, 3, 4])
[]
>>> make_empty(('a', 'b', 'c'))
()
>>> make_empty("No, not me!")
''
"""
if type(seq) == list:
return list()
elif type(seq) == tuple:
return tuple()
elif type(seq) == str:
return str()
print(make_empty("I am Leo!"))
def insert_at_end(val, seq):
"""
>>> insert_at_end(5, [1, 3, 4, 6])
[1, 3, 4, 6, 5]
>>> insert_at_end('x', 'abc')
'abcx'
>>> insert_at_end(5, (1, 3, 4, 6))
(1, 3, 4, 6, 5)
"""
if type(seq) == list:
seq.append(val)
return seq
elif type(seq) == str:
seq = seq + val
return seq
elif type(seq) == tuple:
seq = seq + (val,)
return seq
a = '1234'
b = insert_at_end('5', a)
print(b)
def insert_in_front(val, seq):
"""
>>> insert_in_front(5, [1, 3, 4, 6])
[5, 1, 3, 4, 6]
>>> insert_in_front(5, (1, 3, 4, 6))
(5, 1, 3, 4, 6)
>>> insert_in_front('x', 'abc')
'xabc'
"""
if type(seq) == list:
seq = [val] + seq
return seq
elif type(seq) == tuple:
seq = (val,) + seq
return seq
elif type(seq) == str:
seq = str(val) + seq
return seq
c = insert_in_front(5,(1,3,4,6))
print(c)
def index_of(val, seq, start=0):
"""
>>> index_of(9, [1, 7, 11, 9, 10])
3
>>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5))
3
>>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5), 4)
6
>>> index_of('y', 'happy birthday')
4
>>> index_of('banana', ['apple', 'banana', 'cherry', 'date'])
1
>>> index_of(5, [2, 3, 4])
-1
>>> index_of('b', ['apple', 'banana', 'cherry', 'date'])
-1
"""
    # str, list and tuple all support index(value, start) and raise ValueError on a miss
    try:
        return seq.index(val, start)
    except ValueError:
        return -1
print(index_of('b', ['apple', 'banana', 'cherry', 'date']))
def remove_at(index, seq):
"""
>>> remove_at(3, [1, 7, 11, 9, 10])
[1, 7, 11, 10]
>>> remove_at(5, (1, 4, 6, 7, 0, 9, 3, 5))
(1, 4, 6, 7, 0, 3, 5)
>>> remove_at(2, "Yomrktown")
'Yorktown'
"""
if type(seq) == list:
seq.remove(seq[index])
return seq
elif type(seq) == tuple:
a = list(seq)
a.remove(a[index])
return tuple(a)
elif type(seq) == str:
a = list(seq)
a.remove(a[index])
return ''.join(a)
print(remove_at(2, "Yomrktown"))
def remove_val(val, seq):
"""
>>> remove_val(11, [1, 7, 11, 9, 10])
[1, 7, 9, 10]
>>> remove_val(15, (1, 15, 11, 4, 9))
(1, 11, 4, 9)
>>> remove_val('what', ('who', 'what', 'when', 'where', 'why', 'how'))
('who', 'when', 'where', 'why', 'how')
"""
def remove_all(val, seq):
"""
>>> remove_all(11, [1, 7, 11, 9, 11, 10, 2, 11])
[1, 7, 9, 10, 2]
>>> remove_all('i', 'Mississippi')
'Msssspp'
"""
def count(val, seq):
"""
>>> count(5, (1, 5, 3, 7, 5, 8, 5))
3
>>> count('s', 'Mississippi')
4
>>> count((1, 2), [1, 5, (1, 2), 7, (1, 2), 8, 5])
2
"""
def reverse(seq):
"""
>>> reverse([1, 2, 3, 4, 5])
[5, 4, 3, 2, 1]
>>> reverse(('shoe', 'my', 'buckle', 2, 1))
(1, 2, 'buckle', 'my', 'shoe')
>>> reverse('Python')
'nohtyP'
"""
def sort_sequence(seq):
"""
>>> sort_sequence([3, 4, 6, 7, 8, 2])
[2, 3, 4, 6, 7, 8]
>>> sort_sequence((3, 4, 6, 7, 8, 2))
(2, 3, 4, 6, 7, 8)
>>> sort_sequence("nothappy")
'ahnoppty'
"""
# if __name__ == "__main__":
# import doctest
# doctest.testmod()
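# Optional helper (an addition, not part of the original exercise file) for
# exercising the doctests of a single function above without running
# doctest.testmod() on the whole module:
def run_examples_for(func):
    """Run only the doctest examples embedded in func's docstring."""
    import doctest
    doctest.run_docstring_examples(func, {func.__name__: func}, verbose=True)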
| def make_empty(seq):
"""
>>> make_empty([1, 2, 3, 4])
[]
>>> make_empty(('a', 'b', 'c'))
()
>>> make_empty("No, not me!")
''
"""
if type(seq) == list:
return list()
elif type(seq) == tuple:
return tuple()
elif type(seq) == str:
return str()
print(make_empty("I am Leo!"))
def insert_at_end(val, seq):
"""
>>> insert_at_end(5, [1, 3, 4, 6])
[1, 3, 4, 6, 5]
>>> insert_at_end('x', 'abc')
'abcx'
>>> insert_at_end(5, (1, 3, 4, 6))
(1, 3, 4, 6, 5)
"""
if type(seq) == list:
seq.append(val)
return seq
elif type(seq) == str:
seq = seq + val
return seq
elif type(seq) == tuple:
seq = seq + (val,)
return seq
a = '1234'
b = insert_at_end('5', a)
print(b)
def insert_in_front(val, seq):
"""
>>> insert_in_front(5, [1, 3, 4, 6])
[5, 1, 3, 4, 6]
>>> insert_in_front(5, (1, 3, 4, 6))
(5, 1, 3, 4, 6)
>>> insert_in_front('x', 'abc')
'xabc'
"""
if type(seq) == list:
seq = [val] + seq
return seq
elif type(seq) == tuple:
seq = (val,) + seq
return seq
elif type(seq) == str:
seq = str(val) + seq
return seq
c = insert_in_front(5,(1,3,4,6))
print(c)
def index_of(val, seq, start=0):
"""
>>> index_of(9, [1, 7, 11, 9, 10])
3
>>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5))
3
>>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5), 4)
6
>>> index_of('y', 'happy birthday')
4
>>> index_of('banana', ['apple', 'banana', 'cherry', 'date'])
1
>>> index_of(5, [2, 3, 4])
-1
>>> index_of('b', ['apple', 'banana', 'cherry', 'date'])
-1
"""
    try:
        return seq.index(val, start)
    except ValueError:
        return -1
print(index_of('b', ['apple', 'banana', 'cherry', 'date']))
def remove_at(index, seq):
"""
>>> remove_at(3, [1, 7, 11, 9, 10])
[1, 7, 11, 10]
>>> remove_at(5, (1, 4, 6, 7, 0, 9, 3, 5))
(1, 4, 6, 7, 0, 3, 5)
>>> remove_at(2, "Yomrktown")
'Yorktown'
"""
if type(seq) == list:
seq.remove(seq[index])
return seq
elif type(seq) == tuple:
a = list(seq)
a.remove(a[index])
return tuple(a)
elif type(seq) == str:
a = list(seq)
a.remove(a[index])
return ''.join(a)
print(remove_at(2, "Yomrktown"))
def remove_val(val, seq):
"""
>>> remove_val(11, [1, 7, 11, 9, 10])
[1, 7, 9, 10]
>>> remove_val(15, (1, 15, 11, 4, 9))
(1, 11, 4, 9)
>>> remove_val('what', ('who', 'what', 'when', 'where', 'why', 'how'))
('who', 'when', 'where', 'why', 'how')
"""
def remove_all(val, seq):
"""
>>> remove_all(11, [1, 7, 11, 9, 11, 10, 2, 11])
[1, 7, 9, 10, 2]
>>> remove_all('i', 'Mississippi')
'Msssspp'
"""
def count(val, seq):
"""
>>> count(5, (1, 5, 3, 7, 5, 8, 5))
3
>>> count('s', 'Mississippi')
4
>>> count((1, 2), [1, 5, (1, 2), 7, (1, 2), 8, 5])
2
"""
def reverse(seq):
"""
>>> reverse([1, 2, 3, 4, 5])
[5, 4, 3, 2, 1]
>>> reverse(('shoe', 'my', 'buckle', 2, 1))
(1, 2, 'buckle', 'my', 'shoe')
>>> reverse('Python')
'nohtyP'
"""
def sort_sequence(seq):
"""
>>> sort_sequence([3, 4, 6, 7, 8, 2])
[2, 3, 4, 6, 7, 8]
>>> sort_sequence((3, 4, 6, 7, 8, 2))
(2, 3, 4, 6, 7, 8)
>>> sort_sequence("nothappy")
'ahnoppty'
"""
# if __name__ == "__main__":
# import doctest
# doctest.testmod()
| en | 0.419895 | >>> make_empty([1, 2, 3, 4]) [] >>> make_empty(('a', 'b', 'c')) () >>> make_empty("No, not me!") '' >>> insert_at_end(5, [1, 3, 4, 6]) [1, 3, 4, 6, 5] >>> insert_at_end('x', 'abc') 'abcx' >>> insert_at_end(5, (1, 3, 4, 6)) (1, 3, 4, 6, 5) >>> insert_in_front(5, [1, 3, 4, 6]) [5, 1, 3, 4, 6] >>> insert_in_front(5, (1, 3, 4, 6)) (5, 1, 3, 4, 6) >>> insert_in_front('x', 'abc') 'xabc' >>> index_of(9, [1, 7, 11, 9, 10]) 3 >>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5)) 3 >>> index_of(5, (1, 2, 4, 5, 6, 10, 5, 5), 4) 6 >>> index_of('y', 'happy birthday') 4 >>> index_of('banana', ['apple', 'banana', 'cherry', 'date']) 1 >>> index_of(5, [2, 3, 4]) -1 >>> index_of('b', ['apple', 'banana', 'cherry', 'date']) -1 >>> remove_at(3, [1, 7, 11, 9, 10]) [1, 7, 11, 10] >>> remove_at(5, (1, 4, 6, 7, 0, 9, 3, 5)) (1, 4, 6, 7, 0, 3, 5) >>> remove_at(2, "Yomrktown") 'Yorktown' >>> remove_val(11, [1, 7, 11, 9, 10]) [1, 7, 9, 10] >>> remove_val(15, (1, 15, 11, 4, 9)) (1, 11, 4, 9) >>> remove_val('what', ('who', 'what', 'when', 'where', 'why', 'how')) ('who', 'when', 'where', 'why', 'how') >>> remove_all(11, [1, 7, 11, 9, 11, 10, 2, 11]) [1, 7, 9, 10, 2] >>> remove_all('i', 'Mississippi') 'Msssspp' >>> count(5, (1, 5, 3, 7, 5, 8, 5)) 3 >>> count('s', 'Mississippi') 4 >>> count((1, 2), [1, 5, (1, 2), 7, (1, 2), 8, 5]) 2 >>> reverse([1, 2, 3, 4, 5]) [5, 4, 3, 2, 1] >>> reverse(('shoe', 'my', 'buckle', 2, 1)) (1, 2, 'buckle', 'my', 'shoe') >>> reverse('Python') 'nohtyP' >>> sort_sequence([3, 4, 6, 7, 8, 2]) [2, 3, 4, 6, 7, 8] >>> sort_sequence((3, 4, 6, 7, 8, 2)) (2, 3, 4, 6, 7, 8) >>> sort_sequence("nothappy") 'ahnoppty' # if __name__ == "__main__": # import doctest # doctest.testmod() | 4.238644 | 4 |
scripts/presets/photographer/cameras/olympus_em1_m_zuiko_12mm.py | Tilapiatsu/blender-custom_conf | 2 | 6632302 | 
import bpy
camera = bpy.context.scene.camera.data
photographer = bpy.context.scene.camera.data.photographer
photographer.sensor_type = '18'
photographer.aperture = 1.2
photographer.aperture_preset = '1.2'
photographer.aperture_slider_enable = False
camera.lens = 12.0
camera.dof.use_dof = True
camera.dof.aperture_ratio = 1.0
camera.dof.aperture_blades = 9
camera.dof.aperture_rotation = 0.0
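# Note (added): sensor_type, aperture, aperture_preset and
# aperture_slider_enable above are properties of the Photographer add-on --
# '18' is the add-on's own enum ID for this body's sensor preset and 1.2 is
# the f-number -- while the camera.* lines are standard Blender camera data:
# a 12 mm focal length with depth of field enabled and 9 aperture blades.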
 | 
import bpy
camera = bpy.context.scene.camera.data
photographer = bpy.context.scene.camera.data.photographer
photographer.sensor_type = '18'
photographer.aperture = 1.2
photographer.aperture_preset = '1.2'
photographer.aperture_slider_enable = False
camera.lens = 12.0
camera.dof.use_dof = True
camera.dof.aperture_ratio = 1.0
camera.dof.aperture_blades = 9
camera.dof.aperture_rotation = 0.0
| none | 1 | 1.310077 | 1 |
|
leetcode/633.py | Cannizza-zzk/python_review | 0 | 6632303 | 
class Solution:
def judgeSquareSum(self, c: int) -> bool:
        left_ptr, right_ptr = 0, int(c**0.5)
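        # Two-pointer search for a*a + b*b == c with a starting at 0 and b at
        # floor(sqrt(c)): if the sum is too small only increasing a can help,
        # if it is too large only decreasing b can, so each step discards one
        # candidate and the scan finishes in O(sqrt(c)) steps.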
while left_ptr <= right_ptr:
cur = left_ptr**2 + right_ptr**2
if c == cur:
return True
elif c > cur:
left_ptr += 1
else:
right_ptr -= 1
return False | class Solution:
def judgeSquareSum(self, c: int) -> bool:
        left_ptr, right_ptr = 0, int(c**0.5)
while left_ptr <= right_ptr:
cur = left_ptr**2 + right_ptr**2
if c == cur:
return True
elif c > cur:
left_ptr += 1
else:
right_ptr -= 1
return False | none | 1 | 3.379284 | 3 |
|
PlateBridge/PlateBridge-simple-designVehicle-1stSpanMidSpan.py | ming91915/PlateBridge | 1 | 6632304 | # -*- coding: mbcs -*-
#
# Abaqus/CAE Release 6.14-1
# Internal Version: 2014_06_05-06.11.02 134264
# Run by eamdf on Tue Aug 21 16:58:48 2018
#
# from driverUtils import executeOnCaeGraphicsStartup
# executeOnCaeGraphicsStartup()
#: Executing "onCaeGraphicsStartup()" in the site directory ...
from abaqus import *
from abaqusConstants import *
from caeModules import *
from driverUtils import executeOnCaeStartup
modelName='FuzhouHouyuRoad'
deckName='BridgeDeck'
span=11
C40='C40'
C30='C30'
session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
#create model
myModel = mdb.Model(name=modelName)
#create sketch
s1 = mdb.models[modelName].ConstrainedSketch(name='__profile__', sheetSize=4.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.Line(point1=(0.0, 0.0), point2=(0.0, 13))
s1.Line(point1=(0.0, 13), point2=(22,18))
s1.Line(point1=(22, 18), point2=(22, 0.0))
s1.Line(point1=(22, 0.0), point2=(0.0, 0.0))
#create part
p = mdb.models[modelName].Part(name=deckName, dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p = mdb.models[modelName].parts[deckName]
p.BaseShell(sketch=s1)
#del sketch
del mdb.models[modelName].sketches['__profile__']
#create material
#deck material
mdb.models[modelName].Material(name=C40)
mdb.models[modelName].materials[C40].Elastic(table=((3.25e10, 0.2),
))
#pavement material
mdb.models[modelName].Material(name=C30)
mdb.models[modelName].materials[C30].Elastic(table=((3.00e10, 0.2),
))
mdb.models[modelName].HomogeneousShellSection(name='DeckSection',
preIntegrate=ON, material=C40, thicknessType=UNIFORM, thickness=0.50,
thicknessField='', idealization=NO_IDEALIZATION, poissonDefinition=DEFAULT,
thicknessModulus=None, useDensity=OFF)
#assign section
p = mdb.models[modelName].parts[deckName]
f = p.faces
faces = f.findAt(((1.0,1.0, 0.0), ))
region = regionToolset.Region(faces=faces)
p.SectionAssignment(region=region, sectionName='DeckSection', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
##create assembly
a = mdb.models[modelName].rootAssembly
p = mdb.models[modelName].parts[deckName]
a.Instance(name='Plate-1', part=p, dependent=OFF)
#create partition
f1 = a.instances['Plate-1'].faces
pickedFaces = f1.findAt(((7.333333, 4.333333, 0.0), ))
e1 = a.instances['Plate-1'].edges
a.PartitionFaceByShortestPath(faces=pickedFaces,
point1=a.instances['Plate-1'].InterestingPoint(edge=e1.findAt(coordinates=(
5.5, 14.25, 0.0)), rule=MIDDLE),
point2=a.instances['Plate-1'].InterestingPoint(edge=e1.findAt(coordinates=(
16.5, 0.0, 0.0)), rule=MIDDLE))
#create boundary set
e1 = a.instances['Plate-1'].edges
#left
edges1 = e1.findAt(((0.0, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-1')
#middle
edges1 = e1.findAt(((span, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-2')
#right
edges1 = e1.findAt(((2*span, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-3')
#Create Step
mdb.models[modelName].StaticStep(name='ApplyLoad', previous='Initial')
#set field output
mdb.models['FuzhouHouyuRoad'].fieldOutputRequests['F-Output-1'].setValues(
variables=('S', 'PE', 'PEEQ', 'PEMAG', 'LE', 'U', 'RF', 'CF', 'SF',
'CSTRESS', 'CDISP'))
#create boundary
region = a.sets['End-1']
mdb.models[modelName].DisplacementBC(name='BC-1', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
region = a.sets['End-2']
mdb.models[modelName].DisplacementBC(name='BC-2', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
region = a.sets['End-3']
mdb.models[modelName].DisplacementBC(name='BC-3', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
#create load
#1 5.5,1.5,0
#2 5.5,3.4,0
#3 4.1,1.5,0
#4 4.1,3.4,0
#5 0.3,1.5,0
#6 0.3,3.4,0
#7 5.5,4.8,0
#8 5.5,6.2,0
#9 4.1,4.8,0
#10 4.1,6.2,0
#11 0.3,4.8,0
#12 0.3,6.2,0
#-90
#-90
#-90
#-90
#-45
#-45
#-90
#-90
#-90
#-90
#-45
#-45
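# Note (added): the numbered comments above give nominal load positions and
# the -90/-45 values their magnitudes in kN; tuplePoint below uses the same
# layout shifted by +1.4 in x (placing the loads for this load case), and
# tupleLoad applies them as cf3 point forces in N, negative meaning downward.
# Reading them as design-vehicle wheel loads is an assumption from the file name.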
tuplePoint=((6.9,1.5,0),(6.9,3.4,0),(5.5,1.5,0),(5.5,3.4,0),(1.7,1.5,0),(1.7,3.4,0),
(6.9,4.8,0),(6.9,6.2,0),(5.5,4.8,0),(5.5,6.2,0),(1.7,4.8,0),(1.7,6.2,0))
tupleLoad=(-90000,-90000,-90000,-90000,-45000,-45000,
-90000,-90000,-90000,-90000,-45000,-45000)
for i in range(0,len(tuplePoint)):
ref1=a.ReferencePoint(point=tuplePoint[i])
r1 = a.referencePoints
refPoints1=(r1[ref1.id], )
region1=a.Set(referencePoints=refPoints1, name='m_Set-'+str(i+30))
s1 = a.instances['Plate-1'].faces
#TODO: according to tuplePoint to find faces
side1Faces1 = s1.findAt(((5.0, 5.0, 0), ))
region2=a.Surface(side1Faces=side1Faces1, name='s_Surf-'+str(i+30))
mdb.models[modelName].Coupling(name='Constraint-RP'+str(i+1),
controlPoint=region1, surface=region2, influenceRadius=0.1,
couplingType=DISTRIBUTING, weightingMethod=UNIFORM, localCsys=None, u1=ON,
u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
region = regionToolset.Region(referencePoints=refPoints1)
mdb.models[modelName].ConcentratedForce(name='Load-'+str(i+1),
createStepName='ApplyLoad', region=region, cf3=tupleLoad[i],
distributionType=UNIFORM, field='', localCsys=None)
#create mesh
partInstances =(a.instances['Plate-1'], )
a.seedPartInstance(regions=partInstances, size=0.2, deviationFactor=0.1,
minSizeFactor=0.1)
a = mdb.models[modelName].rootAssembly
partInstances =(a.instances['Plate-1'], )
a.generateMesh(regions=partInstances)
#job
#mdb.Job(name='Job-2', model=modelName, description='', type=ANALYSIS,
# atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90,
# memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
# explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF,
# modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='',
# scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT, numCpus=1,
# numGPUs=0)
#mdb.jobs['Job-2'].submit(consistencyChecking=OFF)
#a = mdb.models['LinearPlate'].rootAssembly
#partInstances =(a.instances['Plate-1'], )
#a.generateMesh(regions=partInstances)
#mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF)
#a1 = mdb.models['LinearPlate'].rootAssembly
#a1.regenerate()
#a = mdb.models['LinearPlate'].rootAssembly
#session.viewports['Viewport: 1'].setValues(displayedObject=a)
#mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF)
##: The job input file "SkewPlate.inp" has been submitted for analysis.
##: Job SkewPlate: Analysis Input File Processor completed successfully.
##: Job SkewPlate: Abaqus/Standard completed successfully.
##: Job SkewPlate completed successfully.
| # -*- coding: mbcs -*-
#
# Abaqus/CAE Release 6.14-1
# Internal Version: 2014_06_05-06.11.02 134264
# Run by eamdf on Tue Aug 21 16:58:48 2018
#
# from driverUtils import executeOnCaeGraphicsStartup
# executeOnCaeGraphicsStartup()
#: Executing "onCaeGraphicsStartup()" in the site directory ...
from abaqus import *
from abaqusConstants import *
from caeModules import *
from driverUtils import executeOnCaeStartup
modelName='FuzhouHouyuRoad'
deckName='BridgeDeck'
span=11
C40='C40'
C30='C30'
session.journalOptions.setValues(replayGeometry=COORDINATE,recoverGeometry=COORDINATE)
#create model
myModel = mdb.Model(name=modelName)
#create sketch
s1 = mdb.models[modelName].ConstrainedSketch(name='__profile__', sheetSize=4.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.Line(point1=(0.0, 0.0), point2=(0.0, 13))
s1.Line(point1=(0.0, 13), point2=(22,18))
s1.Line(point1=(22, 18), point2=(22, 0.0))
s1.Line(point1=(22, 0.0), point2=(0.0, 0.0))
#create part
p = mdb.models[modelName].Part(name=deckName, dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p = mdb.models[modelName].parts[deckName]
p.BaseShell(sketch=s1)
#del sketch
del mdb.models[modelName].sketches['__profile__']
#create material
#deck material
mdb.models[modelName].Material(name=C40)
mdb.models[modelName].materials[C40].Elastic(table=((3.25e10, 0.2),
))
#pavement material
mdb.models[modelName].Material(name=C30)
mdb.models[modelName].materials[C30].Elastic(table=((3.00e10, 0.2),
))
mdb.models[modelName].HomogeneousShellSection(name='DeckSection',
preIntegrate=ON, material=C40, thicknessType=UNIFORM, thickness=0.50,
thicknessField='', idealization=NO_IDEALIZATION, poissonDefinition=DEFAULT,
thicknessModulus=None, useDensity=OFF)
#assign section
p = mdb.models[modelName].parts[deckName]
f = p.faces
faces = f.findAt(((1.0,1.0, 0.0), ))
region = regionToolset.Region(faces=faces)
p.SectionAssignment(region=region, sectionName='DeckSection', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
##create assembly
a = mdb.models[modelName].rootAssembly
p = mdb.models[modelName].parts[deckName]
a.Instance(name='Plate-1', part=p, dependent=OFF)
#create partition
f1 = a.instances['Plate-1'].faces
pickedFaces = f1.findAt(((7.333333, 4.333333, 0.0), ))
e1 = a.instances['Plate-1'].edges
a.PartitionFaceByShortestPath(faces=pickedFaces,
point1=a.instances['Plate-1'].InterestingPoint(edge=e1.findAt(coordinates=(
5.5, 14.25, 0.0)), rule=MIDDLE),
point2=a.instances['Plate-1'].InterestingPoint(edge=e1.findAt(coordinates=(
16.5, 0.0, 0.0)), rule=MIDDLE))
#create boundary set
e1 = a.instances['Plate-1'].edges
#left
edges1 = e1.findAt(((0.0, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-1')
#middle
edges1 = e1.findAt(((span, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-2')
#right
edges1 = e1.findAt(((2*span, 1.0, 0.0), ))
a.Set(edges=edges1, name='End-3')
#Create Step
mdb.models[modelName].StaticStep(name='ApplyLoad', previous='Initial')
#set field output
mdb.models['FuzhouHouyuRoad'].fieldOutputRequests['F-Output-1'].setValues(
variables=('S', 'PE', 'PEEQ', 'PEMAG', 'LE', 'U', 'RF', 'CF', 'SF',
'CSTRESS', 'CDISP'))
#create boundary
region = a.sets['End-1']
mdb.models[modelName].DisplacementBC(name='BC-1', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
region = a.sets['End-2']
mdb.models[modelName].DisplacementBC(name='BC-2', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
region = a.sets['End-3']
mdb.models[modelName].DisplacementBC(name='BC-3', createStepName='ApplyLoad',
region=region, u1=0.0, u2=0.0, u3=0.0, ur1=0.0, ur2=UNSET, ur3=0.0,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='',
localCsys=None)
#create load
#1 5.5,1.5,0
#2 5.5,3.4,0
#3 4.1,1.5,0
#4 4.1,3.4,0
#5 0.3,1.5,0
#6 0.3,3.4,0
#7 5.5,4.8,0
#8 5.5,6.2,0
#9 4.1,4.8,0
#10 4.1,6.2,0
#11 0.3,4.8,0
#12 0.3,6.2,0
#-90
#-90
#-90
#-90
#-45
#-45
#-90
#-90
#-90
#-90
#-45
#-45
tuplePoint=((6.9,1.5,0),(6.9,3.4,0),(5.5,1.5,0),(5.5,3.4,0),(1.7,1.5,0),(1.7,3.4,0),
(6.9,4.8,0),(6.9,6.2,0),(5.5,4.8,0),(5.5,6.2,0),(1.7,4.8,0),(1.7,6.2,0))
tupleLoad=(-90000,-90000,-90000,-90000,-45000,-45000,
-90000,-90000,-90000,-90000,-45000,-45000)
for i in range(0,len(tuplePoint)):
ref1=a.ReferencePoint(point=tuplePoint[i])
r1 = a.referencePoints
refPoints1=(r1[ref1.id], )
region1=a.Set(referencePoints=refPoints1, name='m_Set-'+str(i+30))
s1 = a.instances['Plate-1'].faces
#TODO: according to tuplePoint to find faces
side1Faces1 = s1.findAt(((5.0, 5.0, 0), ))
region2=a.Surface(side1Faces=side1Faces1, name='s_Surf-'+str(i+30))
mdb.models[modelName].Coupling(name='Constraint-RP'+str(i+1),
controlPoint=region1, surface=region2, influenceRadius=0.1,
couplingType=DISTRIBUTING, weightingMethod=UNIFORM, localCsys=None, u1=ON,
u2=ON, u3=ON, ur1=ON, ur2=ON, ur3=ON)
region = regionToolset.Region(referencePoints=refPoints1)
mdb.models[modelName].ConcentratedForce(name='Load-'+str(i+1),
createStepName='ApplyLoad', region=region, cf3=tupleLoad[i],
distributionType=UNIFORM, field='', localCsys=None)
#create mesh
partInstances =(a.instances['Plate-1'], )
a.seedPartInstance(regions=partInstances, size=0.2, deviationFactor=0.1,
minSizeFactor=0.1)
a = mdb.models[modelName].rootAssembly
partInstances =(a.instances['Plate-1'], )
a.generateMesh(regions=partInstances)
#job
#mdb.Job(name='Job-2', model=modelName, description='', type=ANALYSIS,
# atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90,
# memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
# explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF,
# modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='',
# scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT, numCpus=1,
# numGPUs=0)
#mdb.jobs['Job-2'].submit(consistencyChecking=OFF)
#a = mdb.models['LinearPlate'].rootAssembly
#partInstances =(a.instances['Plate-1'], )
#a.generateMesh(regions=partInstances)
#mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF)
#a1 = mdb.models['LinearPlate'].rootAssembly
#a1.regenerate()
#a = mdb.models['LinearPlate'].rootAssembly
#session.viewports['Viewport: 1'].setValues(displayedObject=a)
#mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF)
##: The job input file "SkewPlate.inp" has been submitted for analysis.
##: Job SkewPlate: Analysis Input File Processor completed successfully.
##: Job SkewPlate: Abaqus/Standard completed successfully.
##: Job SkewPlate completed successfully.
| en | 0.50331 | # -*- coding: mbcs -*- # # Abaqus/CAE Release 6.14-1 # Internal Version: 2014_06_05-06.11.02 134264 # Run by eamdf on Tue Aug 21 16:58:48 2018 # # from driverUtils import executeOnCaeGraphicsStartup # executeOnCaeGraphicsStartup() #: Executing "onCaeGraphicsStartup()" in the site directory ... #create model #create sketch #create part #del sketch #create material #deck material #pavemoent material #assign section ##create assembly #create partition #create boundry set #left #middle #right #Create Step #set field output #create boundry #create load #1 5.5,1.5,0 #2 5.5,3.4,0 #3 4.1,1.5,0 #4 4.1,3.4,0 #5 0.3,1.5,0 #6 0.3,3.4,0 #7 5.5,4.8,0 #8 5.5,6.2,0 #9 4.1,4.8,0 #10 4.1,6.2,0 #11 0.3,4.8,0 #12 0.3,6.2,0 #-90 #-90 #-90 #-90 #-45 #-45 #-90 #-90 #-90 #-90 #-45 #-45 #TODO: according to tuplePoint to find faces #create mesh #job #mdb.Job(name='Job-2', model=modelName, description='', type=ANALYSIS, # atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90, # memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True, # explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF, # modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='', # scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT, numCpus=1, # numGPUs=0) #mdb.jobs['Job-2'].submit(consistencyChecking=OFF) #a = mdb.models['LinearPlate'].rootAssembly #partInstances =(a.instances['Plate-1'], ) #a.generateMesh(regions=partInstances) #mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF) #a1 = mdb.models['LinearPlate'].rootAssembly #a1.regenerate() #a = mdb.models['LinearPlate'].rootAssembly #session.viewports['Viewport: 1'].setValues(displayedObject=a) #mdb.jobs['SkewPlate'].submit(consistencyChecking=OFF) ##: The job input file "SkewPlate.inp" has been submitted for analysis. ##: Job SkewPlate: Analysis Input File Processor completed successfully. ##: Job SkewPlate: Abaqus/Standard completed successfully. ##: Job SkewPlate completed successfully. | 1.647836 | 2 |
website/views/registry.py | jrdbnntt-com/com_jrdbnntt_wedding | 0 | 6632305 | 
from django.http.request import HttpRequest
from django.shortcuts import render
def index(request: HttpRequest):
return render(request, "registry/index.html", {
'page_title': 'Registry'
})
 | 
from django.http.request import HttpRequest
from django.shortcuts import render
def index(request: HttpRequest):
return render(request, "registry/index.html", {
'page_title': 'Registry'
})
| none | 1 | 1.874302 | 2 |
|
data-science-in-python/introduction_to_data_science_in_python/pandas_csv_census1.py | ysoftman/test_code | 3 | 6632306 | # -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import numpy as np
import pandas as pd
print("load census.csv ... ")
print()
# read the csv file
df = pd.read_csv('census.csv')
print("df.head()=\n", df.head(), sep="")
print()
# column names
print(df.columns)
print()
# unique values of the SUMLEV column
# SUMLEV (Summary Level) is the code the census bureau uses to classify
# the type of geographic area a row describes
# 040 : state
# 050 : county
# 140 : census tract
# see https://blog.cubitplanning.com/2011/03/census-summary-level-sumlev/
print("df['SUMLEV'].unique() =", df['SUMLEV'].unique())
print()
# keep only the rows where SUMLEV == 50
df = df[df['SUMLEV'] == 50]
# keep only the specified columns
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
print(df)
print()
# set the STNAME and CTYNAME columns as the index (a MultiIndex)
df = df.set_index(['STNAME', 'CTYNAME'])
# the existing default index could be kept as an extra level as well,
# but it is just a running integer count, which is awkward to query.
# df = df.set_index([df.index, 'STNAME', 'CTYNAME'])
# the index level names can also be set explicitly.
df.index.names = ['state', 'city']
print(df)
print()
# now that STNAME and CTYNAME form the index,
# rows can be looked up by the labels STNAME=Michigan, CTYNAME=Washtenaw County.
# a single label pair comes back as a one-dimensional result.
print("df.loc['Michigan', 'Washtenaw County'] =\n",
df.loc['Michigan', 'Washtenaw County'])
# for two counties, the result is two-dimensional, as below.
print("df.loc[[('Michigan', 'Washtenaw County'),('Michigan', 'Wayne County')]]=\n",
df.loc[[('Michigan', 'Washtenaw County'), ('Michigan', 'Wayne County')]])
print()
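# Optional sketch (an addition, not in the original script): with the
# ('STNAME', 'CTYNAME') MultiIndex built above, .loc can also select on just
# the outer level, returning every county row of one state.
def lookup_state(frame, state_name):
    # assumes `frame` is indexed by (STNAME, CTYNAME) exactly like df above
    return frame.loc[state_name]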
| # -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import numpy as np
import pandas as pd
print("load census.csv ... ")
print()
# read the csv file
df = pd.read_csv('census.csv')
print("df.head()=\n", df.head(), sep="")
print()
# column names
print(df.columns)
print()
# unique values of the SUMLEV column
# SUMLEV (Summary Level) is the code the census bureau uses to classify
# the type of geographic area a row describes
# 040 : state
# 050 : county
# 140 : census tract
# see https://blog.cubitplanning.com/2011/03/census-summary-level-sumlev/
print("df['SUMLEV'].unique() =", df['SUMLEV'].unique())
print()
# keep only the rows where SUMLEV == 50
df = df[df['SUMLEV'] == 50]
# keep only the specified columns
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
print(df)
print()
# set the STNAME and CTYNAME columns as the index (a MultiIndex)
df = df.set_index(['STNAME', 'CTYNAME'])
# the existing default index could be kept as an extra level as well,
# but it is just a running integer count, which is awkward to query.
# df = df.set_index([df.index, 'STNAME', 'CTYNAME'])
# the index level names can also be set explicitly.
df.index.names = ['state', 'city']
print(df)
print()
# now that STNAME and CTYNAME form the index,
# rows can be looked up by the labels STNAME=Michigan, CTYNAME=Washtenaw County.
# a single label pair comes back as a one-dimensional result.
print("df.loc['Michigan', 'Washtenaw County'] =\n",
df.loc['Michigan', 'Washtenaw County'])
# for two counties, the result is two-dimensional, as below.
print("df.loc[[('Michigan', 'Washtenaw County'),('Michigan', 'Wayne County')]]=\n",
df.loc[[('Michigan', 'Washtenaw County'), ('Michigan', 'Wayne County')]])
print()
| ko | 0.995884 | # -*- coding: utf-8 -*- # author: ysoftman # python version : 3.x # desc : pandas test # csv 파일 읽기 # 컬럼 이름들 # SUMLEV 컬럼의 유니크값 # SUMLEV(Summary Level) 인구 통계청에서 지역구분에 쓰는 코드다 # 040 : state # 050 : county # 140 : census trac # 참고 https://blog.cubitplanning.com/2011/03/census-summary-level-sumlev/ # SUMLEV == 50 인 데이터만 남기기 # 지정한 컬럼만 남기기 # STNAME, CTYNAME 2개의 컬럼을 인덱스로 설정(MultiIndex 멀티 인덱스) # 기존 디폴트 인덱스에 추가할 수 도 있지만 # 디폴트는 숫자 카운트 형이라 조회시 불편하다. # df = df.set_index([df.index, 'STNAME', 'CTYNAME']) # 인덱스 이름을 별도로 설정할 수도 있다. # STNAME, CTYNAME 가 인덱싱이 되었으니 # STNAME=Michigan, CTYNAME=Washtenaw County 레이블로 조회 할 수 있다. # 차원으로 표현된다. # 2개의 도시이니경우 다음과 같이 2차원으료 표현된다. | 3.2833 | 3 |
master/main.py | agarwalsiddhant10/cluster-middleware | 0 | 6632307 | 
import argparse
import multiprocessing as mp
import os
import os.path
import pickle
import select
import socket
import time
import copy
from . import message_handlers
from ..messaging import message
from ..messaging import messageutils
from ..messaging import network_params
from . import priorityqueue
from .job import job_parser
# SERVER_SEND_PORT = 5005
# SERVER_RECV_PORT = 5006
BUFFER_SIZE = 1048576
SERVER_START_WAIT_TIME = 5 # seconds
CRASH_ASSUMPTION_TIME = 20 # seconds
CRASH_DETECTOR_SLEEP_TIME = 5 # seconds
SERVER_START_WAIT_TIME = 5 # seconds
BACKUP_SERVER_STATE_PATH = '/home/ubuntu/sharedfolder/backup_state.pkl'
def print_welcome_message():
"""Print a welcome message read from prompt_welcome file to stdout."""
# prompt_welcome_filepath = \
# os.path.dirname(os.path.realpath(__file__)) + "CLUSTER_MIDDLEWARE"
# with open(prompt_welcome_filepath, 'r') as file:
# print(file.read())
print("WELCOME TO MASTER NODE")
def detect_node_crash(node_last_seen, server_ip):
while True:
time.sleep(CRASH_DETECTOR_SLEEP_TIME)
print('Crash Check')
current_time = time.time()
crashed_nodes = set()
for node_id, last_seen_time in node_last_seen.items():
time_since_last_heartbeat = current_time - last_seen_time
if time_since_last_heartbeat > CRASH_ASSUMPTION_TIME:
crashed_nodes.add(node_id)
# Make and send a crash message to main process which is listening
# on SERVER_RECV_PORT for incoming messages.
if len(crashed_nodes) != 0:
print('NODE CRASHED')
print(crashed_nodes)
messageutils.make_and_send_message(msg_type='NODE_CRASH',
content=crashed_nodes,
file_path=None,
to=server_ip,
msg_socket=None,
port=network_params.SERVER_RECV_PORT)
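# Standalone sketch (added for illustration) of the timeout rule applied by
# detect_node_crash() above:
def is_heartbeat_overdue(last_seen_time, current_time,
                         threshold=CRASH_ASSUMPTION_TIME):
    """Return True if a node's last heartbeat is older than the threshold."""
    return (current_time - last_seen_time) > threshold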
def main():
# """Get server ip, backup ip, listen for messages and manage jobs.
# """
parser = argparse.ArgumentParser(description='Set up central server.')
backup_ip = None
print_welcome_message()
compute_nodes = {} # {node_id: status}
job_queue = priorityqueue.JobQueue()
running_jobs = {} # {node_id: [list of jobs]}
job_executable = {} # {job_id: executable}
job_running_node = {} #{job_id: running_node}
job_sender = {} # {job_id: sender}
# In case of backup server taking over on original central server crash
# gives backup process enough time to terminate
time.sleep(SERVER_START_WAIT_TIME)
job_receipt_id = 0 # Unique ID assigned to each job from server.
server_state_order = 0 # Sequence ordering of ServerState sent to backup.
manager = mp.Manager()
node_last_seen = manager.dict() # {node_id: last_seen_time}
# Initialize current server state from backup snapshot
# Used in case primary backup is taking over as central server
if os.path.isfile(BACKUP_SERVER_STATE_PATH):
# server_ip = network_params.BACKUP_NODE_IP
with open(BACKUP_SERVER_STATE_PATH, 'rb') as backup_server_state:
server_state = pickle.load(backup_server_state)
compute_nodes = server_state.compute_nodes
for node_id, _ in compute_nodes.items():
print(node_id)
node_last_seen[node_id] = time.time()
running_jobs = server_state.running_jobs
job_receipt_id = server_state.job_receipt_id
job_sender = server_state.job_sender
job_executable = server_state.job_executable
job_queue = priorityqueue.JobQueue()
for job in server_state.job_queue:
job_queue.put(job)
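    # The (possibly restored) node_last_seen manager dict is shared with the
    # crash detector below; it runs as a separate process and reports dead
    # nodes back to this loop as NODE_CRASH messages over the server socket.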
process_crash_detector = mp.Process(
target=detect_node_crash, args=(node_last_seen, '127.0.0.1',))
process_crash_detector.start()
# Creates a TCP/IP socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binds the socket to the port
server_address = ('', network_params.SERVER_RECV_PORT)
print('Starting up on %s port %s' % server_address)
server.bind(server_address)
server.listen(5)
# Sockets for reading and writing
inputs = [server]
outputs = []
while inputs:
# Wait for at least one of the sockets to be ready for processing
readable, _, _ = select.select(inputs, outputs, inputs)
# Handle inputs
for msg_socket in readable:
if msg_socket is server:
# A "readable" server socket is ready to accept a connection
connection, client_address = msg_socket.accept()
inputs.append(connection)
# print("XXXX")
else:
data = msg_socket.recv(BUFFER_SIZE)
if data:
data_list = []
while data:
data_list.append(data)
data = msg_socket.recv(BUFFER_SIZE)
data = b''.join(data_list)
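                    # The recv loop above reads until the peer closes its end
                    # of the connection (recv returns b''), so the complete
                    # pickled Message is reassembled from however many chunks
                    # arrive.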
msg = pickle.loads(data)
assert isinstance(msg, message.Message), \
"Received object on socket not of type Message."
if msg.msg_type == 'HEARTBEAT_BACKUP':
backup_ip = msg.sender
message_handlers.heartbeat_from_backup_handler(
received_msg=msg)
elif msg.msg_type == 'HEARTBEAT':
print("\n\n\n")
print(compute_nodes)
print("\n\n\n")
message_handlers.heartbeat_handler(
compute_nodes=compute_nodes,
node_last_seen=node_last_seen,
running_jobs=running_jobs,
job_queue=job_queue,
job_sender=job_sender,
job_executable=job_executable,
job_receipt_id=job_receipt_id,
backup_ip=backup_ip,
server_state_order=server_state_order,
received_msg=msg,
job_running_node=job_running_node)
elif msg.msg_type == 'JOB_SUBMIT':
job_receipt_id += 1
server_state_order += 1
try:
message_handlers.job_submit_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
running_jobs=running_jobs,
job_sender=job_sender,
job_running_node=job_running_node,
job_executable=job_executable,
received_msg=msg,
job_receipt_id=job_receipt_id,
backup_ip=backup_ip,
server_state_order=server_state_order
)
except:
messageutils.make_and_send_message(
msg_type='ERR_JOB_SUBMIT',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.SUBMIT_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'EXECUTED_JOB':
server_state_order += 1
print(
'RECV: ' + str(msg.content) + ' ' +
str(msg.content.completed))
job_queue = message_handlers.executed_job_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
job_receipt_id=job_receipt_id,
running_jobs=running_jobs,
job_running_node=job_running_node,
job_sender=job_sender,
job_executable=job_executable,
backup_ip=backup_ip,
server_state_order=server_state_order,
received_msg=msg)
elif msg.msg_type == 'KILL_JOB':
print('__________')
print('msg received\n\n')
try:
job_queue = message_handlers.kill_job_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
job_receipt_id=int(msg.content),
running_jobs=running_jobs,
job_executable=job_executable,
job_sender=job_sender,
job_running_node=job_running_node,
backup_ip=backup_ip,
server_state_order=server_state_order
)
except:
print('__________')
print('Error')
messageutils.make_and_send_message(
msg_type='ERR_JOB_KILL',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.KILL_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'STATS_JOB':
try:
print("STATS RECEIVED IN SERVER")
message_handlers.stats_job_handler(
running_jobs= running_jobs,
job_queue= job_queue,
)
except:
messageutils.make_and_send_message(
msg_type='ERR_STATS',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.STATS_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'ACK_JOB_EXEC':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_JOB_EXEC_PREEMPT':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_SUBMITTED_JOB_COMPLETION':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_JOB_KILL_EXEC':
print("KILLED JOB")
message_handlers.ack_job_kill_handler(
content = msg.content
)
elif msg.msg_type == 'NODE_CRASH':
message_handlers.node_crash_handler(
received_msg=msg,
compute_nodes=compute_nodes,
running_jobs=running_jobs,
job_queue=job_queue,
node_last_seen=node_last_seen,
job_executable=job_executable,
job_running_node=job_running_node)
else:
inputs.remove(msg_socket)
msg_socket.close()
if __name__ == '__main__':
    main() | 
import argparse
import multiprocessing as mp
import os
import os.path
import pickle
import select
import socket
import time
import copy
from . import message_handlers
from ..messaging import message
from ..messaging import messageutils
from ..messaging import network_params
from . import priorityqueue
from .job import job_parser
# SERVER_SEND_PORT = 5005
# SERVER_RECV_PORT = 5006
BUFFER_SIZE = 1048576
SERVER_START_WAIT_TIME = 5 # seconds
CRASH_ASSUMPTION_TIME = 20 # seconds
CRASH_DETECTOR_SLEEP_TIME = 5 # seconds
SERVER_START_WAIT_TIME = 5 # seconds
BACKUP_SERVER_STATE_PATH = '/home/ubuntu/sharedfolder/backup_state.pkl'
def print_welcome_message():
"""Print a welcome message read from prompt_welcome file to stdout."""
# prompt_welcome_filepath = \
# os.path.dirname(os.path.realpath(__file__)) + "CLUSTER_MIDDLEWARE"
# with open(prompt_welcome_filepath, 'r') as file:
# print(file.read())
print("WELCOME TO MASTER NODE")
def detect_node_crash(node_last_seen, server_ip):
while True:
time.sleep(CRASH_DETECTOR_SLEEP_TIME)
print('Crash Check')
current_time = time.time()
crashed_nodes = set()
for node_id, last_seen_time in node_last_seen.items():
time_since_last_heartbeat = current_time - last_seen_time
if time_since_last_heartbeat > CRASH_ASSUMPTION_TIME:
crashed_nodes.add(node_id)
# Make and send a crash message to main process which is listening
# on SERVER_RECV_PORT for incoming messages.
if len(crashed_nodes) != 0:
print('NODE CRASHED')
print(crashed_nodes)
messageutils.make_and_send_message(msg_type='NODE_CRASH',
content=crashed_nodes,
file_path=None,
to=server_ip,
msg_socket=None,
port=network_params.SERVER_RECV_PORT)
def main():
# """Get server ip, backup ip, listen for messages and manage jobs.
# """
parser = argparse.ArgumentParser(description='Set up central server.')
backup_ip = None
print_welcome_message()
compute_nodes = {} # {node_id: status}
job_queue = priorityqueue.JobQueue()
running_jobs = {} # {node_id: [list of jobs]}
job_executable = {} # {job_id: executable}
job_running_node = {} #{job_id: running_node}
job_sender = {} # {job_id: sender}
# In case of backup server taking over on original central server crash
# gives backup process enough time to terminate
time.sleep(SERVER_START_WAIT_TIME)
job_receipt_id = 0 # Unique ID assigned to each job from server.
server_state_order = 0 # Sequence ordering of ServerState sent to backup.
manager = mp.Manager()
node_last_seen = manager.dict() # {node_id: last_seen_time}
# Initialize current server state from backup snapshot
# Used in case primary backup is taking over as central server
if os.path.isfile(BACKUP_SERVER_STATE_PATH):
# server_ip = network_params.BACKUP_NODE_IP
with open(BACKUP_SERVER_STATE_PATH, 'rb') as backup_server_state:
server_state = pickle.load(backup_server_state)
compute_nodes = server_state.compute_nodes
for node_id, _ in compute_nodes.items():
print(node_id)
node_last_seen[node_id] = time.time()
running_jobs = server_state.running_jobs
job_receipt_id = server_state.job_receipt_id
job_sender = server_state.job_sender
job_executable = server_state.job_executable
job_queue = priorityqueue.JobQueue()
for job in server_state.job_queue:
job_queue.put(job)
process_crash_detector = mp.Process(
target=detect_node_crash, args=(node_last_seen, '127.0.0.1',))
process_crash_detector.start()
# Creates a TCP/IP socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binds the socket to the port
server_address = ('', network_params.SERVER_RECV_PORT)
print('Starting up on %s port %s' % server_address)
server.bind(server_address)
server.listen(5)
# Sockets for reading and writing
inputs = [server]
outputs = []
while inputs:
# Wait for at least one of the sockets to be ready for processing
readable, _, _ = select.select(inputs, outputs, inputs)
# Handle inputs
for msg_socket in readable:
if msg_socket is server:
# A "readable" server socket is ready to accept a connection
connection, client_address = msg_socket.accept()
inputs.append(connection)
# print("XXXX")
else:
data = msg_socket.recv(BUFFER_SIZE)
if data:
data_list = []
while data:
data_list.append(data)
data = msg_socket.recv(BUFFER_SIZE)
data = b''.join(data_list)
msg = pickle.loads(data)
assert isinstance(msg, message.Message), \
"Received object on socket not of type Message."
if msg.msg_type == 'HEARTBEAT_BACKUP':
backup_ip = msg.sender
message_handlers.heartbeat_from_backup_handler(
received_msg=msg)
elif msg.msg_type == 'HEARTBEAT':
print("\n\n\n")
print(compute_nodes)
print("\n\n\n")
message_handlers.heartbeat_handler(
compute_nodes=compute_nodes,
node_last_seen=node_last_seen,
running_jobs=running_jobs,
job_queue=job_queue,
job_sender=job_sender,
job_executable=job_executable,
job_receipt_id=job_receipt_id,
backup_ip=backup_ip,
server_state_order=server_state_order,
received_msg=msg,
job_running_node=job_running_node)
elif msg.msg_type == 'JOB_SUBMIT':
job_receipt_id += 1
server_state_order += 1
try:
message_handlers.job_submit_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
running_jobs=running_jobs,
job_sender=job_sender,
job_running_node=job_running_node,
job_executable=job_executable,
received_msg=msg,
job_receipt_id=job_receipt_id,
backup_ip=backup_ip,
server_state_order=server_state_order
)
except:
messageutils.make_and_send_message(
msg_type='ERR_JOB_SUBMIT',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.SUBMIT_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'EXECUTED_JOB':
server_state_order += 1
print(
'RECV: ' + str(msg.content) + ' ' +
str(msg.content.completed))
job_queue = message_handlers.executed_job_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
job_receipt_id=job_receipt_id,
running_jobs=running_jobs,
job_running_node=job_running_node,
job_sender=job_sender,
job_executable=job_executable,
backup_ip=backup_ip,
server_state_order=server_state_order,
received_msg=msg)
elif msg.msg_type == 'KILL_JOB':
print('__________')
print('msg received\n\n')
try:
job_queue = message_handlers.kill_job_handler(
job_queue=job_queue,
compute_nodes=compute_nodes,
job_receipt_id=int(msg.content),
running_jobs=running_jobs,
job_executable=job_executable,
job_sender=job_sender,
job_running_node=job_running_node,
backup_ip=backup_ip,
server_state_order=server_state_order
)
except:
print('__________')
print('Error')
messageutils.make_and_send_message(
msg_type='ERR_JOB_KILL',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.KILL_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'STATS_JOB':
try:
print("STATS RECEIVED IN SERVER")
message_handlers.stats_job_handler(
running_jobs= running_jobs,
job_queue= job_queue,
)
except:
messageutils.make_and_send_message(
msg_type='ERR_STATS',
content=None,
file_path=None,
to="127.0.0.1",
port= network_params.STATS_RECV_PORT,
msg_socket=None)
elif msg.msg_type == 'ACK_JOB_EXEC':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_JOB_EXEC_PREEMPT':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_SUBMITTED_JOB_COMPLETION':
message_handlers.ack_ignore_handler()
elif msg.msg_type == 'ACK_JOB_KILL_EXEC':
print("KILLED JOB")
message_handlers.ack_job_kill_handler(
content = msg.content
)
elif msg.msg_type == 'NODE_CRASH':
message_handlers.node_crash_handler(
received_msg=msg,
compute_nodes=compute_nodes,
running_jobs=running_jobs,
job_queue=job_queue,
node_last_seen=node_last_seen,
job_executable=job_executable,
job_running_node=job_running_node)
else:
inputs.remove(msg_socket)
msg_socket.close()
if __name__ == '__main__':
main() | en | 0.685536 | # SERVER_SEND_PORT = 5005 # SERVER_RECV_PORT = 5006 # seconds # seconds # seconds # seconds Print a welcome message read from prompt_welcome file to stdout. # prompt_welcome_filepath = \ # os.path.dirname(os.path.realpath(__file__)) + "CLUSTER_MIDDLEWARE" # with open(prompt_welcome_filepath, 'r') as file: # print(file.read()) # Make and send a crash message to main process which is listening # on SERVER_RECV_PORT for incoming messages. # """Get server ip, backup ip, listen for messages and manage jobs. # """ # {node_id: status} # {node_id: [list of jobs]} # {job_id: executable} #{job_id: running_node} # {job_id: sender} # In case of backup server taking over on original central server crash # gives backup process enough time to terminate # Unique ID assigned to each job from server. # Sequence ordering of ServerState sent to backup. # {node_id: last_seen_time} # Initialize current server state from backup snapshot # Used in case primary backup is taking over as central server # server_ip = network_params.BACKUP_NODE_IP # Creates a TCP/IP socket # Binds the socket to the port # Sockets for reading and writing # Wait for at least one of the sockets to be ready for processing # Handle inputs # A "readable" server socket is ready to accept a connection # print("XXXX") | 2.403162 | 2 |
cinder/tests/unit/test_migrations.py | aarunsai81/netapp | 11 | 6632308 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import os
import uuid
import fixtures
from migrate.versioning import api as migration_api
from migrate.versioning import repository
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy
from cinder.db import migration
import cinder.db.sqlalchemy.migrate_repo
class MigrationsMixin(test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
BOOL_TYPE = sqlalchemy.types.BOOLEAN
TIME_TYPE = sqlalchemy.types.DATETIME
INTEGER_TYPE = sqlalchemy.types.INTEGER
VARCHAR_TYPE = sqlalchemy.types.VARCHAR
@property
def INIT_VERSION(self):
return migration.INIT_VERSION
@property
def REPOSITORY(self):
migrate_file = cinder.db.sqlalchemy.migrate_repo.__file__
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_file)))
@property
def migration_api(self):
return migration_api
@property
def migrate_engine(self):
return self.engine
def get_table_ref(self, engine, name, metadata):
metadata.bind = engine
return sqlalchemy.Table(name, metadata, autoload=True)
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations"""
def __init__(self, banned_resources=None):
super(MigrationsMixin.BannedDBSchemaOperations, self).__init__()
self._banned_resources = banned_resources or []
@staticmethod
def _explode(resource, op):
print('%s.%s()' % (resource, op)) # noqa
raise Exception(
'Operation %s.%s() is not allowed in a database migration' % (
resource, op))
def setUp(self):
super(MigrationsMixin.BannedDBSchemaOperations, self).setUp()
for thing in self._banned_resources:
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.drop' % thing,
lambda *a, **k: self._explode(thing, 'drop')))
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
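    # Illustrative usage of the fixture above (a sketch, not part of the test
    # flow; `column` stands for any sqlalchemy Column object): while it is
    # active,
    #
    #     with MigrationsMixin.BannedDBSchemaOperations(['Column']):
    #         column.drop()
    #
    # raises "Operation Column.drop() is not allowed in a database migration".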
def migrate_up(self, version, with_data=False):
# NOTE(dulek): This is a list of migrations where we allow dropping
# things. The rules for adding things here are very very specific.
# Insight on how to drop things from the DB in a backward-compatible
# manner is provided in Cinder's developer documentation.
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE
exceptions = [
# NOTE(dulek): 62 alters the column type from boolean to integer to
# fix the bug 1518363. If we've followed the guidelines for live
# schema upgrades we would end up either waiting 3 releases to fix
# a simple bug or trigger a rebuild index operation in migration
# (because constraint was impossible to delete without deleting
# other foreign key constraints). Either way it's harsh... We've
# decided to go with alter to minimise upgrade impact. The only
# consequence for deployments running recent MySQL is inability
# to perform volume-type-access modifications while running this
# migration.
62,
# NOTE(dulek): 66 sets reservations.usage_id to nullable. This is
# 100% backward compatible and according to MySQL docs such ALTER
# is performed with the same restrictions as column addition, which
# we of course allow.
66,
# NOTE(dulek): 73 drops tables and columns we've stopped using a
# release ago.
73,
]
# NOTE(dulek): We only started requiring things be additive in
# Mitaka, so ignore all migrations before that point.
MITAKA_START = 61
if version >= MITAKA_START and version not in exceptions:
banned = ['Table', 'Column']
else:
banned = None
with MigrationsMixin.BannedDBSchemaOperations(banned):
super(MigrationsMixin, self).migrate_up(version, with_data)
def _pre_upgrade_004(self, engine):
"""Change volume types to UUID """
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test2',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test3',
'volume_type_id': 3},
],
'volume_types': [{'name': 'vtype1'},
{'name': 'vtype2'},
{'name': 'vtype3'},
],
'volume_type_extra_specs': [{'volume_type_id': 1,
'key': 'v1',
'value': 'hotep',
},
{'volume_type_id': 1,
'key': 'v2',
'value': 'bending rodrigez',
},
{'volume_type_id': 2,
'key': 'v3',
'value': 'bending rodrigez',
},
]}
volume_types = db_utils.get_table(engine, 'volume_types')
for vtype in data['volume_types']:
r = volume_types.insert().values(vtype).execute()
vtype['id'] = r.inserted_primary_key[0]
volume_type_es = db_utils.get_table(engine, 'volume_type_extra_specs')
for vtes in data['volume_type_extra_specs']:
r = volume_type_es.insert().values(vtes).execute()
vtes['id'] = r.inserted_primary_key[0]
volumes = db_utils.get_table(engine, 'volumes')
for vol in data['volumes']:
r = volumes.insert().values(vol).execute()
vol['id'] = r.inserted_primary_key[0]
return data
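    # Naming convention driven by oslo.db's WalkVersionsMixin: an optional
    # _pre_upgrade_NNN() seeds data and returns it, then _check_NNN() runs
    # after upgrading to migration NNN and asserts against that data.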
def _check_004(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
v1 = volumes.select(volumes.c.id ==
data['volumes'][0]['id']
).execute().first()
v2 = volumes.select(volumes.c.id ==
data['volumes'][1]['id']
).execute().first()
v3 = volumes.select(volumes.c.id ==
data['volumes'][2]['id']
).execute().first()
volume_types = db_utils.get_table(engine, 'volume_types')
vt1 = volume_types.select(volume_types.c.name ==
data['volume_types'][0]['name']
).execute().first()
vt2 = volume_types.select(volume_types.c.name ==
data['volume_types'][1]['name']
).execute().first()
vt3 = volume_types.select(volume_types.c.name ==
data['volume_types'][2]['name']
).execute().first()
vtes = db_utils.get_table(engine, 'volume_type_extra_specs')
vtes1 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][0]['key']
).execute().first()
vtes2 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][1]['key']
).execute().first()
vtes3 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][2]['key']
).execute().first()
self.assertEqual(v1['volume_type_id'], vt1['id'])
self.assertEqual(v2['volume_type_id'], vt1['id'])
self.assertEqual(v3['volume_type_id'], vt3['id'])
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
def _check_005(self, engine, data):
"""Test that adding source_volid column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.source_volid.type,
self.VARCHAR_TYPE)
def _check_006(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_location.type,
self.VARCHAR_TYPE)
def _check_007(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
fkey, = snapshots.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _pre_upgrade_008(self, engine):
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
def _check_008(self, engine, data):
"""Test that adding and removing the backups table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"backups"))
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(backups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(backups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.display_description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.container.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.fail_reason.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service_metadata.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.service.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(backups.c.object_count.type,
self.INTEGER_TYPE)
def _check_009(self, engine, data):
"""Test adding snapshot_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
snapshot_metadata = db_utils.get_table(engine, 'snapshot_metadata')
self.assertIsInstance(snapshot_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(snapshot_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(snapshot_metadata.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(snapshot_metadata.c.value.type,
self.VARCHAR_TYPE)
def _check_010(self, engine, data):
"""Test adding transfers table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"transfers"))
transfers = db_utils.get_table(engine, 'transfers')
self.assertIsInstance(transfers.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(transfers.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(transfers.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.display_name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.salt.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.crypt_hash.type,
self.VARCHAR_TYPE)
self.assertIsInstance(transfers.c.expires_at.type,
self.TIME_TYPE)
def _check_011(self, engine, data):
"""Test adding transfers table works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('bootable', volumes.c)
self.assertIsInstance(volumes.c.bootable.type,
self.BOOL_TYPE)
def _check_012(self, engine, data):
"""Test that adding attached_host column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.attached_host.type,
self.VARCHAR_TYPE)
def _check_013(self, engine, data):
"""Test that adding provider_geometry column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_geometry.type,
self.VARCHAR_TYPE)
def _check_014(self, engine, data):
"""Test that adding _name_id column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c._name_id.type,
self.VARCHAR_TYPE)
def _check_015(self, engine, data):
"""Test removing migrations table works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
"migrations"))
def _check_016(self, engine, data):
"""Test that dropping xen storage manager tables works correctly."""
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def _check_017(self, engine, data):
"""Test that added encryption information works correctly."""
# encryption key UUID
volumes = db_utils.get_table(engine, 'volumes')
self.assertIn('encryption_key_id', volumes.c)
self.assertIsInstance(volumes.c.encryption_key_id.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIn('encryption_key_id', snapshots.c)
self.assertIsInstance(snapshots.c.encryption_key_id.type,
self.VARCHAR_TYPE)
self.assertIn('volume_type_id', snapshots.c)
self.assertIsInstance(snapshots.c.volume_type_id.type,
self.VARCHAR_TYPE)
# encryption types table
encryption = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryption.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.cipher.type,
self.VARCHAR_TYPE)
self.assertIsInstance(encryption.c.key_size.type,
self.INTEGER_TYPE)
self.assertIsInstance(encryption.c.provider.type,
self.VARCHAR_TYPE)
def _check_018(self, engine, data):
"""Test that added qos_specs table works correctly."""
self.assertTrue(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
qos_specs = db_utils.get_table(engine, 'quality_of_service_specs')
self.assertIsInstance(qos_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(qos_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(qos_specs.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.specs_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(qos_specs.c.value.type,
self.VARCHAR_TYPE)
def _check_019(self, engine, data):
"""Test that adding migration_status column works correctly."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.migration_status.type,
self.VARCHAR_TYPE)
def _check_020(self, engine, data):
"""Test adding volume_admin_metadata table works correctly."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
volume_admin_metadata = db_utils.get_table(engine,
'volume_admin_metadata')
self.assertIsInstance(volume_admin_metadata.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_admin_metadata.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_admin_metadata.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_admin_metadata.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_admin_metadata.c.value.type,
self.VARCHAR_TYPE)
def _verify_quota_defaults(self, engine):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
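# Migration 021 seeds the 'default' quota class; three rows are expected
# here (presumably the volumes, snapshots and gigabytes resources).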
self.assertEqual(3, num_defaults)
def _check_021(self, engine, data):
"""Test adding default data for quota classes works correctly."""
self._verify_quota_defaults(engine)
def _check_022(self, engine, data):
"""Test that adding disabled_reason column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.disabled_reason.type,
self.VARCHAR_TYPE)
def _check_023(self, engine, data):
"""Test that adding reservations index works correctly."""
reservations = db_utils.get_table(engine, 'reservations')
index_columns = []
for idx in reservations.indexes:
if idx.name == 'reservations_deleted_expire_idx':
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(['deleted', 'expire']),
sorted(index_columns))
def _check_024(self, engine, data):
"""Test adding replication columns to volume table."""
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_extended_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volumes.c.replication_driver_data.type,
self.VARCHAR_TYPE)
def _check_025(self, engine, data):
"""Test adding table and columns for consistencygroups."""
# Test consistencygroup_id is in Table volumes
metadata = sqlalchemy.MetaData()
volumes = self.get_table_ref(engine, 'volumes', metadata)
self.assertIsInstance(volumes.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
# Test cgsnapshot_id is in Table snapshots
snapshots = self.get_table_ref(engine, 'snapshots', metadata)
self.assertIsInstance(snapshots.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
# Test Table consistencygroups exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"consistencygroups"))
consistencygroups = self.get_table_ref(engine,
'consistencygroups',
metadata)
self.assertIsInstance(consistencygroups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(consistencygroups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(consistencygroups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(consistencygroups.c.status.type,
self.VARCHAR_TYPE)
# Test Table cgsnapshots exists
self.assertTrue(engine.dialect.has_table(engine.connect(),
"cgsnapshots"))
cgsnapshots = self.get_table_ref(engine,
'cgsnapshots',
metadata)
self.assertIsInstance(cgsnapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(cgsnapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(cgsnapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.consistencygroup_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(cgsnapshots.c.status.type,
self.VARCHAR_TYPE)
# Verify foreign keys are created
fkey, = volumes.c.consistencygroup_id.foreign_keys
self.assertEqual(consistencygroups.c.id, fkey.column)
self.assertEqual(1, len(volumes.foreign_keys))
fkey, = snapshots.c.cgsnapshot_id.foreign_keys
self.assertEqual(cgsnapshots.c.id, fkey.column)
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEqual(volumes.c.id, fkey.column)
# 2 foreign keys in Table snapshots
self.assertEqual(2, len(snapshots.foreign_keys))
def _pre_upgrade_026(self, engine):
"""Test adding default data for consistencygroups quota class."""
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def _check_026(self, engine, data):
quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(4, num_defaults)
def _check_032(self, engine, data):
"""Test adding volume_type_projects table works correctly."""
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(volume_type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(volume_type_projects.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(volume_type_projects.c.project_id.type,
self.VARCHAR_TYPE)
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.is_public.type,
self.BOOL_TYPE)
def _check_033(self, engine, data):
"""Test adding encryption_id column to encryption table."""
encryptions = db_utils.get_table(engine, 'encryption')
self.assertIsInstance(encryptions.c.encryption_id.type,
self.VARCHAR_TYPE)
def _check_034(self, engine, data):
"""Test adding description columns to volume_types table."""
volume_types = db_utils.get_table(engine, 'volume_types')
self.assertIsInstance(volume_types.c.description.type,
self.VARCHAR_TYPE)
def _check_035(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_036(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_id.type,
self.VARCHAR_TYPE)
def _check_037(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
self.VARCHAR_TYPE)
def _check_038(self, engine, data):
"""Test adding and removing driver_initiator_data table."""
has_table = engine.dialect.has_table(engine.connect(),
"driver_initiator_data")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'driver_initiator_data'
)
self.assertIsInstance(private_data.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.initiator.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.namespace.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.value.type,
self.VARCHAR_TYPE)
def _check_039(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.parent_id.type,
self.VARCHAR_TYPE)
def _check_040(self, engine, data):
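"""Test that attachment columns moved to the volume_attachment table."""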
volumes = db_utils.get_table(engine, 'volumes')
self.assertNotIn('instance_uuid', volumes.c)
self.assertNotIn('attached_host', volumes.c)
self.assertNotIn('attach_time', volumes.c)
self.assertNotIn('mountpoint', volumes.c)
self.assertIsInstance(volumes.c.multiattach.type,
self.BOOL_TYPE)
attachments = db_utils.get_table(engine, 'volume_attachment')
self.assertIsInstance(attachments.c.attach_mode.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.instance_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attached_host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.mountpoint.type,
self.VARCHAR_TYPE)
self.assertIsInstance(attachments.c.attach_status.type,
self.VARCHAR_TYPE)
def _check_041(self, engine, data):
"""Test that adding modified_at column works correctly."""
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.modified_at.type,
self.TIME_TYPE)
def _check_048(self, engine, data):
quotas = db_utils.get_table(engine, 'quotas')
self.assertIsInstance(quotas.c.allocated.type,
self.INTEGER_TYPE)
def _check_049(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.temp_volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.temp_snapshot_id.type,
self.VARCHAR_TYPE)
def _check_050(self, engine, data):
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.previous_status.type,
self.VARCHAR_TYPE)
def _check_051(self, engine, data):
consistencygroups = db_utils.get_table(engine, 'consistencygroups')
self.assertIsInstance(consistencygroups.c.source_cgid.type,
self.VARCHAR_TYPE)
def _check_052(self, engine, data):
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.provider_auth.type,
self.VARCHAR_TYPE)
def _check_053(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.rpc_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.rpc_available_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_current_version.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.object_available_version.type,
self.VARCHAR_TYPE)
def _check_054(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.num_dependent_backups.type,
self.INTEGER_TYPE)
def _check_055(self, engine, data):
"""Test adding image_volume_cache_entries table."""
has_table = engine.dialect.has_table(engine.connect(),
"image_volume_cache_entries")
self.assertTrue(has_table)
private_data = db_utils.get_table(
engine,
'image_volume_cache_entries'
)
self.assertIsInstance(private_data.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.image_updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(private_data.c.volume_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(private_data.c.size.type,
self.INTEGER_TYPE)
self.assertIsInstance(private_data.c.last_used.type,
self.TIME_TYPE)
def _check_061(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(backups.c.data_timestamp.type,
self.TIME_TYPE)
def _check_062(self, engine, data):
volume_type_projects = db_utils.get_table(engine,
'volume_type_projects')
self.assertIsInstance(volume_type_projects.c.id.type,
self.INTEGER_TYPE)
def _check_064(self, engine, data):
backups = db_utils.get_table(engine, 'backups')
self.assertIsInstance(backups.c.restore_volume_id.type,
self.VARCHAR_TYPE)
def _check_065(self, engine, data):
services = db_utils.get_table(engine, 'services')
self.assertIsInstance(services.c.replication_status.type,
self.VARCHAR_TYPE)
self.assertIsInstance(services.c.frozen.type,
self.BOOL_TYPE)
self.assertIsInstance(services.c.active_backend_id.type,
self.VARCHAR_TYPE)
def _check_066(self, engine, data):
reservations = db_utils.get_table(engine, 'reservations')
self.assertIsInstance(reservations.c.allocated_id.type,
self.INTEGER_TYPE)
def __check_cinderbase_fields(self, columns):
"""Check fields inherited from CinderBase ORM class."""
self.assertIsInstance(columns.created_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.updated_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.deleted_at.type, self.TIME_TYPE)
self.assertIsInstance(columns.deleted.type, self.BOOL_TYPE)
def _check_067(self, engine, data):
iscsi_targets = db_utils.get_table(engine, 'iscsi_targets')
fkey, = iscsi_targets.c.volume_id.foreign_keys
self.assertIsNotNone(fkey)
def _check_074(self, engine, data):
"""Test adding message table."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"messages"))
messages = db_utils.get_table(engine, 'messages')
self.assertIsInstance(messages.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(messages.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(messages.c.message_level.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.request_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_uuid.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.event_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(messages.c.resource_type.type,
self.VARCHAR_TYPE)
def _check_075(self, engine, data):
"""Test adding cluster table and cluster_id fields."""
self.assertTrue(engine.dialect.has_table(engine.connect(), 'clusters'))
clusters = db_utils.get_table(engine, 'clusters')
columns = clusters.c
self.__check_cinderbase_fields(columns)
# Cluster specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.name.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.binary.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.disabled.type, self.BOOL_TYPE)
self.assertIsInstance(columns.disabled_reason.type, self.VARCHAR_TYPE)
# Check that we have added cluster_name field to all required tables
for table_name in ('services', 'consistencygroups', 'volumes'):
table = db_utils.get_table(engine, table_name)
self.assertIsInstance(table.c.cluster_name.type,
self.VARCHAR_TYPE)
def _check_076(self, engine, data):
workers = db_utils.get_table(engine, 'workers')
columns = workers.c
self.__check_cinderbase_fields(columns)
# Workers specific fields
self.assertIsInstance(columns.id.type, self.INTEGER_TYPE)
self.assertIsInstance(columns.resource_type.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.resource_id.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.status.type, self.VARCHAR_TYPE)
self.assertIsInstance(columns.service_id.type, self.INTEGER_TYPE)
def _check_077(self, engine, data):
"""Test adding group types and specs tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_types"))
group_types = db_utils.get_table(engine, 'group_types')
self.assertIsInstance(group_types.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_types.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_types.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_types.c.is_public.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_specs"))
group_specs = db_utils.get_table(engine, 'group_type_specs')
self.assertIsInstance(group_specs.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(group_specs.c.key.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.value.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_specs.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_specs.c.deleted.type,
self.BOOL_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_type_projects"))
type_projects = db_utils.get_table(engine, 'group_type_projects')
self.assertIsInstance(type_projects.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(type_projects.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(type_projects.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(type_projects.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(type_projects.c.project_id.type,
self.VARCHAR_TYPE)
def _check_078(self, engine, data):
"""Test adding groups tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"groups"))
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(groups.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(groups.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.host.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.availability_zone.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.status.type,
self.VARCHAR_TYPE)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_volume_type_mapping"))
mapping = db_utils.get_table(engine, 'group_volume_type_mapping')
self.assertIsInstance(mapping.c.id.type,
self.INTEGER_TYPE)
self.assertIsInstance(mapping.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(mapping.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(mapping.c.volume_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(mapping.c.group_id.type,
self.VARCHAR_TYPE)
volumes = db_utils.get_table(engine, 'volumes')
self.assertIsInstance(volumes.c.group_id.type,
self.VARCHAR_TYPE)
quota_classes = db_utils.get_table(engine, 'quota_classes')
rows = quota_classes.count().\
where(quota_classes.c.resource == 'groups').\
execute().scalar()
self.assertEqual(1, rows)
def _check_079(self, engine, data):
"""Test adding group_snapshots tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_snapshots"))
group_snapshots = db_utils.get_table(engine, 'group_snapshots')
self.assertIsInstance(group_snapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_snapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.status.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.source_group_id.type,
self.VARCHAR_TYPE)
def test_walk_versions(self):
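# Walk every migration upward only; the two positional flags presumably
# correspond to snake_walk and downgrade in the walk-versions helper,
# both disabled here.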
self.walk_versions(False, False)
class TestSqliteMigrations(test_base.DbTestCase,
MigrationsMixin):
pass
class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase,
MigrationsMixin):
BOOL_TYPE = sqlalchemy.dialects.mysql.TINYINT
def test_mysql_innodb(self):
"""Test that table creation on mysql only builds InnoDB tables."""
# Sanity check: build the full schema before inspecting table engines.
migration.db_sync(engine=self.migrate_engine)
total = self.migrate_engine.execute(
"SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='{0}'".format(
self.migrate_engine.url.database))
self.assertGreater(total.scalar(), 0,
msg="No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
self.assertEqual(0, count, "%d non InnoDB tables created" % count)
class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase,
MigrationsMixin):
TIME_TYPE = sqlalchemy.types.TIMESTAMP
| en | 0.811326 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Tests for database migrations. This test case reads the configuration file test_migrations.conf for database connection settings to use in the tests. For each connection found in the config file, the test case runs a series of test cases to ensure that migrations work properly both upgrading and downgrading, and that no data loss occurs if possible. Test sqlalchemy-migrate migrations. Ban some operations for migrations # noqa # NOTE(dulek): This is a list of migrations where we allow dropping # things. The rules for adding things here are very very specific. # Insight on how to drop things from the DB in a backward-compatible # manner is provided in Cinder's developer documentation. # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE # NOTE(dulek): 62 alters the column type from boolean to integer to # fix the bug 1518363. If we've followed the guidelines for live # schema upgrades we would end up either waiting 3 releases to fix # a simple bug or trigger a rebuild index operation in migration # (because constraint was impossible to delete without deleting # other foreign key constraints). Either way it's harsh... We've # decided to go with alter to minimise upgrade impact. The only # consequence for deployments running recent MySQL is inability # to perform volume-type-access modifications while running this # migration. # NOTE(dulek): 66 sets reservations.usage_id to nullable. This is # 100% backward compatible and according to MySQL docs such ALTER # is performed with the same restrictions as column addition, which # we of course allow. # NOTE(dulek): 73 drops tables and columns we've stopped using a # release ago. # NOTE(dulek): We only started requiring things be additive in # Mitaka, so ignore all migrations before that point. Change volume types to UUID Test that adding source_volid column works correctly. Test that adding and removing the backups table works correctly. Test adding snapshot_metadata table works correctly. Test adding transfers table works correctly. Test adding transfers table works correctly. Test that adding attached_host column works correctly. Test that adding provider_geometry column works correctly. Test that adding _name_id column works correctly. Test removing migrations table works correctly. Test that dropping xen storage manager tables works correctly. Test that added encryption information works correctly. # encryption key UUID # encryption types table Test that added qos_specs table works correctly. Test that adding migration_status column works correctly. Test adding volume_admin_metadata table works correctly. Test adding default data for quota classes works correctly. Test that adding disabled_reason column works correctly. Test that adding reservations index works correctly. Test adding replication columns to volume table. Test adding table and columns for consistencygroups. 
# Test consistencygroup_id is in Table volumes # Test cgsnapshot_id is in Table snapshots # Test Table consistencygroups exists # Test Table cgsnapshots exists # Verify foreign keys are created # 2 foreign keys in Table snapshots Test adding default data for consistencygroups quota class. Test adding volume_type_projects table works correctly. Test adding encryption_id column to encryption table. Test adding description columns to volume_types table. Test adding and removing driver_initiator_data table. Test that adding modified_at column works correctly. Test adding image_volume_cache_entries table. Check fields inherited from CinderBase ORM class. Test adding message table. Test adding cluster table and cluster_id fields. # Cluster specific fields # Check that we have added cluster_name field to all required tables # Workers specific fields Test adding group types and specs tables. Test adding groups tables. Test adding group_snapshots tables. Test that table creation on mysql only builds InnoDB tables. # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. # sanity check | 1.985947 | 2 |
hallo/modules/subscriptions/subscription_factory.py | joshcoales/Hallo | 1 | 6632309 | from typing import List, Type, Dict, TYPE_CHECKING
import hallo.modules.subscriptions.subscription_exception
from hallo.destination import Destination
import hallo.modules.subscriptions.common_fa_key
import hallo.modules.subscriptions.subscription_common
import hallo.modules.subscriptions.subscription
import hallo.modules.subscriptions.source_reddit
import hallo.modules.subscriptions.source_fa_watchers
import hallo.modules.subscriptions.source_fa_favs
import hallo.modules.subscriptions.source_fa_notif_comments
import hallo.modules.subscriptions.source_fa_notif_favs
import hallo.modules.subscriptions.source_fa_notes
import hallo.modules.subscriptions.source_e621_tagging
import hallo.modules.subscriptions.source_e621_backlog
import hallo.modules.subscriptions.source_e621
import hallo.modules.subscriptions.source_rss
import hallo.modules.subscriptions.source_twitter
import hallo.modules.subscriptions.source
if TYPE_CHECKING:
from hallo.hallo import Hallo
class SubscriptionFactory:
    sub_sources: List[Type['hallo.modules.subscriptions.source.Source']] = [
hallo.modules.subscriptions.source_e621.E621Source,
hallo.modules.subscriptions.source_e621_tagging.E621TaggingSource,
hallo.modules.subscriptions.source_e621_backlog.E621BacklogTaggingSource,
hallo.modules.subscriptions.source_fa_favs.FAFavsSource,
hallo.modules.subscriptions.source_fa_notes.FANotesSource,
hallo.modules.subscriptions.source_fa_notif_comments.FACommentNotificationsSource,
hallo.modules.subscriptions.source_fa_notif_favs.FAFavNotificationsSource,
hallo.modules.subscriptions.source_fa_watchers.FAWatchersSource,
hallo.modules.subscriptions.source_fa_watchers.FAUserWatchersSource,
hallo.modules.subscriptions.source_reddit.RedditSource,
hallo.modules.subscriptions.source_rss.RssSource,
]
common_classes: List[Type[hallo.modules.subscriptions.subscription_common.SubscriptionCommon]] = [
hallo.modules.subscriptions.common_fa_key.FAKeysCommon
]
@staticmethod
def get_source_names() -> List[str]:
return [
name
for sub_class in SubscriptionFactory.sub_sources
for name in sub_class.type_names
]
@staticmethod
def get_source_class_by_name(name: str) -> Type[hallo.modules.subscriptions.source.Source]:
classes = [
sub_class
for sub_class in SubscriptionFactory.sub_sources
if name in sub_class.type_names
]
if len(classes) != 1:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
"Failed to find a subscription type matching the name {}".format(name)
)
return classes[0]
@staticmethod
def source_from_json(
json_data: Dict,
destination: Destination,
sub_repo
    ) -> 'hallo.modules.subscriptions.source.Source':
name = json_data["type"]
classes = [
sub_class
for sub_class in SubscriptionFactory.sub_sources
if name == sub_class.type_name
]
if len(classes) != 1:
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
f"Failed to find a subscription source type matching the name {name}"
)
return classes[0].from_json(json_data, destination, sub_repo)
@staticmethod
def common_from_json(
common_json: Dict,
hallo_obj: 'Hallo'
) -> hallo.modules.subscriptions.subscription_common.SubscriptionCommon:
common_type_name = common_json["common_type"]
for common_class in SubscriptionFactory.common_classes:
if common_class.type_name == common_type_name:
return common_class.from_json(common_json, hallo_obj)
raise hallo.modules.subscriptions.subscription_exception.SubscriptionException(
f"Could not load common configuration of type {common_type_name}"
)
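# A minimal usage sketch (editor's illustration, not part of the original file):
# ``json_data``, ``destination`` and ``sub_repo`` are assumed placeholders supplied
# by the caller; only methods defined on SubscriptionFactory above are used.
def _example_restore_source(json_data: Dict, destination: Destination, sub_repo):
    # Look the class up first (raises SubscriptionException for unknown names)...
    source_cls = SubscriptionFactory.get_source_class_by_name(json_data["type"])
    # ...then rebuild the concrete Source object from its serialised form.
    return source_cls.from_json(json_data, destination, sub_repo)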
| none | 1 | 2.060347 | 2 |
|
runway/sources/git.py | avosper-intellaegis/runway | 0 | 6632310 | """Git type Path Source."""
import logging
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import Any, Dict, Optional
from .source import Source
LOGGER = logging.getLogger(__name__)
class Git(Source):
"""Git Path Source.
The Git path source can be tasked with cloning a remote repository
and pointing to a specific module folder (or the root).
"""
def __init__(
self,
*,
arguments: Optional[Dict[str, str]] = None,
location: str = "",
uri: str = "",
**kwargs: Any,
) -> None:
"""Git Path Source.
Args:
arguments: A reference can be passed along via the arguments so that a specific
version of the repository is cloned. **commit**, **tag**, **branch**
are all valid keys with respective output
location: The relative location to the root of the repository where the
module resides. Leaving this as an empty string, ``/``, or ``./``
will have runway look in the root folder.
uri: The uniform resource identifier that targets the remote git repository
"""
self.args = arguments or {}
self.uri = uri
self.location = location
super().__init__(**kwargs)
def fetch(self) -> Path:
"""Retrieve the git repository from it's remote location."""
from git import Repo # pylint: disable=import-outside-toplevel
ref = self.__determine_git_ref()
dir_name = "_".join([self.sanitize_git_path(self.uri), ref])
cached_dir_path = self.cache_dir / dir_name
if cached_dir_path.exists():
return cached_dir_path
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_repo_path = Path(tmpdirname) / dir_name
with Repo.clone_from(self.uri, str(tmp_repo_path)) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(str(tmp_repo_path), self.cache_dir)
return cached_dir_path
def __git_ls_remote(self, ref: str) -> str:
"""List remote repositories based on uri and ref received.
Keyword Args:
ref (str): The git reference value
"""
cmd = ["git", "ls-remote", self.uri, ref]
LOGGER.debug("getting commit ID from repo: %s", " ".join(cmd))
ls_remote_output = subprocess.check_output(cmd)
if b"\t" in ls_remote_output:
commit_id = ls_remote_output.split(b"\t")[0].decode()
LOGGER.debug("matching commit id found: %s", commit_id)
return commit_id
raise ValueError('Ref "%s" not found for repo %s.' % (ref, self.uri))
def __determine_git_ls_remote_ref(self) -> str:
"""Determine remote ref, defaulting to HEAD unless a branch is found."""
ref = "HEAD"
if self.args.get("branch"):
ref = "refs/heads/%s" % self.args["branch"]
return ref
def __determine_git_ref(self) -> str:
"""Determine the git reference code."""
ref_config_keys = sum(
bool(self.args.get(i)) for i in ["commit", "tag", "branch"]
)
if ref_config_keys > 1:
raise ValueError(
"Fetching remote git sources failed: conflicting revisions "
"(e.g. 'commit', 'tag', 'branch') specified for a package source"
)
if self.args.get("commit"):
return self.args["commit"]
if self.args.get("tag"):
return self.args["tag"]
return self.__git_ls_remote(self.__determine_git_ls_remote_ref())
@classmethod
def sanitize_git_path(cls, path: str) -> str:
"""Sanitize the git path for folder/file assignment.
Keyword Args:
path: The path string to be sanitized
"""
dir_name = path
split = path.split("//")
domain = split[len(split) - 1]
if domain.endswith(".git"):
dir_name = domain[:-4]
return cls.sanitize_directory_path(dir_name)
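# A minimal usage sketch (editor's illustration, not part of the original module):
# the repository URL and branch are illustrative, and ``cache_dir`` is assumed to be
# a keyword accepted by the Source base class (fetch() reads it as ``self.cache_dir``).
def _example_fetch(cache_dir: Path) -> Path:
    source = Git(
        uri="https://github.com/onicagroup/runway.git",
        location="./",
        arguments={"branch": "master"},
        cache_dir=cache_dir,
    )
    # Clones into a temporary directory, checks out the resolved ref, then moves
    # the result under cache_dir and returns that path.
    return source.fetch()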
| """'Git type Path Source."""
import logging
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import Any, Dict, Optional
from .source import Source
LOGGER = logging.getLogger(__name__)
class Git(Source):
"""Git Path Source.
The Git path source can be tasked with cloning a remote repository
and pointing to a specific module folder (or the root).
"""
def __init__(
self,
*,
arguments: Optional[Dict[str, str]] = None,
location: str = "",
uri: str = "",
**kwargs: Any,
) -> None:
"""Git Path Source.
Args:
arguments: A reference can be passed along via the arguments so that a specific
version of the repository is cloned. **commit**, **tag**, **branch**
are all valid keys with respective output
location: The relative location to the root of the repository where the
module resides. Leaving this as an empty string, ``/``, or ``./``
will have runway look in the root folder.
uri: The uniform resource identifier that targets the remote git repository
"""
self.args = arguments or {}
self.uri = uri
self.location = location
super().__init__(**kwargs)
def fetch(self) -> Path:
"""Retrieve the git repository from it's remote location."""
from git import Repo # pylint: disable=import-outside-toplevel
ref = self.__determine_git_ref()
dir_name = "_".join([self.sanitize_git_path(self.uri), ref])
cached_dir_path = self.cache_dir / dir_name
if cached_dir_path.exists():
return cached_dir_path
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_repo_path = Path(tmpdirname) / dir_name
with Repo.clone_from(self.uri, str(tmp_repo_path)) as repo:
repo.head.reference = ref
repo.head.reset(index=True, working_tree=True)
shutil.move(str(tmp_repo_path), self.cache_dir)
return cached_dir_path
def __git_ls_remote(self, ref: str) -> str:
"""List remote repositories based on uri and ref received.
Keyword Args:
ref (str): The git reference value
"""
cmd = ["git", "ls-remote", self.uri, ref]
LOGGER.debug("getting commit ID from repo: %s", " ".join(cmd))
ls_remote_output = subprocess.check_output(cmd)
if b"\t" in ls_remote_output:
commit_id = ls_remote_output.split(b"\t")[0].decode()
LOGGER.debug("matching commit id found: %s", commit_id)
return commit_id
raise ValueError('Ref "%s" not found for repo %s.' % (ref, self.uri))
def __determine_git_ls_remote_ref(self) -> str:
"""Determine remote ref, defaulting to HEAD unless a branch is found."""
ref = "HEAD"
if self.args.get("branch"):
ref = "refs/heads/%s" % self.args["branch"]
return ref
def __determine_git_ref(self) -> str:
"""Determine the git reference code."""
ref_config_keys = sum(
bool(self.args.get(i)) for i in ["commit", "tag", "branch"]
)
if ref_config_keys > 1:
raise ValueError(
"Fetching remote git sources failed: conflicting revisions "
"(e.g. 'commit', 'tag', 'branch') specified for a package source"
)
if self.args.get("commit"):
return self.args["commit"]
if self.args.get("tag"):
return self.args["tag"]
return self.__git_ls_remote(self.__determine_git_ls_remote_ref())
@classmethod
def sanitize_git_path(cls, path: str) -> str:
"""Sanitize the git path for folder/file assignment.
Keyword Args:
path: The path string to be sanitized
"""
dir_name = path
split = path.split("//")
domain = split[len(split) - 1]
if domain.endswith(".git"):
dir_name = domain[:-4]
return cls.sanitize_directory_path(dir_name)
| en | 0.793959 | 'Git type Path Source. Git Path Source. The Git path source can be tasked with cloning a remote repository and pointing to a specific module folder (or the root). Git Path Source. Args: arguments: A reference can be passed along via the arguments so that a specific version of the repository is cloned. **commit**, **tag**, **branch** are all valid keys with respective output location: The relative location to the root of the repository where the module resides. Leaving this as an empty string, ``/``, or ``./`` will have runway look in the root folder. uri: The uniform resource identifier that targets the remote git repository Retrieve the git repository from it's remote location. # pylint: disable=import-outside-toplevel List remote repositories based on uri and ref received. Keyword Args: ref (str): The git reference value Determine remote ref, defaulting to HEAD unless a branch is found. Determine the git reference code. Sanitize the git path for folder/file assignment. Keyword Args: path: The path string to be sanitized | 2.704262 | 3 |
app.py | tristanmkernan/off-the-beaten-path-backend | 1 | 6632311 | from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from flask_cors import CORS
from jsonschema import validate
from geopy import Point
from geopy.distance import vincenty
import cloudinary
import cloudinary.uploader
import cloudinary.api
from datetime import datetime
from os import environ
import random
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('OTBP_DATABASE_URI')
app.config['DEFAULT_PAGINATION_PAGE_LENGTH'] = 10
app.config['POST_SCHEMA'] = {
"schema": "http://json-schema.org/draft-04/schema#",
"title": "Post",
"description": "Input data for a new post",
"type": "object",
"properties": {
"text": {"type": "string"},
"pictureId": {"type": ["integer", "null"]},
"location": {
"type": "object",
"properties": {
"lat": {"type": "number"},
"lng": {"type": "number"}
},
"required": ["lat", "lng"]
}
},
"required": ["text", "location"]
}
app.config['TARGET_MIN_DISTANCE'] = 100
app.config['TARGET_MAX_DISTANCE'] = 200
CORS(app)
db = SQLAlchemy(app)
cloudinary.config(
cloud_name=environ.get('OTBP_CLOUDINARY_CLOUD_NAME'),
api_key=environ.get('OTBP_CLOUDINARY_API_KEY'),
api_secret=environ.get('OTBP_CLOUDINARY_API_SECRET')
)
class TargetLocation(db.Model):
key = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime,
default=datetime.utcnow,
nullable=False)
lat = db.Column(db.Float, nullable=False)
lng = db.Column(db.Float, nullable=False)
def toSimpleDict(self):
return {
'key': self.key,
'position': {
'lat': self.lat,
'lng': self.lng,
},
'totalVisitors': 10,
'averageVisitorsPerHour': 10
}
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime,
default=datetime.utcnow,
nullable=False)
text = db.Column(db.String(140), nullable=False)
final_distance = db.Column(db.Float, nullable=False)
location_id = db.Column(db.Integer,
db.ForeignKey('target_location.key'),
nullable=False)
location = db.relationship('TargetLocation',
backref=db.backref('posts', lazy=True))
image_id = db.Column(db.Integer,
db.ForeignKey('saved_image.id'),
nullable=True)
image = db.relationship('SavedImage')
def toSimpleDict(self):
return {
'timestamp': self.created_at.timestamp(),
'pictureUrl': getattr(self.image, 'url', None),
'finalDistance': self.final_distance,
'text': self.text
}
class SavedImage(db.Model):
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.DateTime,
default=datetime.utcnow,
nullable=False)
url = db.Column(db.String(512), nullable=True)
class EasyPagination(object):
def __init__(self, data, pageNumber, lastPage):
self.data = data
self.pageNumber = pageNumber
self.lastPage = lastPage
def toSimpleDict(self):
return {
'data': self.data,
'pageNumber': self.pageNumber,
'lastPage': self.lastPage
}
def _haversine(a, b):
return vincenty((a.lat, a.lng), (b.lat, b.lng)).meters
@app.route('/')
def index():
return 'todo: find an api details generator like swagger?'
@app.route('/target/<location>', methods=['get'])
def get_target_by_location(location):
# location should be in format `lat,lng`
source_lat, source_lng = list(map(lambda x: float(x), location.split(',')))
source_location = TargetLocation(lat=source_lat, lng=source_lng)
# attempt to find an existing location
target_list = TargetLocation.query \
.filter(
# check for results created today
func.date(TargetLocation.created_at) == func.current_date()
) \
.all()
sorted_target_list = sorted(target_list,
key=lambda t: _haversine(source_location, t))
if len(sorted_target_list) > 0:
target = sorted_target_list[0]
haversine_distance = _haversine(
target,
source_location)
if haversine_distance < app.config['TARGET_MAX_DISTANCE']:
return jsonify(target.toSimpleDict())
# naively create a target between MIN to MAX m away from current location
angle = random.randint(1, 360)
pdistance = random.randint(app.config['TARGET_MIN_DISTANCE'],
app.config['TARGET_MAX_DISTANCE'])
pdistance /= 1000
target_lat, target_lng, alt = vincenty(kilometers=pdistance) \
.destination(Point(source_lat, source_lng), angle)
target_location = TargetLocation(lat=target_lat,
lng=target_lng)
db.session.add(target_location)
db.session.commit()
return jsonify(target_location.toSimpleDict())
@app.route('/target/key/<int:key>', methods=['get'])
def get_target_by_key(key):
target = TargetLocation.query.get_or_404(key)
return jsonify(target.toSimpleDict())
@app.route('/posts/<int:key>/<int:page>', methods=['get'])
def get_posts_by_page(key, page=1):
pagination = Post.query \
.filter(Post.location_id == key) \
.paginate(page,
app.config['DEFAULT_PAGINATION_PAGE_LENGTH'],
False)
posts = list(map(lambda x: x.toSimpleDict(), pagination.items))
easy_pagination = EasyPagination(posts, page, not pagination.has_next)
return jsonify(easy_pagination.toSimpleDict())
@app.route('/posts/<int:key>', methods=['post'])
def create_post(key):
data = request.get_json()
validate(data, app.config['POST_SCHEMA'])
target_location = TargetLocation.query.get_or_404(key)
post = Post(text=data['text'],
image_id=data.get('pictureId', None),
final_distance=_haversine(
TargetLocation(
lat=data['location']['lat'],
lng=data['location']['lng']
),
target_location),
location_id=key)
db.session.add(post)
db.session.commit()
return jsonify({'success': True}), 201
@app.route('/image', methods=['post'])
def upload_photo():
cloudinary_data = cloudinary.uploader.upload(request.files['image'])
image = SavedImage(url=cloudinary_data['secure_url'])
db.session.add(image)
db.session.commit()
return jsonify({
'pictureId': image.id
})
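# A minimal usage sketch (editor's illustration, not part of the original file):
# exercising the API with Flask's built-in test client rather than a live
# deployment; the coordinates are arbitrary example values.
def _example_smoke_test():
    with app.test_client() as client:
        # Request (or lazily create) today's target near the given lat,lng.
        target = client.get('/target/40.7128,-74.0060').get_json()
        # Fetch page 1 of posts for that target.
        posts = client.get('/posts/{}/1'.format(target['key'])).get_json()
        return target, posts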
if __name__ == '__main__':
app.run(debug=environ.get('OTBP_DEBUG_MODE', False))
| en | 0.891353 | #", # location should be in format `lat,lng` # attempt to find an existing location # check for results created today # naively create a target between MIN to MAX m away from current location | 2.181265 | 2 |
distancemap.py | quinpallet/distance-map | 0 | 6632312 |
import folium
import unicodedata
from geopy.distance import great_circle as distance
from q_geocode import ReverseGeocode
def get_tooltip(location: tuple) -> str:
return f'緯度:{location[0]}, 経度:{location[1]}'
def get_marker_icon(label: str) -> folium.DivIcon:
    # Marker font size
font_size = 16
    # Character count (half-width = 1, full-width = 2)
n_chars = sum([2 if unicodedata.east_asian_width(c) in 'FWA' else 1 for c in label]) + 2
    # Marker icon display size
icon_size = (font_size * n_chars / 2, font_size * 2.5)
    # Marker HTML style
marker_dom_style = f'font-size: {font_size}px; color: red; background-color: rgba(255, 255, 255, .7); text-align: center'
return folium.DivIcon(icon_size=icon_size, icon_anchor=(-10, font_size * 0.75), html=f'<div style="{marker_dom_style}">{label}</div>')
def distance_on_map(location1: tuple, location2: tuple) -> folium.Map:
    # Midpoint latitude between the two coordinates
min_lat = min(location1[0], location2[0])
max_lat = max(location1[0], location2[0])
center_lat = (max_lat - min_lat) / 2 + min_lat
    # Midpoint longitude between the two coordinates
min_lng = min(location1[1], location2[1])
max_lng = max(location1[1], location2[1])
center_lng = (max_lng - min_lng) / 2 + min_lng
    map = folium.Map(location=(center_lat, center_lng))  # folium expects (lat, lng) order
map.fit_bounds([location1, location2])
folium.PolyLine(locations=[location1, location2], color='blue').add_to(map)
folium.CircleMarker(location=location1, tooltip=get_tooltip(location1), color='red', radius=5, fill='red').add_to(map)
folium.CircleMarker(location=location2, tooltip=get_tooltip(location2), color='red', radius=5, fill='red').add_to(map)
    # Show the start point's reverse-geocoded address
folium.map.Marker(location=location1, icon=get_marker_icon(ReverseGeocode(location1).get_address())).add_to(map)
    # Show the end point's reverse-geocoded address
folium.map.Marker(location=location2, icon=get_marker_icon(ReverseGeocode(location2).get_address())).add_to(map)
    # Show the straight-line distance
folium.map.Marker(location=(center_lat, center_lng), icon=get_marker_icon(f'直線距離: {distance(location1, location2).kilometers:.3f}Km')).add_to(map)
return map
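# A minimal usage sketch (editor's illustration, not part of the original file):
# two arbitrary sample coordinates are rendered and written to an HTML file;
# ReverseGeocode is assumed to have network access.
def _example_save_map(outfile: str = 'distance_map.html') -> None:
    tokyo_station = (35.6812, 139.7671)
    yokohama_station = (35.4437, 139.6380)
    # folium.Map.save serialises the Leaflet map to a standalone HTML page.
    distance_on_map(tokyo_station, yokohama_station).save(outfile)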
| ja | 0.998082 | # Marker font size # Character count (half-width = 1, full-width = 2) # Marker icon display size # Marker HTML style # Midpoint latitude between the two coordinates # Midpoint longitude between the two coordinates # Start-point address label # End-point address label # Straight-line distance label | 2.798299 | 3 |
venv/Lib/site-packages/cryptography/x509/base.py | arnoyu-hub/COMP0016miemie | 0 | 6632313 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
import os
import typing
from cryptography import utils
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa,
ec,
ed25519,
ed448,
rsa,
x25519,
x448,
)
from cryptography.hazmat.primitives.asymmetric.types import (
CERTIFICATE_PUBLIC_KEY_TYPES,
PRIVATE_KEY_TYPES as PRIVATE_KEY_TYPES,
PUBLIC_KEY_TYPES as PUBLIC_KEY_TYPES,
)
from cryptography.x509.extensions import (
Extension,
ExtensionType,
Extensions,
_make_sequence_methods,
)
from cryptography.x509.name import Name, _ASN1Type
from cryptography.x509.oid import ObjectIdentifier
_EARLIEST_UTC_TIME = datetime.datetime(1950, 1, 1)
class AttributeNotFound(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super(AttributeNotFound, self).__init__(msg)
self.oid = oid
def _reject_duplicate_extension(
extension: Extension[ExtensionType],
extensions: typing.List[Extension[ExtensionType]],
) -> None:
# This is quadratic in the number of extensions
for e in extensions:
if e.oid == extension.oid:
raise ValueError("This extension has already been set.")
def _reject_duplicate_attribute(
oid: ObjectIdentifier,
attributes: typing.List[typing.Tuple[ObjectIdentifier, bytes]],
) -> None:
# This is quadratic in the number of attributes
for attr_oid, _ in attributes:
if attr_oid == oid:
raise ValueError("This attribute has already been set.")
def _convert_to_naive_utc_time(time: datetime.datetime) -> datetime.datetime:
"""Normalizes a datetime to a naive datetime in UTC.
time -- datetime to normalize. Assumed to be in UTC if not timezone
aware.
"""
if time.tzinfo is not None:
offset = time.utcoffset()
offset = offset if offset else datetime.timedelta()
return time.replace(tzinfo=None) - offset
else:
return time
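# Editor's illustration (not part of the original module): an aware datetime at
# UTC+2 normalizes to the equivalent naive UTC value, e.g.
#   _convert_to_naive_utc_time(
#       datetime.datetime(2023, 1, 1, 12, 0,
#                         tzinfo=datetime.timezone(datetime.timedelta(hours=2)))
#   ) == datetime.datetime(2023, 1, 1, 10, 0)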
class Attribute:
def __init__(
self,
oid: ObjectIdentifier,
value: bytes,
_type: int = _ASN1Type.UTF8String.value,
) -> None:
self._oid = oid
self._value = value
self._type = _type
@property
def oid(self) -> ObjectIdentifier:
return self._oid
@property
def value(self) -> bytes:
return self._value
def __repr__(self):
return "<Attribute(oid={}, value={!r})>".format(self.oid, self.value)
def __eq__(self, other: typing.Any) -> bool:
if not isinstance(other, Attribute):
return NotImplemented
return (
self.oid == other.oid
and self.value == other.value
and self._type == other._type
)
def __ne__(self, other: typing.Any) -> bool:
return not self == other
def __hash__(self) -> int:
return hash((self.oid, self.value, self._type))
class Attributes:
def __init__(
self,
attributes: typing.Iterable[Attribute],
) -> None:
self._attributes = list(attributes)
__len__, __iter__, __getitem__ = _make_sequence_methods("_attributes")
def __repr__(self):
return "<Attributes({})>".format(self._attributes)
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> Attribute:
for attr in self:
if attr.oid == oid:
return attr
raise AttributeNotFound("No {} attribute was found".format(oid), oid)
class Version(utils.Enum):
v1 = 0
v3 = 2
class InvalidVersion(Exception):
def __init__(self, msg: str, parsed_version: int) -> None:
super(InvalidVersion, self).__init__(msg)
self.parsed_version = parsed_version
class Certificate(metaclass=abc.ABCMeta):
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@abc.abstractproperty
def serial_number(self) -> int:
"""
Returns certificate serial number
"""
@abc.abstractproperty
def version(self) -> Version:
"""
Returns the certificate version
"""
@abc.abstractmethod
def public_key(self) -> CERTIFICATE_PUBLIC_KEY_TYPES:
"""
Returns the public key
"""
@abc.abstractproperty
def not_valid_before(self) -> datetime.datetime:
"""
Not before time (represented as UTC datetime)
"""
@abc.abstractproperty
def not_valid_after(self) -> datetime.datetime:
"""
Not after time (represented as UTC datetime)
"""
@abc.abstractproperty
def issuer(self) -> Name:
"""
Returns the issuer name object.
"""
@abc.abstractproperty
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certificate_bytes(self) -> bytes:
"""
Returns the tbsCertificate payload bytes as defined in RFC 5280.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the certificate to PEM or DER format.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
Certificate.register(rust_x509.Certificate)
class RevokedCertificate(metaclass=abc.ABCMeta):
@abc.abstractproperty
def serial_number(self) -> int:
"""
Returns the serial number of the revoked certificate.
"""
@abc.abstractproperty
def revocation_date(self) -> datetime.datetime:
"""
Returns the date of when this certificate was revoked.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of Revoked extensions.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
RevokedCertificate.register(rust_x509.RevokedCertificate)
class _RawRevokedCertificate(RevokedCertificate):
def __init__(
self,
serial_number: int,
revocation_date: datetime.datetime,
extensions: Extensions,
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
@property
def serial_number(self) -> int:
return self._serial_number
@property
def revocation_date(self) -> datetime.datetime:
return self._revocation_date
@property
def extensions(self) -> Extensions:
return self._extensions
class CertificateRevocationList(metaclass=abc.ABCMeta):
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the CRL to PEM or DER format.
"""
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@abc.abstractmethod
def get_revoked_certificate_by_serial_number(
self, serial_number: int
) -> typing.Optional[RevokedCertificate]:
"""
Returns an instance of RevokedCertificate or None if the serial_number
is not in the CRL.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def issuer(self) -> Name:
"""
Returns the X509Name with the issuer of this CRL.
"""
@abc.abstractproperty
def next_update(self) -> typing.Optional[datetime.datetime]:
"""
Returns the date of next update for this CRL.
"""
@abc.abstractproperty
def last_update(self) -> datetime.datetime:
"""
Returns the date of last update for this CRL.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of CRL extensions.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certlist_bytes(self) -> bytes:
"""
Returns the tbsCertList payload bytes as defined in RFC 5280.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __len__(self) -> int:
"""
Number of revoked certificates in the CRL.
"""
@typing.overload
def __getitem__(self, idx: int) -> RevokedCertificate:
...
@typing.overload
def __getitem__(self, idx: slice) -> typing.List[RevokedCertificate]:
...
@abc.abstractmethod
def __getitem__(
self, idx: typing.Union[int, slice]
) -> typing.Union[RevokedCertificate, typing.List[RevokedCertificate]]:
"""
Returns a revoked certificate (or slice of revoked certificates).
"""
@abc.abstractmethod
def __iter__(self) -> typing.Iterator[RevokedCertificate]:
"""
Iterator over the revoked certificates
"""
@abc.abstractmethod
def is_signature_valid(self, public_key: PUBLIC_KEY_TYPES) -> bool:
"""
Verifies signature of revocation list against given public key.
"""
CertificateRevocationList.register(rust_x509.CertificateRevocationList)
class CertificateSigningRequest(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_key(self) -> PUBLIC_KEY_TYPES:
"""
Returns the public key
"""
@abc.abstractproperty
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns the extensions in the signing request.
"""
@abc.abstractproperty
def attributes(self) -> Attributes:
"""
Returns an Attributes object.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Encodes the request to PEM or DER format.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certrequest_bytes(self) -> bytes:
"""
Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC
2986.
"""
@abc.abstractproperty
def is_signature_valid(self) -> bool:
"""
Verifies signature of signing request.
"""
@abc.abstractmethod
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> bytes:
"""
Get the attribute value for a given OID.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
CertificateSigningRequest.register(rust_x509.CertificateSigningRequest)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_pem_x509_certificate(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_der_x509_certificate(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_pem_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_der_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_pem_x509_crl(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_der_x509_crl(data)
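# A minimal usage sketch (editor's illustration, not part of the original module):
# a typical call into the PEM loader above; the file path is an assumed example.
def _example_load_certificate(path: str = "example_cert.pem") -> Certificate:
    with open(path, "rb") as f:
        cert = load_pem_x509_certificate(f.read())
    # A few commonly inspected fields on the returned Certificate:
    _ = (cert.subject, cert.issuer, cert.serial_number, cert.not_valid_after)
    return cert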
class CertificateSigningRequestBuilder(object):
def __init__(
self,
subject_name: typing.Optional[Name] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
attributes: typing.List[typing.Tuple[ObjectIdentifier, bytes]] = [],
):
"""
Creates an empty X.509 certificate request (v1).
"""
self._subject_name = subject_name
self._extensions = extensions
self._attributes = attributes
def subject_name(self, name: Name) -> "CertificateSigningRequestBuilder":
"""
Sets the certificate requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateSigningRequestBuilder(
name, self._extensions, self._attributes
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 extension to the certificate request.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions + [extension],
self._attributes,
)
def add_attribute(
self, oid: ObjectIdentifier, value: bytes
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 attribute with an OID and associated value.
"""
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
if not isinstance(value, bytes):
raise TypeError("value must be bytes")
_reject_duplicate_attribute(oid, self._attributes)
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions,
self._attributes + [(oid, value)],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> CertificateSigningRequest:
"""
Signs the request using the requestor's private key.
"""
if self._subject_name is None:
raise ValueError("A CertificateSigningRequest must have a subject")
return rust_x509.create_x509_csr(self, private_key, algorithm)
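# A minimal usage sketch (editor's illustration, not part of the original module):
# building and signing a small CSR with the builder above; the RSA key parameters
# and common name are examples.
def _example_build_csr() -> CertificateSigningRequest:
    from cryptography.x509.name import NameAttribute  # not imported at module level
    from cryptography.x509.oid import NameOID
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    return (
        CertificateSigningRequestBuilder()
        .subject_name(Name([NameAttribute(NameOID.COMMON_NAME, "example.com")]))
        .sign(key, hashes.SHA256())
    )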
class CertificateBuilder(object):
_extensions: typing.List[Extension[ExtensionType]]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
subject_name: typing.Optional[Name] = None,
public_key: typing.Optional[CERTIFICATE_PUBLIC_KEY_TYPES] = None,
serial_number: typing.Optional[int] = None,
not_valid_before: typing.Optional[datetime.datetime] = None,
not_valid_after: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
) -> None:
self._version = Version.v3
self._issuer_name = issuer_name
self._subject_name = subject_name
self._public_key = public_key
self._serial_number = serial_number
self._not_valid_before = not_valid_before
self._not_valid_after = not_valid_after
self._extensions = extensions
def issuer_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the CA's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateBuilder(
name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def subject_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateBuilder(
self._issuer_name,
name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def public_key(
self,
key: CERTIFICATE_PUBLIC_KEY_TYPES,
) -> "CertificateBuilder":
"""
Sets the requestor's public key (as found in the signing request).
"""
if not isinstance(
key,
(
dsa.DSAPublicKey,
rsa.RSAPublicKey,
ec.EllipticCurvePublicKey,
ed25519.Ed25519PublicKey,
ed448.Ed448PublicKey,
x25519.X25519PublicKey,
x448.X448PublicKey,
),
):
raise TypeError(
"Expecting one of DSAPublicKey, RSAPublicKey,"
" EllipticCurvePublicKey, Ed25519PublicKey,"
" Ed448PublicKey, X25519PublicKey, or "
"X448PublicKey."
)
if self._public_key is not None:
raise ValueError("The public key may only be set once.")
return CertificateBuilder(
self._issuer_name,
self._subject_name,
key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def serial_number(self, number: int) -> "CertificateBuilder":
"""
Sets the certificate serial number.
"""
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive.")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def not_valid_before(
self, time: datetime.datetime
) -> "CertificateBuilder":
"""
Sets the certificate activation time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_before is not None:
raise ValueError("The not valid before may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid before date must be on or after"
" 1950 January 1)."
)
if self._not_valid_after is not None and time > self._not_valid_after:
raise ValueError(
"The not valid before date must be before the not valid after "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
time,
self._not_valid_after,
self._extensions,
)
def not_valid_after(self, time: datetime.datetime) -> "CertificateBuilder":
"""
Sets the certificate expiration time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_after is not None:
raise ValueError("The not valid after may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid after date must be on or after"
" 1950 January 1."
)
if (
self._not_valid_before is not None
and time < self._not_valid_before
):
raise ValueError(
"The not valid after date must be after the not valid before "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
time,
self._extensions,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateBuilder":
"""
Adds an X.509 extension to the certificate.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions + [extension],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> Certificate:
"""
Signs the certificate using the CA's private key.
"""
if self._subject_name is None:
raise ValueError("A certificate must have a subject name")
if self._issuer_name is None:
raise ValueError("A certificate must have an issuer name")
if self._serial_number is None:
raise ValueError("A certificate must have a serial number")
if self._not_valid_before is None:
raise ValueError("A certificate must have a not valid before time")
if self._not_valid_after is None:
raise ValueError("A certificate must have a not valid after time")
if self._public_key is None:
raise ValueError("A certificate must have a public key")
return rust_x509.create_x509_certificate(self, private_key, algorithm)
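# Illustrative usage sketch (added comment, not part of the original module): each
# builder method returns a new immutable CertificateBuilder, so a certificate is
# typically assembled as a chain and finished with sign(), e.g.
#   cert = (
#       CertificateBuilder()
#       .subject_name(subject)                    # x509.Name (assumed to exist)
#       .issuer_name(issuer)                      # x509.Name of the signing CA (assumed)
#       .public_key(private_key.public_key())
#       .serial_number(random_serial_number())
#       .not_valid_before(datetime.datetime.utcnow())
#       .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=30))
#       .sign(private_key, hashes.SHA256())
#   )
# Here subject, issuer, and private_key are caller-provided names, not defined in this file.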
class CertificateRevocationListBuilder(object):
_extensions: typing.List[Extension[ExtensionType]]
_revoked_certificates: typing.List[RevokedCertificate]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
last_update: typing.Optional[datetime.datetime] = None,
next_update: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
revoked_certificates: typing.List[RevokedCertificate] = [],
):
self._issuer_name = issuer_name
self._last_update = last_update
self._next_update = next_update
self._extensions = extensions
self._revoked_certificates = revoked_certificates
def issuer_name(
self, issuer_name: Name
) -> "CertificateRevocationListBuilder":
if not isinstance(issuer_name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateRevocationListBuilder(
issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def last_update(
self, last_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(last_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._last_update is not None:
raise ValueError("Last update may only be set once.")
last_update = _convert_to_naive_utc_time(last_update)
if last_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._next_update is not None and last_update > self._next_update:
raise ValueError(
"The last update date must be before the next update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def next_update(
self, next_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(next_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._next_update is not None:
raise ValueError("Last update may only be set once.")
next_update = _convert_to_naive_utc_time(next_update)
if next_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._last_update is not None and next_update < self._last_update:
raise ValueError(
"The next update date must be after the last update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
next_update,
self._extensions,
self._revoked_certificates,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateRevocationListBuilder":
"""
Adds an X.509 extension to the certificate revocation list.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions + [extension],
self._revoked_certificates,
)
def add_revoked_certificate(
self, revoked_certificate: RevokedCertificate
) -> "CertificateRevocationListBuilder":
"""
Adds a revoked certificate to the CRL.
"""
if not isinstance(revoked_certificate, RevokedCertificate):
raise TypeError("Must be an instance of RevokedCertificate")
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates + [revoked_certificate],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> CertificateRevocationList:
if self._issuer_name is None:
raise ValueError("A CRL must have an issuer name")
if self._last_update is None:
raise ValueError("A CRL must have a last update time")
if self._next_update is None:
raise ValueError("A CRL must have a next update time")
return rust_x509.create_x509_crl(self, private_key, algorithm)
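# Illustrative sketch (added comment, assumed caller-side usage): a CRL is built the
# same way, with revoked entries produced by RevokedCertificateBuilder below, e.g.
#   crl = (
#       CertificateRevocationListBuilder()
#       .issuer_name(issuer)                              # x509.Name (assumed)
#       .last_update(now)                                 # datetime (assumed)
#       .next_update(now + datetime.timedelta(days=1))
#       .add_revoked_certificate(revoked_entry)           # RevokedCertificate (assumed)
#       .sign(private_key, hashes.SHA256())
#   )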
class RevokedCertificateBuilder(object):
def __init__(
self,
serial_number: typing.Optional[int] = None,
revocation_date: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
def serial_number(self, number: int) -> "RevokedCertificateBuilder":
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return RevokedCertificateBuilder(
number, self._revocation_date, self._extensions
)
def revocation_date(
self, time: datetime.datetime
) -> "RevokedCertificateBuilder":
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._revocation_date is not None:
raise ValueError("The revocation date may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The revocation date must be on or after" " 1950 January 1."
)
return RevokedCertificateBuilder(
self._serial_number, time, self._extensions
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "RevokedCertificateBuilder":
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return RevokedCertificateBuilder(
self._serial_number,
self._revocation_date,
self._extensions + [extension],
)
def build(self, backend: typing.Any = None) -> RevokedCertificate:
if self._serial_number is None:
raise ValueError("A revoked certificate must have a serial number")
if self._revocation_date is None:
raise ValueError(
"A revoked certificate must have a revocation date"
)
return _RawRevokedCertificate(
self._serial_number,
self._revocation_date,
Extensions(self._extensions),
)
def random_serial_number() -> int:
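    # os.urandom(20) yields 160 random bits; shifting right by one clears the top
    # bit, so the result is non-negative and fits in the 159-bit limit enforced by
    # the serial_number() builder methods above.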
return int.from_bytes(os.urandom(20), "big") >> 1
| # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
import os
import typing
from cryptography import utils
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa,
ec,
ed25519,
ed448,
rsa,
x25519,
x448,
)
from cryptography.hazmat.primitives.asymmetric.types import (
CERTIFICATE_PUBLIC_KEY_TYPES,
PRIVATE_KEY_TYPES as PRIVATE_KEY_TYPES,
PUBLIC_KEY_TYPES as PUBLIC_KEY_TYPES,
)
from cryptography.x509.extensions import (
Extension,
ExtensionType,
Extensions,
_make_sequence_methods,
)
from cryptography.x509.name import Name, _ASN1Type
from cryptography.x509.oid import ObjectIdentifier
_EARLIEST_UTC_TIME = datetime.datetime(1950, 1, 1)
class AttributeNotFound(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super(AttributeNotFound, self).__init__(msg)
self.oid = oid
def _reject_duplicate_extension(
extension: Extension[ExtensionType],
extensions: typing.List[Extension[ExtensionType]],
) -> None:
# This is quadratic in the number of extensions
for e in extensions:
if e.oid == extension.oid:
raise ValueError("This extension has already been set.")
def _reject_duplicate_attribute(
oid: ObjectIdentifier,
attributes: typing.List[typing.Tuple[ObjectIdentifier, bytes]],
) -> None:
# This is quadratic in the number of attributes
for attr_oid, _ in attributes:
if attr_oid == oid:
raise ValueError("This attribute has already been set.")
def _convert_to_naive_utc_time(time: datetime.datetime) -> datetime.datetime:
"""Normalizes a datetime to a naive datetime in UTC.
time -- datetime to normalize. Assumed to be in UTC if not timezone
aware.
"""
if time.tzinfo is not None:
offset = time.utcoffset()
offset = offset if offset else datetime.timedelta()
return time.replace(tzinfo=None) - offset
else:
return time
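# Illustrative example (added comment, not in the original source): an aware datetime
# such as 2021-01-01 12:00+02:00 becomes the naive UTC datetime 2021-01-01 10:00,
# while a naive datetime is returned unchanged.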
class Attribute:
def __init__(
self,
oid: ObjectIdentifier,
value: bytes,
_type: int = _ASN1Type.UTF8String.value,
) -> None:
self._oid = oid
self._value = value
self._type = _type
@property
def oid(self) -> ObjectIdentifier:
return self._oid
@property
def value(self) -> bytes:
return self._value
def __repr__(self):
return "<Attribute(oid={}, value={!r})>".format(self.oid, self.value)
def __eq__(self, other: typing.Any) -> bool:
if not isinstance(other, Attribute):
return NotImplemented
return (
self.oid == other.oid
and self.value == other.value
and self._type == other._type
)
def __ne__(self, other: typing.Any) -> bool:
return not self == other
def __hash__(self) -> int:
return hash((self.oid, self.value, self._type))
class Attributes:
def __init__(
self,
attributes: typing.Iterable[Attribute],
) -> None:
self._attributes = list(attributes)
__len__, __iter__, __getitem__ = _make_sequence_methods("_attributes")
def __repr__(self):
return "<Attributes({})>".format(self._attributes)
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> Attribute:
for attr in self:
if attr.oid == oid:
return attr
raise AttributeNotFound("No {} attribute was found".format(oid), oid)
class Version(utils.Enum):
v1 = 0
v3 = 2
class InvalidVersion(Exception):
def __init__(self, msg: str, parsed_version: int) -> None:
super(InvalidVersion, self).__init__(msg)
self.parsed_version = parsed_version
class Certificate(metaclass=abc.ABCMeta):
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@abc.abstractproperty
def serial_number(self) -> int:
"""
Returns certificate serial number
"""
@abc.abstractproperty
def version(self) -> Version:
"""
Returns the certificate version
"""
@abc.abstractmethod
def public_key(self) -> CERTIFICATE_PUBLIC_KEY_TYPES:
"""
Returns the public key
"""
@abc.abstractproperty
def not_valid_before(self) -> datetime.datetime:
"""
Not before time (represented as UTC datetime)
"""
@abc.abstractproperty
def not_valid_after(self) -> datetime.datetime:
"""
Not after time (represented as UTC datetime)
"""
@abc.abstractproperty
def issuer(self) -> Name:
"""
Returns the issuer name object.
"""
@abc.abstractproperty
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certificate_bytes(self) -> bytes:
"""
Returns the tbsCertificate payload bytes as defined in RFC 5280.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the certificate to PEM or DER format.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
Certificate.register(rust_x509.Certificate)
class RevokedCertificate(metaclass=abc.ABCMeta):
@abc.abstractproperty
def serial_number(self) -> int:
"""
Returns the serial number of the revoked certificate.
"""
@abc.abstractproperty
def revocation_date(self) -> datetime.datetime:
"""
Returns the date of when this certificate was revoked.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of Revoked extensions.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
RevokedCertificate.register(rust_x509.RevokedCertificate)
class _RawRevokedCertificate(RevokedCertificate):
def __init__(
self,
serial_number: int,
revocation_date: datetime.datetime,
extensions: Extensions,
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
@property
def serial_number(self) -> int:
return self._serial_number
@property
def revocation_date(self) -> datetime.datetime:
return self._revocation_date
@property
def extensions(self) -> Extensions:
return self._extensions
class CertificateRevocationList(metaclass=abc.ABCMeta):
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the CRL to PEM or DER format.
"""
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@abc.abstractmethod
def get_revoked_certificate_by_serial_number(
self, serial_number: int
) -> typing.Optional[RevokedCertificate]:
"""
Returns an instance of RevokedCertificate or None if the serial_number
is not in the CRL.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def issuer(self) -> Name:
"""
Returns the X509Name with the issuer of this CRL.
"""
@abc.abstractproperty
def next_update(self) -> typing.Optional[datetime.datetime]:
"""
Returns the date of next update for this CRL.
"""
@abc.abstractproperty
def last_update(self) -> datetime.datetime:
"""
Returns the date of last update for this CRL.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of CRL extensions.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certlist_bytes(self) -> bytes:
"""
Returns the tbsCertList payload bytes as defined in RFC 5280.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __len__(self) -> int:
"""
Number of revoked certificates in the CRL.
"""
@typing.overload
def __getitem__(self, idx: int) -> RevokedCertificate:
...
@typing.overload
def __getitem__(self, idx: slice) -> typing.List[RevokedCertificate]:
...
@abc.abstractmethod
def __getitem__(
self, idx: typing.Union[int, slice]
) -> typing.Union[RevokedCertificate, typing.List[RevokedCertificate]]:
"""
Returns a revoked certificate (or slice of revoked certificates).
"""
@abc.abstractmethod
def __iter__(self) -> typing.Iterator[RevokedCertificate]:
"""
Iterator over the revoked certificates
"""
@abc.abstractmethod
def is_signature_valid(self, public_key: PUBLIC_KEY_TYPES) -> bool:
"""
Verifies signature of revocation list against given public key.
"""
CertificateRevocationList.register(rust_x509.CertificateRevocationList)
class CertificateSigningRequest(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __ne__(self, other: object) -> bool:
"""
Checks not equal.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_key(self) -> PUBLIC_KEY_TYPES:
"""
Returns the public key
"""
@abc.abstractproperty
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@abc.abstractproperty
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@abc.abstractproperty
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@abc.abstractproperty
def extensions(self) -> Extensions:
"""
Returns the extensions in the signing request.
"""
@abc.abstractproperty
def attributes(self) -> Attributes:
"""
Returns an Attributes object.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Encodes the request to PEM or DER format.
"""
@abc.abstractproperty
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@abc.abstractproperty
def tbs_certrequest_bytes(self) -> bytes:
"""
Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC
2986.
"""
@abc.abstractproperty
def is_signature_valid(self) -> bool:
"""
Verifies signature of signing request.
"""
@abc.abstractmethod
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> bytes:
"""
Get the attribute value for a given OID.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
CertificateSigningRequest.register(rust_x509.CertificateSigningRequest)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_pem_x509_certificate(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_der_x509_certificate(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_pem_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_der_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_pem_x509_crl(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_der_x509_crl(data)
class CertificateSigningRequestBuilder(object):
def __init__(
self,
subject_name: typing.Optional[Name] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
attributes: typing.List[typing.Tuple[ObjectIdentifier, bytes]] = [],
):
"""
Creates an empty X.509 certificate request (v1).
"""
self._subject_name = subject_name
self._extensions = extensions
self._attributes = attributes
def subject_name(self, name: Name) -> "CertificateSigningRequestBuilder":
"""
Sets the certificate requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateSigningRequestBuilder(
name, self._extensions, self._attributes
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 extension to the certificate request.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions + [extension],
self._attributes,
)
def add_attribute(
self, oid: ObjectIdentifier, value: bytes
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 attribute with an OID and associated value.
"""
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
if not isinstance(value, bytes):
raise TypeError("value must be bytes")
_reject_duplicate_attribute(oid, self._attributes)
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions,
self._attributes + [(oid, value)],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> CertificateSigningRequest:
"""
Signs the request using the requestor's private key.
"""
if self._subject_name is None:
raise ValueError("A CertificateSigningRequest must have a subject")
return rust_x509.create_x509_csr(self, private_key, algorithm)
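# Illustrative sketch (added comment, assumed caller-side usage):
#   csr = (
#       CertificateSigningRequestBuilder()
#       .subject_name(subject)                        # x509.Name (assumed)
#       .add_extension(extension_value, critical=False)   # an ExtensionType instance (assumed)
#       .sign(private_key, hashes.SHA256())
#   )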
class CertificateBuilder(object):
_extensions: typing.List[Extension[ExtensionType]]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
subject_name: typing.Optional[Name] = None,
public_key: typing.Optional[CERTIFICATE_PUBLIC_KEY_TYPES] = None,
serial_number: typing.Optional[int] = None,
not_valid_before: typing.Optional[datetime.datetime] = None,
not_valid_after: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
) -> None:
self._version = Version.v3
self._issuer_name = issuer_name
self._subject_name = subject_name
self._public_key = public_key
self._serial_number = serial_number
self._not_valid_before = not_valid_before
self._not_valid_after = not_valid_after
self._extensions = extensions
def issuer_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the CA's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateBuilder(
name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def subject_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateBuilder(
self._issuer_name,
name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def public_key(
self,
key: CERTIFICATE_PUBLIC_KEY_TYPES,
) -> "CertificateBuilder":
"""
Sets the requestor's public key (as found in the signing request).
"""
if not isinstance(
key,
(
dsa.DSAPublicKey,
rsa.RSAPublicKey,
ec.EllipticCurvePublicKey,
ed25519.Ed25519PublicKey,
ed448.Ed448PublicKey,
x25519.X25519PublicKey,
x448.X448PublicKey,
),
):
raise TypeError(
"Expecting one of DSAPublicKey, RSAPublicKey,"
" EllipticCurvePublicKey, Ed25519PublicKey,"
" Ed448PublicKey, X25519PublicKey, or "
"X448PublicKey."
)
if self._public_key is not None:
raise ValueError("The public key may only be set once.")
return CertificateBuilder(
self._issuer_name,
self._subject_name,
key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def serial_number(self, number: int) -> "CertificateBuilder":
"""
Sets the certificate serial number.
"""
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive.")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def not_valid_before(
self, time: datetime.datetime
) -> "CertificateBuilder":
"""
Sets the certificate activation time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_before is not None:
raise ValueError("The not valid before may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid before date must be on or after"
" 1950 January 1)."
)
if self._not_valid_after is not None and time > self._not_valid_after:
raise ValueError(
"The not valid before date must be before the not valid after "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
time,
self._not_valid_after,
self._extensions,
)
def not_valid_after(self, time: datetime.datetime) -> "CertificateBuilder":
"""
Sets the certificate expiration time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_after is not None:
raise ValueError("The not valid after may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid after date must be on or after"
" 1950 January 1."
)
if (
self._not_valid_before is not None
and time < self._not_valid_before
):
raise ValueError(
"The not valid after date must be after the not valid before "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
time,
self._extensions,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateBuilder":
"""
Adds an X.509 extension to the certificate.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions + [extension],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> Certificate:
"""
Signs the certificate using the CA's private key.
"""
if self._subject_name is None:
raise ValueError("A certificate must have a subject name")
if self._issuer_name is None:
raise ValueError("A certificate must have an issuer name")
if self._serial_number is None:
raise ValueError("A certificate must have a serial number")
if self._not_valid_before is None:
raise ValueError("A certificate must have a not valid before time")
if self._not_valid_after is None:
raise ValueError("A certificate must have a not valid after time")
if self._public_key is None:
raise ValueError("A certificate must have a public key")
return rust_x509.create_x509_certificate(self, private_key, algorithm)
class CertificateRevocationListBuilder(object):
_extensions: typing.List[Extension[ExtensionType]]
_revoked_certificates: typing.List[RevokedCertificate]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
last_update: typing.Optional[datetime.datetime] = None,
next_update: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
revoked_certificates: typing.List[RevokedCertificate] = [],
):
self._issuer_name = issuer_name
self._last_update = last_update
self._next_update = next_update
self._extensions = extensions
self._revoked_certificates = revoked_certificates
def issuer_name(
self, issuer_name: Name
) -> "CertificateRevocationListBuilder":
if not isinstance(issuer_name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateRevocationListBuilder(
issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def last_update(
self, last_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(last_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._last_update is not None:
raise ValueError("Last update may only be set once.")
last_update = _convert_to_naive_utc_time(last_update)
if last_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._next_update is not None and last_update > self._next_update:
raise ValueError(
"The last update date must be before the next update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def next_update(
self, next_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(next_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._next_update is not None:
raise ValueError("Last update may only be set once.")
next_update = _convert_to_naive_utc_time(next_update)
if next_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._last_update is not None and next_update < self._last_update:
raise ValueError(
"The next update date must be after the last update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
next_update,
self._extensions,
self._revoked_certificates,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateRevocationListBuilder":
"""
Adds an X.509 extension to the certificate revocation list.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions + [extension],
self._revoked_certificates,
)
def add_revoked_certificate(
self, revoked_certificate: RevokedCertificate
) -> "CertificateRevocationListBuilder":
"""
Adds a revoked certificate to the CRL.
"""
if not isinstance(revoked_certificate, RevokedCertificate):
raise TypeError("Must be an instance of RevokedCertificate")
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates + [revoked_certificate],
)
def sign(
self,
private_key: PRIVATE_KEY_TYPES,
algorithm: typing.Optional[hashes.HashAlgorithm],
backend: typing.Any = None,
) -> CertificateRevocationList:
if self._issuer_name is None:
raise ValueError("A CRL must have an issuer name")
if self._last_update is None:
raise ValueError("A CRL must have a last update time")
if self._next_update is None:
raise ValueError("A CRL must have a next update time")
return rust_x509.create_x509_crl(self, private_key, algorithm)
class RevokedCertificateBuilder(object):
def __init__(
self,
serial_number: typing.Optional[int] = None,
revocation_date: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
def serial_number(self, number: int) -> "RevokedCertificateBuilder":
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return RevokedCertificateBuilder(
number, self._revocation_date, self._extensions
)
def revocation_date(
self, time: datetime.datetime
) -> "RevokedCertificateBuilder":
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._revocation_date is not None:
raise ValueError("The revocation date may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The revocation date must be on or after" " 1950 January 1."
)
return RevokedCertificateBuilder(
self._serial_number, time, self._extensions
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "RevokedCertificateBuilder":
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return RevokedCertificateBuilder(
self._serial_number,
self._revocation_date,
self._extensions + [extension],
)
def build(self, backend: typing.Any = None) -> RevokedCertificate:
if self._serial_number is None:
raise ValueError("A revoked certificate must have a serial number")
if self._revocation_date is None:
raise ValueError(
"A revoked certificate must have a revocation date"
)
return _RawRevokedCertificate(
self._serial_number,
self._revocation_date,
Extensions(self._extensions),
)
def random_serial_number() -> int:
return int.from_bytes(os.urandom(20), "big") >> 1
| en | 0.791107 | # This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. # This is quadratic in the number of extensions # This is quadratic in the number of attributes Normalizes a datetime to a naive datetime in UTC.
time -- datetime to normalize. Assumed to be in UTC if not timezone
aware. Returns bytes using digest passed. Returns certificate serial number Returns the certificate version Returns the public key Not before time (represented as UTC datetime) Not after time (represented as UTC datetime) Returns the issuer name object. Returns the subject name object. Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate. Returns the ObjectIdentifier of the signature algorithm. Returns an Extensions object. Returns the signature bytes. Returns the tbsCertificate payload bytes as defined in RFC 5280. Checks equality. Checks not equal. Computes a hash. Serializes the certificate to PEM or DER format. # Runtime isinstance checks need this since the rust class is not a subclass. Returns the serial number of the revoked certificate. Returns the date of when this certificate was revoked. Returns an Extensions object containing a list of Revoked extensions. # Runtime isinstance checks need this since the rust class is not a subclass. Serializes the CRL to PEM or DER format. Returns bytes using digest passed. Returns an instance of RevokedCertificate or None if the serial_number
is not in the CRL. Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate. Returns the ObjectIdentifier of the signature algorithm. Returns the X509Name with the issuer of this CRL. Returns the date of next update for this CRL. Returns the date of last update for this CRL. Returns an Extensions object containing a list of CRL extensions. Returns the signature bytes. Returns the tbsCertList payload bytes as defined in RFC 5280. Checks equality. Checks not equal. Number of revoked certificates in the CRL. Returns a revoked certificate (or slice of revoked certificates). Iterator over the revoked certificates Verifies signature of revocation list against given public key. Checks equality. Checks not equal. Computes a hash. Returns the public key Returns the subject name object. Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate. Returns the ObjectIdentifier of the signature algorithm. Returns the extensions in the signing request. Returns an Attributes object. Encodes the request to PEM or DER format. Returns the signature bytes. Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC
2986. Verifies signature of signing request. Get the attribute value for a given OID. # Runtime isinstance checks need this since the rust class is not a subclass. # Backend argument preserved for API compatibility, but ignored. # Backend argument preserved for API compatibility, but ignored. # Backend argument preserved for API compatibility, but ignored. # Backend argument preserved for API compatibility, but ignored. # Backend argument preserved for API compatibility, but ignored. # Backend argument preserved for API compatibility, but ignored. Creates an empty X.509 certificate request (v1). Sets the certificate requestor's distinguished name. Adds an X.509 extension to the certificate request. Adds an X.509 attribute with an OID and associated value. Signs the request using the requestor's private key. Sets the CA's distinguished name. Sets the requestor's distinguished name. Sets the requestor's public key (as found in the signing request). Sets the certificate serial number. # ASN.1 integers are always signed, so most significant bit must be # zero. # As defined in RFC 5280 Sets the certificate activation time. Sets the certificate expiration time. Adds an X.509 extension to the certificate. Signs the certificate using the CA's private key. Adds an X.509 extension to the certificate revocation list. Adds a revoked certificate to the CRL. # ASN.1 integers are always signed, so most significant bit must be # zero. # As defined in RFC 5280 | 2.039556 | 2 |
dfirtrack_artifacts/tests/artifacttype/test_artifacttype_models.py | blackhatethicalhacking/dfirtrack | 4 | 6632314 | <reponame>blackhatethicalhacking/dfirtrack
from django.test import TestCase
from dfirtrack_artifacts.models import Artifacttype
class ArtifacttypeModelTestCase(TestCase):
""" artifacttype model tests """
@classmethod
def setUpTestData(cls):
# create object
Artifacttype.objects.create(artifacttype_name = 'artifacttype_1')
def test_artifacttype_string(self):
""" test string representation """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# compare
self.assertEqual(str(artifacttype_1), 'artifacttype_1')
def test_artifacttype_id_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_id').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype id')
def test_artifacttype_name_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_name').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype name')
def test_artifacttype_note_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_note').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype note')
def test_artifacttype_slug_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_slug').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype slug')
def test_artifacttype_name_length(self):
""" test for max length """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get max length
max_length = artifacttype_1._meta.get_field('artifacttype_name').max_length
# compare
self.assertEquals(max_length, 255)
def test_artifacttype_slug_length(self):
""" test for max length """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get max length
max_length = artifacttype_1._meta.get_field('artifacttype_slug').max_length
# compare
self.assertEquals(max_length, 255)
| from django.test import TestCase
from dfirtrack_artifacts.models import Artifacttype
class ArtifacttypeModelTestCase(TestCase):
""" artifacttype model tests """
@classmethod
def setUpTestData(cls):
# create object
Artifacttype.objects.create(artifacttype_name = 'artifacttype_1')
def test_artifacttype_string(self):
""" test string representation """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# compare
self.assertEqual(str(artifacttype_1), 'artifacttype_1')
def test_artifacttype_id_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_id').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype id')
def test_artifacttype_name_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_name').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype name')
def test_artifacttype_note_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_note').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype note')
def test_artifacttype_slug_attribute_label(self):
""" test attribute label """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get label
field_label = artifacttype_1._meta.get_field('artifacttype_slug').verbose_name
# compare
self.assertEquals(field_label, 'artifacttype slug')
def test_artifacttype_name_length(self):
""" test for max length """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get max length
max_length = artifacttype_1._meta.get_field('artifacttype_name').max_length
# compare
self.assertEquals(max_length, 255)
def test_artifacttype_slug_length(self):
""" test for max length """
# get object
artifacttype_1 = Artifacttype.objects.get(artifacttype_name='artifacttype_1')
# get max length
max_length = artifacttype_1._meta.get_field('artifacttype_slug').max_length
# compare
self.assertEquals(max_length, 255) | en | 0.507676 | artifacttype model tests # create object test string representation # get object # compare test attribute label # get object # get label # compare test attribute label # get object # get label # compare test attribute label # get object # get label # compare test attribute label # get object # get label # compare test for max length # get object # get max length # compare test for max length # get object # get max length # compare | 2.495126 | 2 |
python/test_controller.py | anyway-blows/uav_geometric_control | 25 | 6632315 | import numpy as np
from controller import Dumbbell
from kinematics import attitude
angle = (2*np.pi- 0) * np.random.rand(1) + 0
# generate a random initial state that is outside of the asteroid
pos = np.random.rand(3)+np.array([2,2,2])
R = attitude.rot1(angle).reshape(9)
vel = np.random.rand(3)
ang_vel = np.random.rand(3)
t = np.random.rand()*100
state = np.hstack((pos,vel, R, ang_vel))
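# The 18-element state vector stacks position (3), velocity (3), the rotation
# matrix flattened row-wise (9), and body angular velocity (3).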
class TestDumbbellInertialDesiredAttitude():
dum = Dumbbell()
alpha = np.random.rand()
axis = np.array([1, 0, 0])
Rd, Rd_dot, ang_vel_d, ang_vel_d_dot = dum.desired_attitude(1, alpha, axis)
def test_desired_rotation_matrix_determinant(self):
np.testing.assert_almost_equal(np.linalg.det(self.Rd), 1)
def test_desired_rotation_matrix_orthogonal(self):
np.testing.assert_array_almost_equal(self.Rd.T.dot(self.Rd),
np.eye(3,3))
    def test_desired_attitude_satisfies_kinematics(self):
np.testing.assert_array_almost_equal(self.Rd_dot,
self.Rd.dot(attitude.hat_map(self.ang_vel_d)))
def test_moment_of_inertia(self):
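        # Checks the standard/nonstandard inertia identity J = tr(Jd) * I - Jd
        # commonly used in geometric mechanics formulations.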
np.testing.assert_allclose(self.dum.J, np.trace(self.dum.Jd)*np.eye(3,3) - self.dum.Jd)
class TestDumbbellInertialAttitudeController():
"""Test the attitude controller for the inertial eoms
"""
dum = Dumbbell()
u_m = dum.attitude_controller(t, state, np.zeros(3))
def test_control_moment_size(self):
np.testing.assert_equal(self.u_m.shape, (3,))
class TestDumbbellInertialTranslationalController():
dum = Dumbbell()
u_f = dum.translation_controller(t, state, np.zeros(3))
def test_control_force_size(self):
np.testing.assert_equal(self.u_f.shape, (3,))
| import numpy as np
from controller import Dumbbell
from kinematics import attitude
angle = (2*np.pi- 0) * np.random.rand(1) + 0
# generate a random initial state that is outside of the asteroid
pos = np.random.rand(3)+np.array([2,2,2])
R = attitude.rot1(angle).reshape(9)
vel = np.random.rand(3)
ang_vel = np.random.rand(3)
t = np.random.rand()*100
state = np.hstack((pos,vel, R, ang_vel))
class TestDumbbellInertialDesiredAttitude():
dum = Dumbbell()
alpha = np.random.rand()
axis = np.array([1, 0, 0])
Rd, Rd_dot, ang_vel_d, ang_vel_d_dot = dum.desired_attitude(1, alpha, axis)
def test_desired_rotation_matrix_determinant(self):
np.testing.assert_almost_equal(np.linalg.det(self.Rd), 1)
def test_desired_rotation_matrix_orthogonal(self):
np.testing.assert_array_almost_equal(self.Rd.T.dot(self.Rd),
np.eye(3,3))
def test_desired_attitude_satifies_kinematics(self):
np.testing.assert_array_almost_equal(self.Rd_dot,
self.Rd.dot(attitude.hat_map(self.ang_vel_d)))
def test_moment_of_inertia(self):
np.testing.assert_allclose(self.dum.J, np.trace(self.dum.Jd)*np.eye(3,3) - self.dum.Jd)
class TestDumbbellInertialAttitudeController():
"""Test the attitude controller for the inertial eoms
"""
dum = Dumbbell()
u_m = dum.attitude_controller(t, state, np.zeros(3))
def test_control_moment_size(self):
np.testing.assert_equal(self.u_m.shape, (3,))
class TestDumbbellInertialTranslationalController():
dum = Dumbbell()
u_f = dum.translation_controller(t, state, np.zeros(3))
def test_control_force_size(self):
np.testing.assert_equal(self.u_f.shape, (3,))
| en | 0.874716 | # generate a random initial state that is outside of the asteroid Test the attitude controller for the inertial eoms | 3.016088 | 3 |
proto2rapidjson/__main__.py | Sweetnow/Proto2RapidJSON | 0 | 6632316 | <filename>proto2rapidjson/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ***********************************************************************************
# * Copyright 2020-2021 <NAME>. All Rights Reserved. *
# * Distributed under MIT license. *
# * See file LICENSE for details for copy at https://opensource.org/licenses/MIT *
# ***********************************************************************************
from argparse import ArgumentParser
from . import entry
__all__ = ['entry']
def get_argparser() -> ArgumentParser:
parser = ArgumentParser(
'proto2rapidjson', description='Convert .proto file to header-only RapidJSON based c++ code')
parser.add_argument('-i', '--input', type=str, dest='input',
help='input .proto file', required=True)
parser.add_argument('-o', '--output', type=str, dest='output',
help='output .h file', required=True)
parser.add_argument('-y', dest='yes', action='store_true',
help='pass all interactive checks', default=False)
return parser
def main():
args = get_argparser().parse_args()
entry(args.input, args.output, args.yes)
if __name__ == "__main__":
main()
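# Example invocation (illustrative file names): python -m proto2rapidjson -i message.proto -o message.h -y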
| <filename>proto2rapidjson/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ***********************************************************************************
# * Copyright 2020-2021 <NAME>. All Rights Reserved. *
# * Distributed under MIT license. *
# * See file LICENSE for details for copy at https://opensource.org/licenses/MIT *
# ***********************************************************************************
from argparse import ArgumentParser
from . import entry
__all__ = ['entry']
def get_argparser() -> ArgumentParser:
parser = ArgumentParser(
'proto2rapidjson', description='Convert .proto file to header-only RapidJSON based c++ code')
parser.add_argument('-i', '--input', type=str, dest='input',
help='input .proto file', required=True)
parser.add_argument('-o', '--output', type=str, dest='output',
help='output .h file', required=True)
parser.add_argument('-y', dest='yes', action='store_true',
help='pass all interactive checks', default=False)
return parser
def main():
args = get_argparser().parse_args()
entry(args.input, args.output, args.yes)
if __name__ == "__main__":
main()
| en | 0.430597 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # *********************************************************************************** # * Copyright 2020-2021 <NAME>. All Rights Reserved. * # * Distributed under MIT license. * # * See file LICENSE for details for copy at https://opensource.org/licenses/MIT * # *********************************************************************************** | 2.634394 | 3 |
locations/spiders/vetco_clinic.py | nbeecher/alltheplaces | 0 | 6632317 | <reponame>nbeecher/alltheplaces<gh_stars>0
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from scrapy.selector import Selector
class VetcoSpider(scrapy.Spider):
name = "vetco"
item_attributes = {'brand': "vetcoclinics"}
allowed_domains = ["vetcoclinics.com"]
start_urls = (
'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
)
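    # Strategy note (added comment): start_requests sweeps every US ZCTA centroid from
    # the bundled CSV and queries the locator endpoint once per ZIP code; parse() then
    # extracts each clinic's address block from the HTML label returned in the JSON.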
def start_requests(self):
with open('./locations/searchable_points/us_zcta.csv') as points:
next(points) # Ignore the header
for point in points:
row = point.split(',')
zip = row[0].strip().strip('"')
url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
yield scrapy.http.Request(
url,
self.parse,
method='GET'
)
def parse(self, response):
jsonresponse = json.loads(response.body_as_unicode())
if jsonresponse is not None:
clinics = jsonresponse.get('clinics')
if clinics:
for stores in clinics:
body = stores['label']
address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
else:
addr_full, city_state_postal = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
properties = {
'ref': addr_full[0].strip(),
'addr_full': addr_full[0].strip(),
'city': city[0].strip(),
'state': state,
'postcode': postal,
'lat': float(stores["point"]["lat"]),
'lon': float(stores["point"]["long"]),
'website': response.url
}
yield GeojsonPointItem(**properties)
| # -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from scrapy.selector import Selector
class VetcoSpider(scrapy.Spider):
name = "vetco"
item_attributes = {'brand': "vetcoclinics"}
allowed_domains = ["vetcoclinics.com"]
start_urls = (
'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
)
def start_requests(self):
with open('./locations/searchable_points/us_zcta.csv') as points:
next(points) # Ignore the header
for point in points:
row = point.split(',')
zip = row[0].strip().strip('"')
url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
yield scrapy.http.Request(
url,
self.parse,
method='GET'
)
def parse(self, response):
jsonresponse = json.loads(response.body_as_unicode())
if jsonresponse is not None:
clinics = jsonresponse.get('clinics')
if clinics:
for stores in clinics:
body = stores['label']
address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
else:
addr_full, city_state_postal = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
properties = {
'ref': addr_full[0].strip(),
'addr_full': addr_full[0].strip(),
'city': city[0].strip(),
'state': state,
'postcode': postal,
'lat': float(stores["point"]["lat"]),
'lon': float(stores["point"]["long"]),
'website': response.url
}
yield GeojsonPointItem(**properties) | en | 0.589017 | # -*- coding: utf-8 -*- # Ignore the header | 2.827273 | 3 |
pyspikelib/decomposition_plots.py | lzrvch/pyspikelib | 3 | 6632318 | <reponame>lzrvch/pyspikelib
# PySpikeLib: A set of tools for neuronal spiking data mining
# Copyright (c) 2020 <NAME>.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.lines import Line2D
from .mpladeq import beautify_mpl, prettify
def feature_scatter_plot(
X,
y,
features,
samples=1000,
legend=None,
xaxis=None,
yaxis=None,
figsize=(15, 8),
alpha=0.3,
):
sns.set(palette='Set2', style='ticks', font_scale=1.7)
indices = np.random.choice(X.shape[0], samples)
cmap = ('#fb290f', '#063373')
# cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1])
colors = pd.Series(y[indices]).map(
{0: cmap[0], 1: cmap[1]}
)
custom_lines = [
Line2D([0], [0], color=cmap[0], lw=1.5),
Line2D([0], [0], color=cmap[1], lw=1.5),
]
beautify_mpl()
fig, ax = plt.subplots(figsize=figsize)
ax.scatter(
X.loc[:, features[0]].values[indices],
X.loc[:, features[1]].values[indices],
c=colors,
alpha=alpha,
)
prettify()
if legend:
ax.legend(custom_lines, legend)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
return ax
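# Illustrative call (hypothetical feature names and group labels):
#   feature_scatter_plot(X, y, ('mean_isi', 'cv'), legend=('group A', 'group B'),
#                        xaxis='mean ISI', yaxis='CV')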
def decompose_scatter_plot(
X,
y,
features,
reducer,
samples=1000,
legend=None,
xaxis=None,
yaxis=None,
supervised=False,
figsize=(15, 8),
alpha=0.3,
):
sns.set(palette='Set2', style='ticks', font_scale=1.7)
indices = np.random.choice(X.shape[0], samples)
cmap = ('#fb290f', '#063373')
# cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1])
colors = pd.Series(y[indices]).map(
{0: cmap[0], 1: cmap[1]}
)
custom_lines = [
Line2D([0], [0], color=cmap[0], lw=1.5),
Line2D([0], [0], color=cmap[1], lw=1.5),
]
beautify_mpl()
fig, ax = plt.subplots(figsize=figsize)
if supervised:
train_indices = list(set(range(X.shape[0])) - set(indices))
mapper = reducer[0](**reducer[1]).fit(
X.loc[:, features].values[train_indices, :], y[train_indices]
)
X2d = mapper.transform(X.loc[:, features].values[indices, :])
else:
X2d = reducer[0](**reducer[1]).fit_transform(
X.loc[:, features].values[indices, :]
)
ax.scatter(X2d[:, 0], X2d[:, 1], c=colors, alpha=alpha)
prettify()
if legend:
ax.legend(custom_lines, legend)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.xticks([])
plt.yticks([])
return ax
| # PySpikeLib: A set of tools for neuronal spiking data mining
# Copyright (c) 2020 <NAME>.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.lines import Line2D
from .mpladeq import beautify_mpl, prettify
def feature_scatter_plot(
X,
y,
features,
samples=1000,
legend=None,
xaxis=None,
yaxis=None,
figsize=(15, 8),
alpha=0.3,
):
sns.set(palette='Set2', style='ticks', font_scale=1.7)
indices = np.random.choice(X.shape[0], samples)
cmap = ('#fb290f', '#063373')
# cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1])
colors = pd.Series(y[indices]).map(
{0: cmap[0], 1: cmap[1]}
)
custom_lines = [
Line2D([0], [0], color=cmap[0], lw=1.5),
Line2D([0], [0], color=cmap[1], lw=1.5),
]
beautify_mpl()
fig, ax = plt.subplots(figsize=figsize)
ax.scatter(
X.loc[:, features[0]].values[indices],
X.loc[:, features[1]].values[indices],
c=colors,
alpha=alpha,
)
prettify()
if legend:
ax.legend(custom_lines, legend)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
return ax
def decompose_scatter_plot(
X,
y,
features,
reducer,
samples=1000,
legend=None,
xaxis=None,
yaxis=None,
supervised=False,
figsize=(15, 8),
alpha=0.3,
):
sns.set(palette='Set2', style='ticks', font_scale=1.7)
indices = np.random.choice(X.shape[0], samples)
cmap = ('#fb290f', '#063373')
# cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1])
colors = pd.Series(y[indices]).map(
{0: cmap[0], 1: cmap[1]}
)
custom_lines = [
Line2D([0], [0], color=cmap[0], lw=1.5),
Line2D([0], [0], color=cmap[1], lw=1.5),
]
beautify_mpl()
fig, ax = plt.subplots(figsize=figsize)
if supervised:
train_indices = list(set(range(X.shape[0])) - set(indices))
mapper = reducer[0](**reducer[1]).fit(
X.loc[:, features].values[train_indices, :], y[train_indices]
)
X2d = mapper.transform(X.loc[:, features].values[indices, :])
else:
X2d = reducer[0](**reducer[1]).fit_transform(
X.loc[:, features].values[indices, :]
)
ax.scatter(X2d[:, 0], X2d[:, 1], c=colors, alpha=alpha)
prettify()
if legend:
ax.legend(custom_lines, legend)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.xticks([])
plt.yticks([])
return ax | en | 0.759938 | # PySpikeLib: A set of tools for neuronal spiking data mining # Copyright (c) 2020 <NAME>. # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1]) # cmap = (sns.color_palette('Paired')[5], sns.color_palette('Paired')[1]) | 2.321233 | 2 |
tests/test_seg_text.py | ffreemt/seg-text | 0 | 6632319 | """Test seg_text."""
import pytest
from seg_text.seg_text import seg_text
def test_seg_text1():
"""Test seg_text 1."""
text = " text 1\n\n test 2. test 3"
_ = seg_text(text)
assert len(_) == 2
text = " text 1\n\n test 2. Test 3"
_ = seg_text(text)
assert len(_) == 3
@pytest.mark.parametrize(
"test_input,expected", [
("", []),
(" ", []),
(" \n ", []),
]
)
def test_seg_text_blanks(test_input, expected):
"""Test blanks."""
assert seg_text(test_input) == expected
def test_seg_text_semicolon():
"""Test semicolon."""
text = """ “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 """
assert len(seg_text(text)) == 2
assert len(seg_text(text, 'zh')) == 2
assert len(seg_text(text, 'ja')) == 2
assert len(seg_text(text, 'ko')) == 2
assert len(seg_text(text, 'en')) == 1
def test_seg_text_semicolon_extra():
"""Test semicolon."""
extra = "[;;]"
text = """ “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 """
assert len(seg_text(text, extra=extra)) == 2 + 1
assert len(seg_text(text, 'zh', extra=extra)) == 2 + 1
assert len(seg_text(text, 'ja', extra=extra)) == 2 + 1
assert len(seg_text(text, 'ko', extra=extra)) == 2 + 1
assert len(seg_text(text, 'en', extra=extra)) == 1 + 1
| """Test seg_text."""
import pytest
from seg_text.seg_text import seg_text
def test_seg_text1():
"""Test seg_text 1."""
text = " text 1\n\n test 2. test 3"
_ = seg_text(text)
assert len(_) == 2
text = " text 1\n\n test 2. Test 3"
_ = seg_text(text)
assert len(_) == 3
@pytest.mark.parametrize(
"test_input,expected", [
("", []),
(" ", []),
(" \n ", []),
]
)
def test_seg_text_blanks(test_input, expected):
"""Test blanks."""
assert seg_text(test_input) == expected
def test_seg_text_semicolon():
"""Test semicolon."""
text = """ “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 """
assert len(seg_text(text)) == 2
assert len(seg_text(text, 'zh')) == 2
assert len(seg_text(text, 'ja')) == 2
assert len(seg_text(text, 'ko')) == 2
assert len(seg_text(text, 'en')) == 1
def test_seg_text_semicolon_extra():
"""Test semicolon."""
extra = "[;;]"
text = """ “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 """
assert len(seg_text(text, extra=extra)) == 2 + 1
assert len(seg_text(text, 'zh', extra=extra)) == 2 + 1
assert len(seg_text(text, 'ja', extra=extra)) == 2 + 1
assert len(seg_text(text, 'ko', extra=extra)) == 2 + 1
assert len(seg_text(text, 'en', extra=extra)) == 1 + 1
| zh | 0.844246 | Test seg_text. Test seg_text 1. Test blanks. Test semicolon. “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 Test semicolon. “元宇宙”,英文為“Metaverse”。該詞出自1992年;的科幻小說《雪崩》。 | 2.970715 | 3 |
rpi1/scripts/controllers/CPUTemp.py | mkeshita/bachelor_degree_thesis | 1 | 6632320 | <reponame>mkeshita/bachelor_degree_thesis
# -*- coding: utf-8 -*-
class CPUTemp:
def __init__(self, temp_file_path="/sys/class/thermal/thermal_zone0/temp"):
self.temp_file_path = temp_file_path
def __enter__(self):
self.open()
return self
def open(self):
"""Opens file with rpi temperature from /sys"""
self.tempfile = open(self.temp_file_path, "r")
def read(self):
"""Reads rpi self temperature from file"""
self.tempfile.seek(0)
return self.tempfile.read().rstrip()
def convert_c_to_f(self, c):
"""Converts celsius degrees value to fahrenheit degrees value"""
return c * 9.0 / 5.0 + 32.0
def get_temperature_in_c(self):
"""Returns temperature in celsius degrees"""
temp_raw = self.read()
return float(temp_raw[:-3] + "." + temp_raw[-3:])
def get_temperature_in_f(self):
"""Returns temperature in fahrenheit degrees"""
return self.convert_c_to_f(self.get_temperature_in_c())
def get_temperature(self):
"""Returns temperature (currently in celsius degrees)"""
return self.get_temperature_in_c()
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Closes file with rpi temperature from /sys"""
self.tempfile.close()
| # -*- coding: utf-8 -*-
class CPUTemp:
def __init__(self, temp_file_path="/sys/class/thermal/thermal_zone0/temp"):
self.temp_file_path = temp_file_path
def __enter__(self):
self.open()
return self
def open(self):
"""Opens file with rpi temperature from /sys"""
self.tempfile = open(self.temp_file_path, "r")
def read(self):
"""Reads rpi self temperature from file"""
self.tempfile.seek(0)
return self.tempfile.read().rstrip()
def convert_c_to_f(self, c):
"""Converts celsius degrees value to fahrenheit degrees value"""
return c * 9.0 / 5.0 + 32.0
def get_temperature_in_c(self):
"""Returns temperature in celsius degrees"""
temp_raw = self.read()
return float(temp_raw[:-3] + "." + temp_raw[-3:])
def get_temperature_in_f(self):
"""Returns temperature in fahrenheit degrees"""
return self.convert_c_to_f(self.get_temperature_in_c())
def get_temperature(self):
"""Returns temperature (currently in celsius degrees)"""
return self.get_temperature_in_c()
def __exit__(self, type, value, traceback):
self.close()
def close(self):
"""Closes file with rpi temperature from /sys"""
self.tempfile.close() | en | 0.725217 | # -*- coding: utf-8 -*- Opens file with rpi temperature from /sys Reads rpi self temperature from file Converts celsius degrees value to fahrenheit degrees value Returns temperature in celsius degrees Returns temperature in fahrenheit degrees Returns temperature (currently in celsius degrees) Closes file with rpi temperature from /sys | 3.36061 | 3 |
omega/fetcher/abstract_quotes_fetcher.py | zillionare/zeta | 0 | 6632321 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a awesome
python script!"""
import datetime
import importlib
import logging
from typing import List, Union, Optional
import arrow
import cfg4py
import numpy as np
from numpy.lib import recfunctions as rfn
from omicron import cache
from omicron.core.lang import static_vars
from omicron.core.timeframe import tf
from omicron.core.types import Frame, FrameType
from omicron.models.valuation import Valuation
from omega.core.accelerate import merge
from omega.fetcher.quotes_fetcher import QuotesFetcher
logger = logging.getLogger(__file__)
cfg = cfg4py.get_instance()
class AbstractQuotesFetcher(QuotesFetcher):
_instances = []
@classmethod
async def create_instance(cls, module_name, **kwargs):
# todo: check if implementor has implemented all the required methods
# todo: check duplicates
module = importlib.import_module(module_name)
factory_method = getattr(module, "create_instance")
if not callable(factory_method):
raise TypeError(f"Bad omega adaptor implementation {module_name}")
impl: QuotesFetcher = await factory_method(**kwargs)
cls._instances.append(impl)
logger.info("add one quotes fetcher implementor: %s", module_name)
@classmethod
@static_vars(i=0)
def get_instance(cls):
if len(cls._instances) == 0:
raise IndexError("No fetchers available")
i = (cls.get_instance.i + 1) % len(cls._instances)
return cls._instances[i]
@classmethod
async def get_security_list(cls) -> Union[None, np.ndarray]:
"""按如下格式返回证券列表。
code display_name name start_date end_date type
000001.XSHE 平安银行 PAYH 1991-04-03 2200-01-01 stock
Returns:
Union[None, np.ndarray]: [description]
"""
securities = await cls.get_instance().get_security_list()
if securities is None or len(securities) == 0:
logger.warning("failed to update securities. %s is returned.", securities)
return securities
key = "securities"
pipeline = cache.security.pipeline()
pipeline.delete(key)
for code, display_name, name, start, end, _type in securities:
pipeline.rpush(
key, f"{code},{display_name},{name},{start}," f"{end},{_type}"
)
await pipeline.execute()
return securities
@classmethod
async def get_bars_batch(
cls,
secs: List[str],
end: Frame,
n_bars: int,
frame_type: FrameType,
include_unclosed=True,
) -> np.ndarray:
return await cls.get_instance().get_bars_batch(
secs, end, n_bars, frame_type.value, include_unclosed
)
@classmethod
async def get_bars(
cls,
sec: str,
end: Frame,
n_bars: int,
frame_type: FrameType,
include_unclosed=True,
) -> np.ndarray:
"""获取行情数据,并将已结束的周期数据存入缓存。
各种情况:
1. 假设现在时间是2021-2-24日,盘中。此时请求上证指数日线,且`include_unclosed`为
`True`:
```python
get_bars("000001.XSHE", None, 1, FrameType.DAY)
```
得到的数据可能如下:
```
[(datetime.date(2021, 2, 24), 3638.9358, 3645.5288, 3617.44, 3620.3542, ...)]
```
在收盘前不同时间调用,得到的数据除开盘价外,其它都实时在变动。
2. 假设现在时间是2021-2-23日,盘后,此时请求上证指数日线,将得到收盘后固定的价格。
3. 上述请求中,`include_unclosed`参数使用默认值(`True`)。如果取为`False`,仍以示例1
指定的场景为例,则:
```python
get_bars("000001.XSHG", None, 1, FrameType.DAY, False)
```
因为2021-2-24日未收盘,所以获取的最后一条数据是2021-2-23日的。
4. 同样假设现在时间是2021-2-24日盘中,周三。此时获取周K线。在`include_unclosed`分别为
`True`和`False`的情况下:
```
[(datetime.date(2021, 2, 24), 3707.19, 3717.27, 3591.3647, 3592.3977, ...)]
[(datetime.date(2021, 2, 19), 3721.09, 3731.69, 3634.01, 3696.17, ...)]
```
注意这里当`include_unclosed`为True时,返回的周K线是以2021-2-24为Frame的。同样,在盘中
的不同时间取这个数据,除了`open`数值之外,其它都是实时变化的。
5. 如果在已结束的周期中,包含停牌数据,则会对停牌期间的数据进行nan填充,以方便数据使用
者可以较容易地分辨出数据不连贯的原因:哪些是停牌造成的,哪些是非交易日造成的。这种处理
会略微降低数据获取速度,并增加存储空间。
比如下面的请求:
```python
get_bars("000029.XSHE", datetime.date(2020,8,18), 10, FrameType.DAY)
```
将获取到2020-8-5到2020-8-18间共10条数据。但由于期间000029这支股票处于停牌期,所以返回
的10条数据中,数值部分全部填充为np.nan。
注意如果取周线和月线数据,如果当天停牌,但只要周线有数据,则仍能取到。周线(或者月线)的
`frame`将是停牌前一交易日。比如,
```python
sec = "600721.XSHG"
frame_type = FrameType.WEEK
end = arrow.get("2020-4-29 15:00").datetime
bars = await aq.get_bars(sec, end, 3, FrameType.WEEK)
print(bars)
```
2020年4月30日是该周的最后一个交易日。股票600721在4月29日停牌一天。上述请求将得到如下数
据:
```
[(datetime.date(2020, 4, 17), 6.02, 6.69, 5.84, 6.58, ...)
(datetime.date(2020, 4, 24), 6.51, 6.57, 5.68, 5.72, ...)
(datetime.date(2020, 4, 28), 5.7, 5.71, 5.17, 5.36, ...)]
```
停牌发生在日线级别上,但我们的请求发生在周线级别上,所以不会对4/29日进行填充,而是返回
截止到4月29日的数据。
args:
sec: 证券代码
end: 数据截止日
n_bars: 待获取的数据条数
frame_type: 数据所属的周期
include_unclosed: 如果为真,则会包含当end所处的那个Frame的数据,即使当前它还未结束
"""
now = arrow.now(tz=cfg.tz)
end = end or now.datetime
# 如果end超出当前时间,则认为是不合法的。如果用户想取到最新的数据,应该传入None
if type(end) == datetime.date:
if end > now.date():
return None
elif type(end) == datetime.datetime:
if end > now:
return None
bars = await cls.get_instance().get_bars(
sec, end, n_bars, frame_type.value, include_unclosed
)
if len(bars) == 0:
return
# 根据指定的end,计算结束时的frame
last_closed_frame = tf.floor(end, frame_type)
last_frame = bars[-1]["frame"]
# 计算有多少根k线是已结束的
n_closed = n_bars - 1
if frame_type == FrameType.DAY:
# 盘后取日线,返回的一定是全部都已closed的数据
# 盘中取日线,返回的last_frame会是当天的日期,但该日线并未结束
if now.datetime.hour >= 15 or last_frame < now.date():
n_closed = n_bars
else:
# 如果last_frame <= end的上限,则返回的也一定是全部都closed的数据
if last_frame <= tf.floor(end, frame_type):
n_closed = n_bars
remainder = [bars[-1]] if n_closed < n_bars else None
closed_bars = cls._fill_na(bars, n_closed, last_closed_frame, frame_type)
# 只保存已结束的bar
await cache.save_bars(sec, closed_bars, frame_type)
if remainder is None:
return closed_bars
else:
return np.concatenate([closed_bars, remainder])
@classmethod
def _fill_na(cls, bars: np.array, n: int, end: Frame, frame_type) -> np.ndarray:
if frame_type in tf.minute_level_frames:
convert = tf.int2time
else:
convert = tf.int2date
frames = [convert(x) for x in tf.get_frames_by_count(end, n, frame_type)]
filled = np.empty(n, dtype=bars.dtype)
filled[:] = np.nan
filled["frame"] = frames
return merge(filled, bars, "frame")
@classmethod
async def get_all_trade_days(cls):
days = await cls.get_instance().get_all_trade_days()
await cache.save_calendar("day_frames", map(tf.date2int, days))
return days
@classmethod
async def get_valuation(
cls,
code: Union[str, List[str]],
day: datetime.date,
fields: List[str] = None,
n: int = 1,
) -> np.ndarray:
valuation = await cls.get_instance().get_valuation(code, day, n)
await Valuation.save(valuation)
if fields is None:
return valuation
if isinstance(fields, str):
fields = [fields]
mapping = dict(valuation.dtype.descr)
fields = [(name, mapping[name]) for name in fields]
return rfn.require_fields(valuation, fields)
@classmethod
async def get_price(
cls,
sec: Union[List, str],
end_date: Union[str, datetime.datetime],
n_bars: Optional[int],
start_date: Optional[Union[str, datetime.datetime]] = None,
) -> np.ndarray:
fields = ['open', 'close', 'high', 'low', 'volume', 'money', 'high_limit', 'low_limit', 'avg', 'factor']
params = {
"security": sec,
"end_date": end_date,
"fields": fields,
"fq": None,
"fill_paused": False,
"frequency": FrameType.MIN1.value,
}
if start_date:
params.update({"start_date": start_date})
if n_bars is not None:
params.update({"count": start_date})
if "start_date" in params and "count" in params:
raise ValueError("start_date and count cannot appear at the same time")
bars = await cls.get_instance().get_price(**params)
if len(bars) == 0:
return
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a awesome
python script!"""
import datetime
import importlib
import logging
from typing import List, Union, Optional
import arrow
import cfg4py
import numpy as np
from numpy.lib import recfunctions as rfn
from omicron import cache
from omicron.core.lang import static_vars
from omicron.core.timeframe import tf
from omicron.core.types import Frame, FrameType
from omicron.models.valuation import Valuation
from omega.core.accelerate import merge
from omega.fetcher.quotes_fetcher import QuotesFetcher
logger = logging.getLogger(__file__)
cfg = cfg4py.get_instance()
class AbstractQuotesFetcher(QuotesFetcher):
_instances = []
@classmethod
async def create_instance(cls, module_name, **kwargs):
# todo: check if implementor has implemented all the required methods
# todo: check duplicates
module = importlib.import_module(module_name)
factory_method = getattr(module, "create_instance")
if not callable(factory_method):
raise TypeError(f"Bad omega adaptor implementation {module_name}")
impl: QuotesFetcher = await factory_method(**kwargs)
cls._instances.append(impl)
logger.info("add one quotes fetcher implementor: %s", module_name)
@classmethod
@static_vars(i=0)
def get_instance(cls):
if len(cls._instances) == 0:
raise IndexError("No fetchers available")
i = (cls.get_instance.i + 1) % len(cls._instances)
return cls._instances[i]
@classmethod
async def get_security_list(cls) -> Union[None, np.ndarray]:
"""按如下格式返回证券列表。
code display_name name start_date end_date type
000001.XSHE 平安银行 PAYH 1991-04-03 2200-01-01 stock
Returns:
Union[None, np.ndarray]: [description]
"""
securities = await cls.get_instance().get_security_list()
if securities is None or len(securities) == 0:
logger.warning("failed to update securities. %s is returned.", securities)
return securities
key = "securities"
pipeline = cache.security.pipeline()
pipeline.delete(key)
for code, display_name, name, start, end, _type in securities:
pipeline.rpush(
key, f"{code},{display_name},{name},{start}," f"{end},{_type}"
)
await pipeline.execute()
return securities
@classmethod
async def get_bars_batch(
cls,
secs: List[str],
end: Frame,
n_bars: int,
frame_type: FrameType,
include_unclosed=True,
) -> np.ndarray:
return await cls.get_instance().get_bars_batch(
secs, end, n_bars, frame_type.value, include_unclosed
)
@classmethod
async def get_bars(
cls,
sec: str,
end: Frame,
n_bars: int,
frame_type: FrameType,
include_unclosed=True,
) -> np.ndarray:
"""获取行情数据,并将已结束的周期数据存入缓存。
各种情况:
1. 假设现在时间是2021-2-24日,盘中。此时请求上证指数日线,且`include_unclosed`为
`True`:
```python
get_bars("000001.XSHE", None, 1, FrameType.DAY)
```
得到的数据可能如下:
```
[(datetime.date(2021, 2, 24), 3638.9358, 3645.5288, 3617.44, 3620.3542, ...)]
```
在收盘前不同时间调用,得到的数据除开盘价外,其它都实时在变动。
2. 假设现在时间是2021-2-23日,盘后,此时请求上证指数日线,将得到收盘后固定的价格。
3. 上述请求中,`include_unclosed`参数使用默认值(`True`)。如果取为`False`,仍以示例1
指定的场景为例,则:
```python
get_bars("000001.XSHG", None, 1, FrameType.DAY, False)
```
因为2021-2-24日未收盘,所以获取的最后一条数据是2021-2-23日的。
4. 同样假设现在时间是2021-2-24日盘中,周三。此时获取周K线。在`include_unclosed`分别为
`True`和`False`的情况下:
```
[(datetime.date(2021, 2, 24), 3707.19, 3717.27, 3591.3647, 3592.3977, ...)]
[(datetime.date(2021, 2, 19), 3721.09, 3731.69, 3634.01, 3696.17, ...)]
```
注意这里当`include_unclosed`为True时,返回的周K线是以2021-2-24为Frame的。同样,在盘中
的不同时间取这个数据,除了`open`数值之外,其它都是实时变化的。
5. 如果在已结束的周期中,包含停牌数据,则会对停牌期间的数据进行nan填充,以方便数据使用
者可以较容易地分辨出数据不连贯的原因:哪些是停牌造成的,哪些是非交易日造成的。这种处理
会略微降低数据获取速度,并增加存储空间。
比如下面的请求:
```python
get_bars("000029.XSHE", datetime.date(2020,8,18), 10, FrameType.DAY)
```
将获取到2020-8-5到2020-8-18间共10条数据。但由于期间000029这支股票处于停牌期,所以返回
的10条数据中,数值部分全部填充为np.nan。
注意如果取周线和月线数据,如果当天停牌,但只要周线有数据,则仍能取到。周线(或者月线)的
`frame`将是停牌前一交易日。比如,
```python
sec = "600721.XSHG"
frame_type = FrameType.WEEK
end = arrow.get("2020-4-29 15:00").datetime
bars = await aq.get_bars(sec, end, 3, FrameType.WEEK)
print(bars)
```
2020年4月30日是该周的最后一个交易日。股票600721在4月29日停牌一天。上述请求将得到如下数
据:
```
[(datetime.date(2020, 4, 17), 6.02, 6.69, 5.84, 6.58, ...)
(datetime.date(2020, 4, 24), 6.51, 6.57, 5.68, 5.72, ...)
(datetime.date(2020, 4, 28), 5.7, 5.71, 5.17, 5.36, ...)]
```
停牌发生在日线级别上,但我们的请求发生在周线级别上,所以不会对4/29日进行填充,而是返回
截止到4月29日的数据。
args:
sec: 证券代码
end: 数据截止日
n_bars: 待获取的数据条数
frame_type: 数据所属的周期
include_unclosed: 如果为真,则会包含当end所处的那个Frame的数据,即使当前它还未结束
"""
now = arrow.now(tz=cfg.tz)
end = end or now.datetime
# 如果end超出当前时间,则认为是不合法的。如果用户想取到最新的数据,应该传入None
if type(end) == datetime.date:
if end > now.date():
return None
elif type(end) == datetime.datetime:
if end > now:
return None
bars = await cls.get_instance().get_bars(
sec, end, n_bars, frame_type.value, include_unclosed
)
if len(bars) == 0:
return
# 根据指定的end,计算结束时的frame
last_closed_frame = tf.floor(end, frame_type)
last_frame = bars[-1]["frame"]
# 计算有多少根k线是已结束的
n_closed = n_bars - 1
if frame_type == FrameType.DAY:
# 盘后取日线,返回的一定是全部都已closed的数据
# 盘中取日线,返回的last_frame会是当天的日期,但该日线并未结束
if now.datetime.hour >= 15 or last_frame < now.date():
n_closed = n_bars
else:
# 如果last_frame <= end的上限,则返回的也一定是全部都closed的数据
if last_frame <= tf.floor(end, frame_type):
n_closed = n_bars
remainder = [bars[-1]] if n_closed < n_bars else None
closed_bars = cls._fill_na(bars, n_closed, last_closed_frame, frame_type)
# 只保存已结束的bar
await cache.save_bars(sec, closed_bars, frame_type)
if remainder is None:
return closed_bars
else:
return np.concatenate([closed_bars, remainder])
@classmethod
def _fill_na(cls, bars: np.array, n: int, end: Frame, frame_type) -> np.ndarray:
if frame_type in tf.minute_level_frames:
convert = tf.int2time
else:
convert = tf.int2date
frames = [convert(x) for x in tf.get_frames_by_count(end, n, frame_type)]
filled = np.empty(n, dtype=bars.dtype)
filled[:] = np.nan
filled["frame"] = frames
return merge(filled, bars, "frame")
@classmethod
async def get_all_trade_days(cls):
days = await cls.get_instance().get_all_trade_days()
await cache.save_calendar("day_frames", map(tf.date2int, days))
return days
@classmethod
async def get_valuation(
cls,
code: Union[str, List[str]],
day: datetime.date,
fields: List[str] = None,
n: int = 1,
) -> np.ndarray:
valuation = await cls.get_instance().get_valuation(code, day, n)
await Valuation.save(valuation)
if fields is None:
return valuation
if isinstance(fields, str):
fields = [fields]
mapping = dict(valuation.dtype.descr)
fields = [(name, mapping[name]) for name in fields]
return rfn.require_fields(valuation, fields)
@classmethod
async def get_price(
cls,
sec: Union[List, str],
end_date: Union[str, datetime.datetime],
n_bars: Optional[int],
start_date: Optional[Union[str, datetime.datetime]] = None,
) -> np.ndarray:
fields = ['open', 'close', 'high', 'low', 'volume', 'money', 'high_limit', 'low_limit', 'avg', 'factor']
params = {
"security": sec,
"end_date": end_date,
"fields": fields,
"fq": None,
"fill_paused": False,
"frequency": FrameType.MIN1.value,
}
if start_date:
params.update({"start_date": start_date})
if n_bars is not None:
params.update({"count": start_date})
if "start_date" in params and "count" in params:
raise ValueError("start_date and count cannot appear at the same time")
bars = await cls.get_instance().get_price(**params)
if len(bars) == 0:
return
| zh | 0.828451 | #!/usr/bin/env python # -*- coding: utf-8 -*- This is a awesome python script! # todo: check if implementor has implemented all the required methods # todo: check duplicates 按如下格式返回证券列表。 code display_name name start_date end_date type 000001.XSHE 平安银行 PAYH 1991-04-03 2200-01-01 stock Returns: Union[None, np.ndarray]: [description] 获取行情数据,并将已结束的周期数据存入缓存。 各种情况: 1. 假设现在时间是2021-2-24日,盘中。此时请求上证指数日线,且`include_unclosed`为 `True`: ```python get_bars("000001.XSHE", None, 1, FrameType.DAY) ``` 得到的数据可能如下: ``` [(datetime.date(2021, 2, 24), 3638.9358, 3645.5288, 3617.44, 3620.3542, ...)] ``` 在收盘前不同时间调用,得到的数据除开盘价外,其它都实时在变动。 2. 假设现在时间是2021-2-23日,盘后,此时请求上证指数日线,将得到收盘后固定的价格。 3. 上述请求中,`include_unclosed`参数使用默认值(`True`)。如果取为`False`,仍以示例1 指定的场景为例,则: ```python get_bars("000001.XSHG", None, 1, FrameType.DAY, False) ``` 因为2021-2-24日未收盘,所以获取的最后一条数据是2021-2-23日的。 4. 同样假设现在时间是2021-2-24日盘中,周三。此时获取周K线。在`include_unclosed`分别为 `True`和`False`的情况下: ``` [(datetime.date(2021, 2, 24), 3707.19, 3717.27, 3591.3647, 3592.3977, ...)] [(datetime.date(2021, 2, 19), 3721.09, 3731.69, 3634.01, 3696.17, ...)] ``` 注意这里当`include_unclosed`为True时,返回的周K线是以2021-2-24为Frame的。同样,在盘中 的不同时间取这个数据,除了`open`数值之外,其它都是实时变化的。 5. 如果在已结束的周期中,包含停牌数据,则会对停牌期间的数据进行nan填充,以方便数据使用 者可以较容易地分辨出数据不连贯的原因:哪些是停牌造成的,哪些是非交易日造成的。这种处理 会略微降低数据获取速度,并增加存储空间。 比如下面的请求: ```python get_bars("000029.XSHE", datetime.date(2020,8,18), 10, FrameType.DAY) ``` 将获取到2020-8-5到2020-8-18间共10条数据。但由于期间000029这支股票处于停牌期,所以返回 的10条数据中,数值部分全部填充为np.nan。 注意如果取周线和月线数据,如果当天停牌,但只要周线有数据,则仍能取到。周线(或者月线)的 `frame`将是停牌前一交易日。比如, ```python sec = "600721.XSHG" frame_type = FrameType.WEEK end = arrow.get("2020-4-29 15:00").datetime bars = await aq.get_bars(sec, end, 3, FrameType.WEEK) print(bars) ``` 2020年4月30日是该周的最后一个交易日。股票600721在4月29日停牌一天。上述请求将得到如下数 据: ``` [(datetime.date(2020, 4, 17), 6.02, 6.69, 5.84, 6.58, ...) (datetime.date(2020, 4, 24), 6.51, 6.57, 5.68, 5.72, ...) (datetime.date(2020, 4, 28), 5.7, 5.71, 5.17, 5.36, ...)] ``` 停牌发生在日线级别上,但我们的请求发生在周线级别上,所以不会对4/29日进行填充,而是返回 截止到4月29日的数据。 args: sec: 证券代码 end: 数据截止日 n_bars: 待获取的数据条数 frame_type: 数据所属的周期 include_unclosed: 如果为真,则会包含当end所处的那个Frame的数据,即使当前它还未结束 # 如果end超出当前时间,则认为是不合法的。如果用户想取到最新的数据,应该传入None # 根据指定的end,计算结束时的frame # 计算有多少根k线是已结束的 # 盘后取日线,返回的一定是全部都已closed的数据 # 盘中取日线,返回的last_frame会是当天的日期,但该日线并未结束 # 如果last_frame <= end的上限,则返回的也一定是全部都closed的数据 # 只保存已结束的bar | 2.383138 | 2 |
test_data/intermediate/unexpected/documentation/unexpected_documentation_elements/meta_model.py | aas-core-works/aas-core-codegen | 5 | 6632322 | <reponame>aas-core-works/aas-core-codegen<filename>test_data/intermediate/unexpected/documentation/unexpected_documentation_elements/meta_model.py<gh_stars>1-10
"""
Do something.
0: An unexpected block quote.
"""
class Something:
"""
Represent something.
1: An unexpected block quote.
"""
something: str
"""
Capture something.
2: An unexpected block quote.
"""
def __init__(self, something: str) -> None:
self.something = something
def do_something(self) -> None:
"""
Do something.
3: An unexpected block quote.
"""
__book_url__ = "dummy"
__book_version__ = "dummy"
| """
Do something.
0: An unexpected block quote.
"""
class Something:
"""
Represent something.
1: An unexpected block quote.
"""
something: str
"""
Capture something.
2: An unexpected block quote.
"""
def __init__(self, something: str) -> None:
self.something = something
def do_something(self) -> None:
"""
Do something.
3: An unexpected block quote.
"""
__book_url__ = "dummy"
__book_version__ = "dummy" | en | 0.808428 | Do something. 0: An unexpected block quote. Represent something. 1: An unexpected block quote. Capture something. 2: An unexpected block quote. Do something. 3: An unexpected block quote. | 2.67838 | 3 |
_GTW/_RST/_TOP/Dir.py | Tapyr/tapyr | 6 | 6632323 | <reponame>Tapyr/tapyr
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2017 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# #*** <License> ************************************************************#
# This module is part of the package GTW.RST.TOP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.RST.TOP.Dir
#
# Purpose
# Model a directory in a tree of pages
#
# Revision Dates
# 6-Jul-2012 (CT) Creation (based on GTW.NAV.Base)
# 18-Jul-2012 (CT) Redefine `add_entries` to set `Index`
# 18-Jul-2012 (CT) Move `add_entries` from `_Dir_` to `_Dir_Base_`
# 18-Jul-2012 (CT) Redefine `_Dir_._get_child` to handle "index"
# 20-Jul-2012 (CT) Factor `_add_index`
# 23-Jul-2012 (CT) Redefine `Dir_V.has_children`
# 3-Aug-2012 (CT) Change `is_current_dir` to use `href`, not `prefix`
# 6-Aug-2012 (MG) Consider `hidden` in `is_current_dir`
# 7-Aug-2012 (CT) Factor `own_links` to `RST.Base`
# 8-Aug-2012 (MG) Consider `hidden` in `_effective`
# 9-Aug-2012 (CT) Fix `is_current_dir` (test for "/" after `startswith`)
# 17-Sep-2012 (CT) Ignore `TypeError` in `_effective`
# 26-Sep-2012 (CT) Factor `_effective_entry`
# 26-Sep-2012 (CT) Change `_effective_entry` to consider `self.hidden`
# 26-Sep-2012 (CT) Remove `hidden` from `is_current_dir`
# 26-Sep-2012 (CT) Redefine `show_in_nav`
# 7-Dec-2012 (CT) Check `allow_method` in `_effective_entry`
# 17-Sep-2013 (CT) Move `IndexError` from `_effective` to `_effective_entry`
# 8-Feb-2017 (CT) Make argument `nav_page` of `show_in_nav` optional
# ««revision-date»»···
#--
from _GTW import GTW
from _TFL import TFL
import _GTW._RST.Resource
import _GTW._RST._TOP.Base
from _TFL._Meta.Once_Property import Once_Property
from _TFL.Decorator import getattr_safe
from _TFL.predicate import enumerate_slice, first
class _TOP_Dir_Base_ (GTW.RST.TOP._Base_, GTW.RST._Dir_Base_) :
_real_name = "_Dir_Base_"
def add_entries (self, * entries) :
l = len (self._entries)
self.__super.add_entries (* entries)
self._add_index (l)
# end def add_entries
def is_current_dir (self, page) :
p = page.href_dynamic
s = self.href_dynamic
return p == s or (p.startswith (s) and p [len (s)] == "/")
# end def is_current_dir
def _add_index (self, l) :
Index = self.Index_Type
for i, e in enumerate_slice (self._entries, l) :
e._index = (Index (i))
# end def _add_index
_Dir_Base_ = _TOP_Dir_Base_ # end class
_Ancestor = _Dir_Base_
class _TOP_Dir_ (_Ancestor, GTW.RST._Dir_) :
_real_name = "_Dir_"
@property
@getattr_safe
def has_children (self) :
try :
first (self.own_links)
except IndexError :
return False
else :
return True
# end def has_children
@property
@getattr_safe
def _effective (self) :
result = self
dt = self.dir_template
if dt is None :
try :
result = self._effective_entry
except TypeError as exc :
print ("TypeError in %s._effective: %s" % (self, exc))
return result
# end def _effective
@property
@getattr_safe
def _effective_entry (self) :
if self.hidden :
entries = self.entries
else :
entries = (e for e in self.entries if not e.hidden)
if self.request :
method = self.request.method
user = self.user
entries = (e for e in entries if e.allow_method (method, user))
try :
page = first (entries)
except IndexError :
return self
else :
return page._effective
# end def _effective_entry
def show_in_nav (self, nav_page = None) :
return \
( self.__super.show_in_nav (nav_page)
or (nav_page is not None and self.is_current_dir (nav_page))
)
# end def show_in_nav
def _get_child (self, child, * grandchildren) :
result = self.__super._get_child (child, * grandchildren)
if result is None and child == "index" and not grandchildren :
return self
return result
# end def _get_child
_Dir_ = _TOP_Dir_ # end class
class TOP_Dir (_Dir_, GTW.RST.Dir) :
"""Directory of tree of pages."""
_real_name = "Dir"
Dir = TOP_Dir # end class
class TOP_Dir_V (_Dir_Base_, GTW.RST.Dir_V) :
"""Volatile directory of tree of pages (directory with children,
without permanent `_entries`).
"""
_real_name = "Dir_V"
@property
@getattr_safe
def has_children (self) :
return False
# end def has_children
Dir_V = TOP_Dir_V # end class
if __name__ != "__main__" :
GTW.RST.TOP._Export ("*", "_Dir_Base_", "_Dir_")
### __END__ GTW.RST.TOP.Dir
| # -*- coding: utf-8 -*-
# Copyright (C) 2012-2017 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# #*** <License> ************************************************************#
# This module is part of the package GTW.RST.TOP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.RST.TOP.Dir
#
# Purpose
# Model a directory in a tree of pages
#
# Revision Dates
# 6-Jul-2012 (CT) Creation (based on GTW.NAV.Base)
# 18-Jul-2012 (CT) Redefine `add_entries` to set `Index`
# 18-Jul-2012 (CT) Move `add_entries` from `_Dir_` to `_Dir_Base_`
# 18-Jul-2012 (CT) Redefine `_Dir_._get_child` to handle "index"
# 20-Jul-2012 (CT) Factor `_add_index`
# 23-Jul-2012 (CT) Redefine `Dir_V.has_children`
# 3-Aug-2012 (CT) Change `is_current_dir` to use `href`, not `prefix`
# 6-Aug-2012 (MG) Consider `hidden` in `is_current_dir`
# 7-Aug-2012 (CT) Factor `own_links` to `RST.Base`
# 8-Aug-2012 (MG) Consider `hidden` in `_effective`
# 9-Aug-2012 (CT) Fix `is_current_dir` (test for "/" after `startswith`)
# 17-Sep-2012 (CT) Ignore `TypeError` in `_effective`
# 26-Sep-2012 (CT) Factor `_effective_entry`
# 26-Sep-2012 (CT) Change `_effective_entry` to consider `self.hidden`
# 26-Sep-2012 (CT) Remove `hidden` from `is_current_dir`
# 26-Sep-2012 (CT) Redefine `show_in_nav`
# 7-Dec-2012 (CT) Check `allow_method` in `_effective_entry`
# 17-Sep-2013 (CT) Move `IndexError` from `_effective` to `_effective_entry`
# 8-Feb-2017 (CT) Make argument `nav_page` of `show_in_nav` optional
# ««revision-date»»···
#--
from _GTW import GTW
from _TFL import TFL
import _GTW._RST.Resource
import _GTW._RST._TOP.Base
from _TFL._Meta.Once_Property import Once_Property
from _TFL.Decorator import getattr_safe
from _TFL.predicate import enumerate_slice, first
class _TOP_Dir_Base_ (GTW.RST.TOP._Base_, GTW.RST._Dir_Base_) :
_real_name = "_Dir_Base_"
def add_entries (self, * entries) :
l = len (self._entries)
self.__super.add_entries (* entries)
self._add_index (l)
# end def add_entries
def is_current_dir (self, page) :
p = page.href_dynamic
s = self.href_dynamic
return p == s or (p.startswith (s) and p [len (s)] == "/")
# end def is_current_dir
def _add_index (self, l) :
Index = self.Index_Type
for i, e in enumerate_slice (self._entries, l) :
e._index = (Index (i))
# end def _add_index
_Dir_Base_ = _TOP_Dir_Base_ # end class
_Ancestor = _Dir_Base_
class _TOP_Dir_ (_Ancestor, GTW.RST._Dir_) :
_real_name = "_Dir_"
@property
@getattr_safe
def has_children (self) :
try :
first (self.own_links)
except IndexError :
return False
else :
return True
# end def has_children
@property
@getattr_safe
def _effective (self) :
result = self
dt = self.dir_template
if dt is None :
try :
result = self._effective_entry
except TypeError as exc :
print ("TypeError in %s._effective: %s" % (self, exc))
return result
# end def _effective
@property
@getattr_safe
def _effective_entry (self) :
if self.hidden :
entries = self.entries
else :
entries = (e for e in self.entries if not e.hidden)
if self.request :
method = self.request.method
user = self.user
entries = (e for e in entries if e.allow_method (method, user))
try :
page = first (entries)
except IndexError :
return self
else :
return page._effective
# end def _effective_entry
def show_in_nav (self, nav_page = None) :
return \
( self.__super.show_in_nav (nav_page)
or (nav_page is not None and self.is_current_dir (nav_page))
)
# end def show_in_nav
def _get_child (self, child, * grandchildren) :
result = self.__super._get_child (child, * grandchildren)
if result is None and child == "index" and not grandchildren :
return self
return result
# end def _get_child
_Dir_ = _TOP_Dir_ # end class
class TOP_Dir (_Dir_, GTW.RST.Dir) :
"""Directory of tree of pages."""
_real_name = "Dir"
Dir = TOP_Dir # end class
class TOP_Dir_V (_Dir_Base_, GTW.RST.Dir_V) :
"""Volatile directory of tree of pages (directory with children,
without permanent `_entries`).
"""
_real_name = "Dir_V"
@property
@getattr_safe
def has_children (self) :
return False
# end def has_children
Dir_V = TOP_Dir_V # end class
if __name__ != "__main__" :
GTW.RST.TOP._Export ("*", "_Dir_Base_", "_Dir_")
### __END__ GTW.RST.TOP.Dir | en | 0.356333 | # -*- coding: utf-8 -*- # Copyright (C) 2012-2017 Mag. <NAME> All rights reserved # Glasauergasse 32, A--1130 Wien, Austria. <EMAIL> # #*** <License> ************************************************************# # This module is part of the package GTW.RST.TOP. # # This module is licensed under the terms of the BSD 3-Clause License # <http://www.c-tanzer.at/license/bsd_3c.html>. # #*** </License> ***********************************************************# # #++ # Name # GTW.RST.TOP.Dir # # Purpose # Model a directory in a tree of pages # # Revision Dates # 6-Jul-2012 (CT) Creation (based on GTW.NAV.Base) # 18-Jul-2012 (CT) Redefine `add_entries` to set `Index` # 18-Jul-2012 (CT) Move `add_entries` from `_Dir_` to `_Dir_Base_` # 18-Jul-2012 (CT) Redefine `_Dir_._get_child` to handle "index" # 20-Jul-2012 (CT) Factor `_add_index` # 23-Jul-2012 (CT) Redefine `Dir_V.has_children` # 3-Aug-2012 (CT) Change `is_current_dir` to use `href`, not `prefix` # 6-Aug-2012 (MG) Consider `hidden` in `is_current_dir` # 7-Aug-2012 (CT) Factor `own_links` to `RST.Base` # 8-Aug-2012 (MG) Consider `hidden` in `_effective` # 9-Aug-2012 (CT) Fix `is_current_dir` (test for "/" after `startswith`) # 17-Sep-2012 (CT) Ignore `TypeError` in `_effective` # 26-Sep-2012 (CT) Factor `_effective_entry` # 26-Sep-2012 (CT) Change `_effective_entry` to consider `self.hidden` # 26-Sep-2012 (CT) Remove `hidden` from `is_current_dir` # 26-Sep-2012 (CT) Redefine `show_in_nav` # 7-Dec-2012 (CT) Check `allow_method` in `_effective_entry` # 17-Sep-2013 (CT) Move `IndexError` from `_effective` to `_effective_entry` # 8-Feb-2017 (CT) Make argument `nav_page` of `show_in_nav` optional # ««revision-date»»··· #-- # end def add_entries # end def is_current_dir # end def _add_index # end class # end def has_children # end def _effective # end def _effective_entry # end def show_in_nav # end def _get_child # end class Directory of tree of pages. # end class Volatile directory of tree of pages (directory with children, without permanent `_entries`). # end def has_children # end class ### __END__ GTW.RST.TOP.Dir | 1.574257 | 2 |
tests/test_multiplesOf3Or5.py | janusson/codewars-katas | 0 | 6632324 | # multiplesOf3Or5.py
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Finish the solution so that it returns the sum of all the multiples of 3 or 5
below the number passed in.
Note: If the number is a multiple of both 3 and 5, only count it once. Also,
if a number is negative, return 0(for languages that do have them)
https://www.codewars.com/kata/514b92a657cdc65150000006/train/python
"""
def solution(number):
    # Guard against non-int or non-positive input, then sum multiples of 3 or 5 below `number`.
    if isinstance(number, int) and number > 0:
        multiples = [i for i in range(1, number) if (i % 3 == 0) or (i % 5 == 0)]
        return sum(multiples)
    else:
        return 0
print('The sum of 3/5 multiples below number:')
print(solution(None))
| # multiplesOf3Or5.py
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Finish the solution so that it returns the sum of all the multiples of 3 or 5
below the number passed in.
Note: If the number is a multiple of both 3 and 5, only count it once. Also,
if a number is negative, return 0(for languages that do have them)
https://www.codewars.com/kata/514b92a657cdc65150000006/train/python
"""
def solution(number):
    # Guard against non-int or non-positive input, then sum multiples of 3 or 5 below `number`.
    if isinstance(number, int) and number > 0:
        multiples = [i for i in range(1, number) if (i % 3 == 0) or (i % 5 == 0)]
        return sum(multiples)
    else:
        return 0
print('The sum of 3/5 multiples below number:')
print(solution(None))
| en | 0.77756 | # multiplesOf3Or5.py If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Finish the solution so that it returns the sum of all the multiples of 3 or 5 below the number passed in. Note: If the number is a multiple of both 3 and 5, only count it once. Also, if a number is negative, return 0(for languages that do have them) https://www.codewars.com/kata/514b92a657cdc65150000006/train/python | 4.124288 | 4 |
tests/test_model.py | heinrichreimer/targer-api | 1 | 6632325 | from typing import List
from pytest import fixture
from targer_api import (
ArgumentLabel, ArgumentTag, ArgumentSentence, ArgumentSentences
)
@fixture
def argument_label() -> ArgumentLabel:
return ArgumentLabel.C_I
@fixture
def argument_tag(argument_label: ArgumentLabel) -> ArgumentTag:
return ArgumentTag(argument_label, 0.7, "house")
def test_argument_tag(argument_tag: ArgumentTag):
assert isinstance(argument_tag.label, ArgumentLabel)
assert argument_tag.label == ArgumentLabel.C_I
assert argument_tag.probability == 0.7
assert argument_tag.token == "house"
@fixture
def argument_sentence(argument_tag: ArgumentTag) -> ArgumentSentence:
return [argument_tag]
def test_argument_sentence(argument_sentence: ArgumentSentence):
assert len(argument_sentence) == 1
assert isinstance(argument_sentence[0], ArgumentTag)
@fixture
def argument_sentences(
argument_sentence: ArgumentSentence
) -> ArgumentSentences:
return [argument_sentence]
def test_argument_sentences(argument_sentences: ArgumentSentences):
assert len(argument_sentences) == 1
assert isinstance(argument_sentences[0], List)
| from typing import List
from pytest import fixture
from targer_api import (
ArgumentLabel, ArgumentTag, ArgumentSentence, ArgumentSentences
)
@fixture
def argument_label() -> ArgumentLabel:
return ArgumentLabel.C_I
@fixture
def argument_tag(argument_label: ArgumentLabel) -> ArgumentTag:
return ArgumentTag(argument_label, 0.7, "house")
def test_argument_tag(argument_tag: ArgumentTag):
assert isinstance(argument_tag.label, ArgumentLabel)
assert argument_tag.label == ArgumentLabel.C_I
assert argument_tag.probability == 0.7
assert argument_tag.token == "house"
@fixture
def argument_sentence(argument_tag: ArgumentTag) -> ArgumentSentence:
return [argument_tag]
def test_argument_sentence(argument_sentence: ArgumentSentence):
assert len(argument_sentence) == 1
assert isinstance(argument_sentence[0], ArgumentTag)
@fixture
def argument_sentences(
argument_sentence: ArgumentSentence
) -> ArgumentSentences:
return [argument_sentence]
def test_argument_sentences(argument_sentences: ArgumentSentences):
assert len(argument_sentences) == 1
assert isinstance(argument_sentences[0], List)
| none | 1 | 2.449685 | 2 |
|
Horan_Colby/email_validation-db/mysqlconnection.py | webguru001/Python-Django-Web | 5 | 6632326 | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
def __init__(self, app, db):
config = {
'host': 'localhost',
'database': db,
'user': 'root',
'password': '',
'port': '3306'
}
DATABASE_URI = "mysql://{}:{}@127.0.0.1:{}/{}".format(config['user'], config['password'], config['port'], config['database'])
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
self.db = SQLAlchemy(app)
def query_db(self, query, data=None):
result = self.db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
list_result = [dict(r) for r in result]
return list_result
elif query[0:6].lower() == 'insert':
self.db.session.commit()
return result.lastrowid
else:
self.db.session.commit()
def MySQLConnector(app, db):
return MySQLConnection(app, db) | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
def __init__(self, app, db):
config = {
'host': 'localhost',
'database': db,
'user': 'root',
'password': '',
'port': '3306'
}
DATABASE_URI = "mysql://{}:{}@127.0.0.1:{}/{}".format(config['user'], config['password'], config['port'], config['database'])
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
self.db = SQLAlchemy(app)
def query_db(self, query, data=None):
result = self.db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
list_result = [dict(r) for r in result]
return list_result
elif query[0:6].lower() == 'insert':
self.db.session.commit()
return result.lastrowid
else:
self.db.session.commit()
def MySQLConnector(app, db):
return MySQLConnection(app, db) | en | 0.91618 | # Create a class that will give us an object that we can use to connect to a database | 3.30058 | 3 |
grading_scripts/format_grades.py | vardhman1996/473_grading_scripts | 0 | 6632327 | # grade formatting for cse 473
# author: <NAME>
import os
import sys
import zipfile
if len(sys.argv) != 3:
print 'Usage: python format_grades.py <project number> <output directory>'
exit()
_, project_num, d = sys.argv
if project_num not in ['0','1','2','3','4','5']:
print 'Invalid project number. Must be one of [0,1,2,3,4,5].'
exit()
if d[-1] != '/':
d += '/'
all_txt = sorted(i for i in os.listdir(d) if i.endswith('.txt'))
len_longest = len(max(all_txt, key=len))
for cur_file in all_txt:
name = cur_file[:-4]
with open(d+cur_file, 'r') as f:
txt = f.read()
output_val='?'
if len(txt.split('\n')) > 10:
if project_num == '0':
# Project 0 output
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' nodes)'
if project_num == '1':
# Project 1 output
if 'expanded nodes:' in txt:
remainder=txt[txt.index('expanded nodes:')+len('expanded nodes:'):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6]
elif project_num == '2':
# Project 2 output
if 'Question q5\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' average)'
elif project_num == '3':
# Project 3 output
if 'Question q7\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' points)'
elif project_num == '4':
# Project 4 output
if 'Question q7\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
total=txt[txt.rfind('Total: '):].split('\n')[0]
print name.ljust(len_longest) + '- ' + total + ' (' + str(output_val) + ' points)'
elif project_num == '5':
# Project 5 output
if 'Question q4\n===========' in txt:
remainder=txt[txt.rfind('correct out of 100 (')+len('correct out of 100 ('):]
output_val = remainder.split()[0][:-2]
total=txt[txt.rfind('Total: '):].split('\n')[0]
print name.ljust(len_longest) + '- ' + total + ' (' + str(output_val) + ' points)'
else:
print name.ljust(len_longest) + '- ' + 'error'
| # grade formatting for cse 473
# author: <NAME>
import os
import sys
import zipfile
if len(sys.argv) != 3:
print 'Usage: python format_grades.py <project number> <output directory>'
exit()
_, project_num, d = sys.argv
if project_num not in ['0','1','2','3','4','5']:
print 'Invalid project number. Must be one of [0,1,2,3,4,5].'
exit()
if d[-1] != '/':
d += '/'
all_txt = sorted(i for i in os.listdir(d) if i.endswith('.txt'))
len_longest = len(max(all_txt, key=len))
for cur_file in all_txt:
name = cur_file[:-4]
with open(d+cur_file, 'r') as f:
txt = f.read()
output_val='?'
if len(txt.split('\n')) > 10:
if project_num == '0':
# Project 0 output
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' nodes)'
if project_num == '1':
# Project 1 output
if 'expanded nodes:' in txt:
remainder=txt[txt.index('expanded nodes:')+len('expanded nodes:'):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6]
elif project_num == '2':
# Project 2 output
if 'Question q5\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' average)'
elif project_num == '3':
# Project 3 output
if 'Question q7\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
print name.ljust(len_longest) + '- ' + txt.split('\n')[-6] + ' (' + str(output_val) + ' points)'
elif project_num == '4':
# Project 4 output
if 'Question q7\n===========' in txt:
remainder=txt[txt.rfind('Average Score: ')+len('Average Score: '):]
output_val = remainder.split()[0]
total=txt[txt.rfind('Total: '):].split('\n')[0]
print name.ljust(len_longest) + '- ' + total + ' (' + str(output_val) + ' points)'
elif project_num == '5':
# Project 5 output
if 'Question q4\n===========' in txt:
remainder=txt[txt.rfind('correct out of 100 (')+len('correct out of 100 ('):]
output_val = remainder.split()[0][:-2]
total=txt[txt.rfind('Total: '):].split('\n')[0]
print name.ljust(len_longest) + '- ' + total + ' (' + str(output_val) + ' points)'
else:
print name.ljust(len_longest) + '- ' + 'error'
| en | 0.395345 | # grade formatting for cse 473 # author: <NAME> # Project 0 output # Project 1 output # Project 2 output # Project 3 output # Project 4 output # Project 5 output | 3.646508 | 4 |
2019-01/WeixinArticles/db_redis.py | authetic-x/Web_Scraping | 0 | 6632328 | from redis import StrictRedis
from config import *
from request import WeixinRequest
from pickle import dumps, loads
class RedisQueue():
def __init__(self):
self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
def add(self, request):
if isinstance(request, WeixinRequest):
return self.db.rpush(REDIS_KEY, dumps(request))
return False
def pop(self):
if self.db.llen(REDIS_KEY):
return loads(self.db.lpop(REDIS_KEY))
return False
def clear(self):
self.db.delete(REDIS_KEY)
def empty(self):
return self.db.llen(REDIS_KEY) == 0
if __name__ == '__main__':
db = RedisQueue()
start_url = 'http://www.baidu.com'
weixin_request = WeixinRequest(url=start_url, callback='hello', need_proxy=True)
db.add(weixin_request)
request = db.pop()
print(request)
print(request.callback, request.need_proxy) | from redis import StrictRedis
from config import *
from request import WeixinRequest
from pickle import dumps, loads
class RedisQueue():
def __init__(self):
self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
def add(self, request):
if isinstance(request, WeixinRequest):
return self.db.rpush(REDIS_KEY, dumps(request))
return False
def pop(self):
if self.db.llen(REDIS_KEY):
return loads(self.db.lpop(REDIS_KEY))
return False
def clear(self):
self.db.delete(REDIS_KEY)
def empty(self):
return self.db.llen(REDIS_KEY) == 0
if __name__ == '__main__':
db = RedisQueue()
start_url = 'http://www.baidu.com'
weixin_request = WeixinRequest(url=start_url, callback='hello', need_proxy=True)
db.add(weixin_request)
request = db.pop()
print(request)
print(request.callback, request.need_proxy) | none | 1 | 2.660346 | 3 |
|
pyredis/pool.py | schlitzered/pyredis | 43 | 6632329 | from random import shuffle
import threading
from pyredis import commands
from pyredis.client import Client, ClusterClient, HashClient, SentinelClient
from pyredis.exceptions import *
from pyredis.helper import ClusterMap
class BasePool(object):
""" Base Class for all other pools.
All other pools inherit from this base class.
    This class itself cannot be used directly.
:param database:
Select which db should be used for this pool
:type database: int
:param password:
Password used for authentication. If None, no authentication is done
:type password: str
:param encoding:
Convert result strings with this encoding. If None, no encoding is done.
:type encoding: str
:param conn_timeout:
Connect Timeout.
:type conn_timeout: float
:param read_timeout:
Read Timeout.
:type read_timeout: float
:param pool_size:
Upper limit of connections this pool can handle.
:type pool_size: int
:param lock:
Class implementing a Lock.
:type lock: _lock object, defaults to threading.Lock
:param username:
        Username used for ACL authentication. If not set, falls back to legacy auth.
:type username: str
"""
def __init__(
self,
database=0,
            password=None,
encoding=None,
conn_timeout=2,
read_timeout=2,
pool_size=16,
lock=threading.Lock(),
username=None
):
self._conn_timeout = conn_timeout
self._read_timeout = read_timeout
self._lock = lock
self._pool_free = set()
self._pool_used = set()
self._database = database
self._password = password
self._encoding = encoding
self._pool_size = pool_size
self._close_on_err = False
self._cluster = False
self._username = username
@property
def conn_timeout(self):
""" Return configured connection timeout
:return: float
"""
return self._conn_timeout
@property
def read_timeout(self):
""" Return configured read timeout
:return: float
"""
return self._read_timeout
@property
def database(self):
""" Return configured database.
:return: int
"""
return self._database
@property
def password(self):
""" Return configured password for this pool.
:return: str, None
"""
return self._password
@property
def encoding(self):
""" Return configured encoding
:return: str, None
"""
return self._encoding
@property
def pool_size(self):
""" Return, or adjust the current pool size.
        Shrinking is implemented by closing unused connections.
        If there are not enough unused connections to fulfil the shrink request,
        connections returned via pool.release are closed.
:return: int, None
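
        Example (illustrative sketch; ``pool`` stands for any concrete pool
        instance from this module)::

            pool.pool_size = 8  # shrinking closes surplus idle connections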
"""
return self._pool_size
@pool_size.setter
def pool_size(self, size):
try:
self._lock.acquire()
self._pool_size = size
current_size = len(self._pool_free) + len(self._pool_used)
while current_size > size:
try:
client = self._pool_free.pop()
client.close()
current_size -= 1
except KeyError:
break
finally:
self._lock.release()
@property
def close_on_err(self):
return self._close_on_err
@property
def username(self):
return self._username
def _connect(self):
        raise NotImplementedError
def acquire(self):
""" Acquire a client connection from the pool.
:return: redis.Client, exception
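
        Example (illustrative sketch using the Pool subclass defined below;
        host and port are placeholders)::

            pool = Pool(host='127.0.0.1', port=6379)
            client = pool.acquire()
            try:
                client.execute('PING')
            finally:
                pool.release(client)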
"""
try:
self._lock.acquire()
client = self._pool_free.pop()
self._pool_used.add(client)
except KeyError:
if len(self._pool_used) < self.pool_size:
client = self._connect()
self._pool_used.add(client)
else:
raise PyRedisError('Max connections {0} exhausted'.format(self.pool_size))
finally:
self._lock.release()
return client
def release(self, conn):
""" Return a client connection to the pool.
:param conn:
redis.Client instance, managed by this pool.
:return: None
"""
try:
self._lock.acquire()
current_size = len(self._pool_free) + len(self._pool_used)
self._pool_used.remove(conn)
if conn.closed and self.close_on_err:
for conn in self._pool_free:
conn.close()
self._pool_free = set()
self._pool_used = set()
elif not conn.closed:
if current_size > self.pool_size:
conn.close()
else:
self._pool_free.add(conn)
except KeyError:
conn.close()
finally:
self._lock.release()
class ClusterPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Redis Cluster Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param seeds:
Accepts a list of seed nodes in this form: [('host1', 6379), ('host2', 6379), ('host3', 6379)]
    :type seeds: list
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
        In case a chunk (slot) move is ongoing while executing a command, how many
        times we should try to find the right node before giving up.
:type retries: int
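
    Example (illustrative sketch; node addresses are placeholders and
    ``set``/``get`` come from the inherited command mixins)::

        pool = ClusterPool(seeds=[('node1', 7000), ('node2', 7000), ('node3', 7000)])
        pool.set('key', 'value')
        value = pool.get('key')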
"""
def __init__(
self,
seeds,
slave_ok=False,
        password=None,
username=None,
**kwargs
):
super().__init__(password=password, **kwargs)
self._map = ClusterMap(
seeds=seeds, password=password, username=username
)
self._slave_ok = slave_ok
self._cluster = True
@property
def slave_ok(self):
""" True if this pool will return slave connections
:return: bool
"""
return self._slave_ok
def _connect(self):
return ClusterClient(
database=self.database,
            password=self.password,
encoding=self.encoding,
slave_ok=self.slave_ok,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
cluster_map=self._map,
username=self.username
)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
class HashPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Pool for straight connections to Redis
Inherits all the arguments, methods and attributes from BasePool.
The Client will calculate a crc16 hash using the shard_key,
    which is by default the first Key in case the command supports multiple keys.
If the Key is using the TAG annotation "bla{tag}blarg",
then only the tag portion is used, in this case "tag".
The key space is split into 16384 buckets, so in theory you could provide
a list with 16384 ('host', port) pairs to the "buckets" parameter.
    If you have less than 16384 ('host', port) pairs, the client will try to
distribute the key spaces evenly between available pairs.
--- Warning ---
    Since this is static hashing, the order of pairs has to match on each client you use!
Also changing the number of pairs will change the mapping between buckets and pairs,
rendering your data inaccessible!
:param buckets:
list of ('host', port) pairs, where each pair represents a bucket
example: [('localhost', 7001), ('localhost', 7002), ('localhost', 7003)]
    :type buckets: list
"""
def __init__(self, buckets, **kwargs):
super().__init__(**kwargs)
self._buckets = buckets
self._cluster = True
@property
def buckets(self):
""" Return configured buckets.
:return: list
"""
return self._buckets
def _connect(self):
return HashClient(
buckets=self.buckets,
database=self.database,
password=self.password,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
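# --- Illustrative sketch, not part of the original module ---
# HashPool with three static buckets; as the warning above says, the bucket order
# must be identical in every client, or keys map to different nodes. Addresses and
# the key are assumptions; set/get come from the commands.String mixin.
def _example_hash_pool_usage():
    buckets = [('localhost', 7001), ('localhost', 7002), ('localhost', 7003)]
    pool = HashPool(buckets=buckets)
    pool.set('user:{42}:name', 'alice')   # only the tag "42" feeds the crc16 hash
    return pool.get('user:{42}:name')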
class Pool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Pool for straight connections to Redis
Inherits all the arguments, methods and attributes from BasePool.
:param host:
Host IP or Name to connect,
can only be set when unix_sock is None.
:type host: str
:param port:
Port to connect, only used when host is also set.
:type port: int
:param unix_sock:
Unix Socket to connect,
can only be set when host is None.
:type unix_sock: str
"""
def __init__(self, host=None, port=6379, unix_sock=None, **kwargs):
if not bool(host) != bool(unix_sock):
raise PyRedisError("Ether host or unix_sock has to be provided")
super().__init__(**kwargs)
self._host = host
self._port = port
self._unix_sock = unix_sock
@property
def host(self):
""" Return configured host.
:return: str, None
"""
return self._host
@property
def port(self):
""" Return configured port.
:return: int
"""
return self._port
@property
def unix_sock(self):
""" Return configured Unix socket.
:return: str, None
"""
return self._unix_sock
def _connect(self):
return Client(
host=self.host,
port=self.port,
unix_sock=self.unix_sock,
database=self.database,
password=<PASSWORD>.password,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def execute(self, *args):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args)
finally:
self.release(conn)
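# --- Illustrative sketch, not part of the original module ---
# Exactly one of host or unix_sock may be given; both constructions below use
# placeholder addresses/paths, and execute() is the raw command interface.
def _example_pool_usage():
    tcp_pool = Pool(host='localhost', port=6379, database=0)
    unix_pool = Pool(unix_sock='/var/run/redis/redis.sock')  # alternative transport, shown for contrast
    tcp_pool.execute('SET', 'greeting', 'hello')
    return tcp_pool.execute('GET', 'greeting')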
class SentinelHashPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Sentinel backed Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param sentinels:
Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)]
:type sentinels: list
:param buckets:
        list of Sentinel managed replication sets which make up this HashPool
    :type buckets: list
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
In case a sentinel delivers stale data, how many other sentinels should be tried.
:type retries: int
:param sentinel_password:
Password used for authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_password: str
:param sentinel_username:
Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_username: str
"""
def __init__(
self,
sentinels,
buckets,
slave_ok=False,
retries=3,
sentinel_password=None,
sentinel_username=None,
**kwargs
):
super().__init__(**kwargs)
self._sentinel = SentinelClient(
sentinels=sentinels,
password=sentinel_password,
username=sentinel_username
)
self._buckets = buckets
self._slave_ok = slave_ok
self._retries = retries
self._close_on_err = True
self._cluster = True
@property
def slave_ok(self):
""" True if this pool return slave connections
:return: bool
"""
return self._slave_ok
@property
def buckets(self):
""" Name of the configured Sentinel managed cluster.
:return: str
"""
return self._buckets
@property
def retries(self):
""" Number of retries in case of stale sentinel.
:return: int
"""
return self._retries
@property
def sentinels(self):
""" Deque with configured sentinels.
:return: deque
"""
return self._sentinel.sentinels
def _connect(self):
if self.slave_ok:
client = self._get_slaves()
else:
client = self._get_masters()
if client:
return client
def _get_hash_client(self, buckets):
return HashClient(
buckets=buckets,
database=self.database,
password=<PASSWORD>,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def _get_master(self, bucket):
candidate = self._sentinel.get_master(bucket)
host = candidate[b'ip'].decode('utf8')
port = int(candidate[b'port'])
return host, port
def _get_masters(self):
buckets = list()
for bucket in self.buckets:
_counter = self.retries
while _counter >= 0:
_counter -= 1
_bucket = self._get_master(bucket)
if _bucket:
buckets.append(_bucket)
break
if _counter == 0:
raise PyRedisConnError("Could not connect to bucket {0}".format(bucket))
return self._get_hash_client(buckets=buckets)
def _get_slave(self, bucket):
candidates = []
for candidate in self._sentinel.get_slaves(bucket):
candidates.append((candidate[b'ip'], int(candidate[b'port'])))
shuffle(candidates)
host = candidates[0][0].decode('utf8')
port = int(candidates[0][1])
return host, port
def _get_slaves(self):
buckets = list()
for bucket in self.buckets:
_counter = self.retries
while _counter >= 0:
_counter -= 1
_bucket = self._get_slave(bucket)
if _bucket:
buckets.append(_bucket)
break
if _counter == 0:
raise PyRedisConnError("Could not connect to bucket {0}".format(bucket))
return self._get_hash_client(buckets=buckets)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
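# --- Illustrative sketch, not part of the original module ---
# Every named replication set becomes one hash bucket, and the sentinels are asked
# for its current master (or a replica when slave_ok=True). Sentinel addresses and
# replication-set names are assumptions.
def _example_sentinel_hash_pool_usage():
    pool = SentinelHashPool(
        sentinels=[('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)],
        buckets=['shard0', 'shard1', 'shard2'],
        slave_ok=False,
    )
    pool.set('key', 'value')
    return pool.get('key')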
class SentinelPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Sentinel backed Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param sentinels:
Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)]
:type sentinels: list
:param name:
Name of the cluster managed by sentinel, that this pool should manage.
:type name: str
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
In case a sentinel delivers stale data, how many other sentinels should be tried.
:type retries: int
:param sentinel_password:
Password used for authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_password: str
:param sentinel_username:
Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_username: str
"""
def __init__(
self,
sentinels,
name,
slave_ok=False,
retries=3,
sentinel_password=<PASSWORD>,
sentinel_username=None,
**kwargs
):
super().__init__(**kwargs)
self._sentinel = SentinelClient(
sentinels=sentinels,
password=<PASSWORD>,
username=sentinel_username
)
self._name = name
self._slave_ok = slave_ok
self._retries = retries
self._close_on_err = True
@property
def slave_ok(self):
""" True if this pool return slave connections
:return: bool
"""
return self._slave_ok
@property
def name(self):
""" Name of the configured Sentinel managed cluster.
:return: str
"""
return self._name
@property
def retries(self):
""" Number of retries in case of stale sentinel.
:return: int
"""
return self._retries
@property
def sentinels(self):
""" Deque with configured sentinels.
:return: deque
"""
return self._sentinel.sentinels
def _connect(self):
for _ in range(self.retries):
if self.slave_ok:
client = self._get_slave()
else:
client = self._get_master()
if client:
return client
raise PyRedisConnError("Could not connect to Redis")
def _get_client(self, host, port):
return Client(
host=host,
port=port,
database=self.database,
password=<PASSWORD>,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def _get_master(self):
candidate = self._sentinel.get_master(self.name)
host = candidate[b'ip']
port = int(candidate[b'port'])
client = self._get_client(host, port)
return client
def _get_slave(self):
candidates = []
for candidate in self._sentinel.get_slaves(self.name):
candidates.append((candidate[b'ip'], int(candidate[b'port'])))
shuffle(candidates)
host = candidates[0][0]
port = int(candidates[0][1])
client = self._get_client(host, port)
return client
def execute(self, *args):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args)
finally:
self.release(conn)
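# --- Illustrative sketch, not part of the original module ---
# SentinelPool tracks a single sentinel-managed replication set; reads can be
# served by replicas when slave_ok=True. Addresses and the set name are assumptions.
def _example_sentinel_pool_usage():
    pool = SentinelPool(
        sentinels=[('sentinel1', 26379), ('sentinel2', 26379)],
        name='mymaster',
        slave_ok=False,
    )
    pool.execute('SET', 'key', 'value')
    return pool.execute('GET', 'key')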
| from random import shuffle
import threading
from pyredis import commands
from pyredis.client import Client, ClusterClient, HashClient, SentinelClient
from pyredis.exceptions import *
from pyredis.helper import ClusterMap
class BasePool(object):
""" Base Class for all other pools.
All other pools inherit from this base class.
This class itself, cannot be used directly.
:param database:
Select which db should be used for this pool
:type database: int
:param password:
Password used for authentication. If None, no authentication is done
:type password: str
:param encoding:
Convert result strings with this encoding. If None, no encoding is done.
:type encoding: str
:param conn_timeout:
Connect Timeout.
:type conn_timeout: float
:param read_timeout:
Read Timeout.
:type read_timeout: float
:param pool_size:
Upper limit of connections this pool can handle.
:type pool_size: int
:param lock:
Class implementing a Lock.
:type lock: _lock object, defaults to threading.Lock
:param username:
        Username used for acl style authentication. If not set, fall back to legacy auth.
:type username: str
"""
def __init__(
self,
database=0,
password=<PASSWORD>,
encoding=None,
conn_timeout=2,
read_timeout=2,
pool_size=16,
lock=threading.Lock(),
username=None
):
self._conn_timeout = conn_timeout
self._read_timeout = read_timeout
self._lock = lock
self._pool_free = set()
self._pool_used = set()
self._database = database
self._password = password
self._encoding = encoding
self._pool_size = pool_size
self._close_on_err = False
self._cluster = False
self._username = username
@property
def conn_timeout(self):
""" Return configured connection timeout
:return: float
"""
return self._conn_timeout
@property
def read_timeout(self):
""" Return configured read timeout
:return: float
"""
return self._read_timeout
@property
def database(self):
""" Return configured database.
:return: int
"""
return self._database
@property
def password(self):
""" Return configured password for this pool.
:return: str, None
"""
return self._password
@property
def encoding(self):
""" Return configured encoding
:return: str, None
"""
return self._encoding
@property
def pool_size(self):
""" Return, or adjust the current pool size.
shrinking is implemented via closing unused connections.
        if there are not enough unused connections to fulfil the shrink request,
connections returned via pool.release are closed.
:return: int, None
"""
return self._pool_size
@pool_size.setter
def pool_size(self, size):
try:
self._lock.acquire()
self._pool_size = size
current_size = len(self._pool_free) + len(self._pool_used)
while current_size > size:
try:
client = self._pool_free.pop()
client.close()
current_size -= 1
except KeyError:
break
finally:
self._lock.release()
@property
def close_on_err(self):
return self._close_on_err
@property
def username(self):
return self._username
def _connect(self):
        raise NotImplementedError
def acquire(self):
""" Acquire a client connection from the pool.
:return: redis.Client, exception
"""
try:
self._lock.acquire()
client = self._pool_free.pop()
self._pool_used.add(client)
except KeyError:
if len(self._pool_used) < self.pool_size:
client = self._connect()
self._pool_used.add(client)
else:
raise PyRedisError('Max connections {0} exhausted'.format(self.pool_size))
finally:
self._lock.release()
return client
def release(self, conn):
""" Return a client connection to the pool.
:param conn:
redis.Client instance, managed by this pool.
:return: None
"""
try:
self._lock.acquire()
current_size = len(self._pool_free) + len(self._pool_used)
self._pool_used.remove(conn)
if conn.closed and self.close_on_err:
for conn in self._pool_free:
conn.close()
self._pool_free = set()
self._pool_used = set()
elif not conn.closed:
if current_size > self.pool_size:
conn.close()
else:
self._pool_free.add(conn)
except KeyError:
conn.close()
finally:
self._lock.release()
class ClusterPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Redis Cluster Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param seeds:
Accepts a list of seed nodes in this form: [('host1', 6379), ('host2', 6379), ('host3', 6379)]
    :type seeds: list
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
In case there is a chunk move ongoing, while executing a command, how many times should
we try to find the right node, before giving up.
:type retries: int
"""
def __init__(
self,
seeds,
slave_ok=False,
password=<PASSWORD>,
username=None,
**kwargs
):
super().__init__(password=password, **kwargs)
self._map = ClusterMap(
seeds=seeds, password=password, username=username
)
self._slave_ok = slave_ok
self._cluster = True
@property
def slave_ok(self):
""" True if this pool will return slave connections
:return: bool
"""
return self._slave_ok
def _connect(self):
return ClusterClient(
database=self.database,
password=<PASSWORD>,
encoding=self.encoding,
slave_ok=self.slave_ok,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
cluster_map=self._map,
username=self.username
)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
class HashPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Pool for straight connections to Redis
Inherits all the arguments, methods and attributes from BasePool.
The Client will calculate a crc16 hash using the shard_key,
    which is by default the first Key in case the command supports multiple keys.
If the Key is using the TAG annotation "bla{tag}blarg",
then only the tag portion is used, in this case "tag".
The key space is split into 16384 buckets, so in theory you could provide
a list with 16384 ('host', port) pairs to the "buckets" parameter.
    If you have less than 16384 ('host', port) pairs, the client will try to
distribute the key spaces evenly between available pairs.
--- Warning ---
    Since this is static hashing, the order of pairs has to match on each client you use!
Also changing the number of pairs will change the mapping between buckets and pairs,
rendering your data inaccessible!
:param buckets:
list of ('host', port) pairs, where each pair represents a bucket
example: [('localhost', 7001), ('localhost', 7002), ('localhost', 7003)]
    :type buckets: list
"""
def __init__(self, buckets, **kwargs):
super().__init__(**kwargs)
self._buckets = buckets
self._cluster = True
@property
def buckets(self):
""" Return configured buckets.
:return: list
"""
return self._buckets
def _connect(self):
return HashClient(
buckets=self.buckets,
database=self.database,
password=self.password,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
class Pool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Pool for straight connections to Redis
Inherits all the arguments, methods and attributes from BasePool.
:param host:
Host IP or Name to connect,
can only be set when unix_sock is None.
:type host: str
:param port:
Port to connect, only used when host is also set.
:type port: int
:param unix_sock:
Unix Socket to connect,
can only be set when host is None.
:type unix_sock: str
"""
def __init__(self, host=None, port=6379, unix_sock=None, **kwargs):
if not bool(host) != bool(unix_sock):
raise PyRedisError("Ether host or unix_sock has to be provided")
super().__init__(**kwargs)
self._host = host
self._port = port
self._unix_sock = unix_sock
@property
def host(self):
""" Return configured host.
:return: str, None
"""
return self._host
@property
def port(self):
""" Return configured port.
:return: int
"""
return self._port
@property
def unix_sock(self):
""" Return configured Unix socket.
:return: str, None
"""
return self._unix_sock
def _connect(self):
return Client(
host=self.host,
port=self.port,
unix_sock=self.unix_sock,
database=self.database,
password=<PASSWORD>.password,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def execute(self, *args):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args)
finally:
self.release(conn)
class SentinelHashPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Sentinel backed Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param sentinels:
Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)]
:type sentinels: list
:param buckets:
        list of Sentinel managed replication sets which make up this HashPool
    :type buckets: list
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
In case a sentinel delivers stale data, how many other sentinels should be tried.
:type retries: int
:param sentinel_password:
Password used for authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_password: str
:param sentinel_username:
Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_username: str
"""
def __init__(
self,
sentinels,
buckets,
slave_ok=False,
retries=3,
sentinel_password=None,
sentinel_username=None,
**kwargs
):
super().__init__(**kwargs)
self._sentinel = SentinelClient(
sentinels=sentinels,
password=sentinel_password,
username=sentinel_username
)
self._buckets = buckets
self._slave_ok = slave_ok
self._retries = retries
self._close_on_err = True
self._cluster = True
@property
def slave_ok(self):
""" True if this pool return slave connections
:return: bool
"""
return self._slave_ok
@property
def buckets(self):
""" Name of the configured Sentinel managed cluster.
:return: str
"""
return self._buckets
@property
def retries(self):
""" Number of retries in case of stale sentinel.
:return: int
"""
return self._retries
@property
def sentinels(self):
""" Deque with configured sentinels.
:return: deque
"""
return self._sentinel.sentinels
def _connect(self):
if self.slave_ok:
client = self._get_slaves()
else:
client = self._get_masters()
if client:
return client
def _get_hash_client(self, buckets):
return HashClient(
buckets=buckets,
database=self.database,
password=<PASSWORD>,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def _get_master(self, bucket):
candidate = self._sentinel.get_master(bucket)
host = candidate[b'ip'].decode('utf8')
port = int(candidate[b'port'])
return host, port
def _get_masters(self):
buckets = list()
for bucket in self.buckets:
_counter = self.retries
while _counter >= 0:
_counter -= 1
_bucket = self._get_master(bucket)
if _bucket:
buckets.append(_bucket)
break
if _counter == 0:
raise PyRedisConnError("Could not connect to bucket {0}".format(bucket))
return self._get_hash_client(buckets=buckets)
def _get_slave(self, bucket):
candidates = []
for candidate in self._sentinel.get_slaves(bucket):
candidates.append((candidate[b'ip'], int(candidate[b'port'])))
shuffle(candidates)
host = candidates[0][0].decode('utf8')
port = int(candidates[0][1])
return host, port
def _get_slaves(self):
buckets = list()
for bucket in self.buckets:
_counter = self.retries
while _counter >= 0:
_counter -= 1
_bucket = self._get_slave(bucket)
if _bucket:
buckets.append(_bucket)
break
if _counter == 0:
raise PyRedisConnError("Could not connect to bucket {0}".format(bucket))
return self._get_hash_client(buckets=buckets)
def execute(self, *args, **kwargs):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args, **kwargs)
finally:
self.release(conn)
class SentinelPool(
BasePool,
commands.Connection,
commands.Hash,
commands.HyperLogLog,
commands.Key,
commands.List,
commands.Publish,
commands.Scripting,
commands.Set,
commands.SSet,
commands.String,
):
""" Sentinel backed Pool.
Inherits all the arguments, methods and attributes from BasePool.
:param sentinels:
Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)]
:type sentinels: list
:param name:
Name of the cluster managed by sentinel, that this pool should manage.
:type name: str
:param slave_ok:
Defaults to False. If True, this pool will return connections to slave instances.
:type slave_ok: bool
:param retries:
In case a sentinel delivers stale data, how many other sentinels should be tried.
:type retries: int
:param sentinel_password:
Password used for authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_password: str
:param sentinel_username:
Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done.
Only available starting with Redis 5.0.1.
:type sentinel_username: str
"""
def __init__(
self,
sentinels,
name,
slave_ok=False,
retries=3,
sentinel_password=<PASSWORD>,
sentinel_username=None,
**kwargs
):
super().__init__(**kwargs)
self._sentinel = SentinelClient(
sentinels=sentinels,
password=<PASSWORD>,
username=sentinel_username
)
self._name = name
self._slave_ok = slave_ok
self._retries = retries
self._close_on_err = True
@property
def slave_ok(self):
""" True if this pool return slave connections
:return: bool
"""
return self._slave_ok
@property
def name(self):
""" Name of the configured Sentinel managed cluster.
:return: str
"""
return self._name
@property
def retries(self):
""" Number of retries in case of stale sentinel.
:return: int
"""
return self._retries
@property
def sentinels(self):
""" Deque with configured sentinels.
:return: deque
"""
return self._sentinel.sentinels
def _connect(self):
for _ in range(self.retries):
if self.slave_ok:
client = self._get_slave()
else:
client = self._get_master()
if client:
return client
raise PyRedisConnError("Could not connect to Redis")
def _get_client(self, host, port):
return Client(
host=host,
port=port,
database=self.database,
password=<PASSWORD>,
encoding=self.encoding,
conn_timeout=self.conn_timeout,
read_timeout=self.read_timeout,
username=self.username
)
def _get_master(self):
candidate = self._sentinel.get_master(self.name)
host = candidate[b'ip']
port = int(candidate[b'port'])
client = self._get_client(host, port)
return client
def _get_slave(self):
candidates = []
for candidate in self._sentinel.get_slaves(self.name):
candidates.append((candidate[b'ip'], int(candidate[b'port'])))
shuffle(candidates)
host = candidates[0][0]
port = int(candidates[0][1])
client = self._get_client(host, port)
return client
def execute(self, *args):
""" Execute arbitrary redis command.
:param args:
:type args: list, int, float
:return: result, exception
"""
conn = self.acquire()
try:
return conn.execute(*args)
finally:
self.release(conn)
| en | 0.732306 | Base Class for all other pools. All other pools inherit from this base class. This class itself, cannot be used directly. :param database: Select which db should be used for this pool :type database: int :param password: Password used for authentication. If None, no authentication is done :type password: str :param encoding: Convert result strings with this encoding. If None, no encoding is done. :type encoding: str :param conn_timeout: Connect Timeout. :type conn_timeout: float :param read_timeout: Read Timeout. :type read_timeout: float :param pool_size: Upper limit of connections this pool can handle. :type pool_size: int :param lock: Class implementing a Lock. :type lock: _lock object, defaults to threading.Lock :param username: Username used for acl scl authentication. If not set, fall back use legacy auth. :type username: str Return configured connection timeout :return: float Return configured read timeout :return: float Return configured database. :return: int Return configured password for this pool. :return: str, None Return configured encoding :return: str, None Return, or adjust the current pool size. shrinking is implemented via closing unused connections. if there not enough unused connections to fulfil the shrink request, connections returned via pool.release are closed. :return: int, None Acquire a client connection from the pool. :return: redis.Client, exception Return a client connection to the pool. :param conn: redis.Client instance, managed by this pool. :return: None Redis Cluster Pool. Inherits all the arguments, methods and attributes from BasePool. :param seeds: Accepts a list of seed nodes in this form: [('host1', 6379), ('host2', 6379), ('host3', 6379)] :type sentinels: list :param slave_ok: Defaults to False. If True, this pool will return connections to slave instances. :type slave_ok: bool :param retries: In case there is a chunk move ongoing, while executing a command, how many times should we try to find the right node, before giving up. :type retries: int True if this pool will return slave connections :return: bool Execute arbitrary redis command. :param args: :type args: list, int, float :return: result, exception Pool for straight connections to Redis Inherits all the arguments, methods and attributes from BasePool. The Client will calculate a crc16 hash using the shard_key, which is be default the first Key in case the command supports multiple keys. If the Key is using the TAG annotation "bla{tag}blarg", then only the tag portion is used, in this case "tag". The key space is split into 16384 buckets, so in theory you could provide a list with 16384 ('host', port) pairs to the "buckets" parameter. If you have less then 16384 ('host', port) pairs, the client will try to distribute the key spaces evenly between available pairs. --- Warning --- Since this is static hashing, the the order of pairs has to match on each client you use! Also changing the number of pairs will change the mapping between buckets and pairs, rendering your data inaccessible! :param buckets: list of ('host', port) pairs, where each pair represents a bucket example: [('localhost', 7001), ('localhost', 7002), ('localhost', 7003)] :type port: list Return configured buckets. :return: list Execute arbitrary redis command. :param args: :type args: list, int, float :return: result, exception Pool for straight connections to Redis Inherits all the arguments, methods and attributes from BasePool. 
:param host: Host IP or Name to connect, can only be set when unix_sock is None. :type host: str :param port: Port to connect, only used when host is also set. :type port: int :param unix_sock: Unix Socket to connect, can only be set when host is None. :type unix_sock: str Return configured host. :return: str, None Return configured port. :return: int Return configured Unix socket. :return: str, None Execute arbitrary redis command. :param args: :type args: list, int, float :return: result, exception Sentinel backed Pool. Inherits all the arguments, methods and attributes from BasePool. :param sentinels: Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)] :type sentinels: list :param buckets: list of Sentinel managed replications sets which make up this HashPool :type name: list :param slave_ok: Defaults to False. If True, this pool will return connections to slave instances. :type slave_ok: bool :param retries: In case a sentinel delivers stale data, how many other sentinels should be tried. :type retries: int :param sentinel_password: Password used for authentication of Sentinel instance itself. If None, no authentication is done. Only available starting with Redis 5.0.1. :type sentinel_password: str :param sentinel_username: Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done. Only available starting with Redis 5.0.1. :type sentinel_username: str True if this pool return slave connections :return: bool Name of the configured Sentinel managed cluster. :return: str Number of retries in case of stale sentinel. :return: int Deque with configured sentinels. :return: deque Execute arbitrary redis command. :param args: :type args: list, int, float :return: result, exception Sentinel backed Pool. Inherits all the arguments, methods and attributes from BasePool. :param sentinels: Accepts a list of sentinels in this form: [('sentinel1', 26379), ('sentinel2', 26379), ('sentinel3', 26379)] :type sentinels: list :param name: Name of the cluster managed by sentinel, that this pool should manage. :type name: str :param slave_ok: Defaults to False. If True, this pool will return connections to slave instances. :type slave_ok: bool :param retries: In case a sentinel delivers stale data, how many other sentinels should be tried. :type retries: int :param sentinel_password: Password used for authentication of Sentinel instance itself. If None, no authentication is done. Only available starting with Redis 5.0.1. :type sentinel_password: str :param sentinel_username: Username used for acl style authentication of Sentinel instance itself. If None, no authentication is done. Only available starting with Redis 5.0.1. :type sentinel_username: str True if this pool return slave connections :return: bool Name of the configured Sentinel managed cluster. :return: str Number of retries in case of stale sentinel. :return: int Deque with configured sentinels. :return: deque Execute arbitrary redis command. :param args: :type args: list, int, float :return: result, exception | 2.739195 | 3 |
config/local/grafana/dashboards/example.dashboard.py | m-lab/prometheus-support | 12 | 6632330 | from grafanalib.core import *
dashboard = Dashboard(
title='Frontend Stats2',
refresh='',
time=Time('now-12h', 'now'),
rows=[
Row(
height=Pixels(500),
panels=[
Graph(
title="Frontend QPS",
dataSource='Prometheus',
targets=[
Target(
expr='sum by(code) (rate(http_requests_total{container="prometheus"}[2m]))',
legendFormat="{{code}}",
refId='A',
),
],
yAxes=[
YAxis(format=OPS_FORMAT),
YAxis(format=SHORT_FORMAT),
],
legend=Legend(
alignAsTable=True,
rightSide=True,
),
),
Graph(
title="Handler latency",
dataSource='Prometheus',
targets=[
Target(
expr='sum by (handler) (rate(http_request_duration_microseconds{quantile="0.9"}[2m]))',
legendFormat="{{handler}}",
refId='A',
),
],
yAxes=single_y_axis(format='µs'),
legend=Legend(
alignAsTable=True,
rightSide=True,
),
),
],
),
],
).auto_panel_ids()
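# Note (illustrative, not part of the original file): a *.dashboard.py script like
# this one is typically rendered to JSON with grafanalib's generate-dashboard tool,
# e.g. `generate-dashboard -o frontend-stats.json example.dashboard.py`, and the
# resulting JSON is what Grafana's dashboard provisioning actually loads.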
| from grafanalib.core import *
dashboard = Dashboard(
title='Frontend Stats2',
refresh='',
time=Time('now-12h', 'now'),
rows=[
Row(
height=Pixels(500),
panels=[
Graph(
title="Frontend QPS",
dataSource='Prometheus',
targets=[
Target(
expr='sum by(code) (rate(http_requests_total{container="prometheus"}[2m]))',
legendFormat="{{code}}",
refId='A',
),
],
yAxes=[
YAxis(format=OPS_FORMAT),
YAxis(format=SHORT_FORMAT),
],
legend=Legend(
alignAsTable=True,
rightSide=True,
),
),
Graph(
title="Handler latency",
dataSource='Prometheus',
targets=[
Target(
expr='sum by (handler) (rate(http_request_duration_microseconds{quantile="0.9"}[2m]))',
legendFormat="{{handler}}",
refId='A',
),
],
yAxes=single_y_axis(format='µs'),
legend=Legend(
alignAsTable=True,
rightSide=True,
),
),
],
),
],
).auto_panel_ids()
| none | 1 | 1.687918 | 2 |
|
machine_learning/dimensionality_reduction/pca/tf_pca/pca_out_of_core_distributed_module/trainer/resnet.py | ryangillard/artificial_intelligence | 4 | 6632331 | <gh_stars>1-10
import tensorflow as tf
class ResNet(object):
"""Class that contains methods that preprocess images through ResNet.
Attributes:
params: dict, user passed parameters.
"""
def __init__(self, params):
"""Initializes `ResNet` class instance.
Args:
params: dict, user passed parameters.
"""
self.params = params
self.resnet_model, self.pooling_layer = self.get_resnet_layers(
input_shape=(
self.params["image_height"],
self.params["image_width"],
self.params["image_depth"]
)
)
def get_resnet_layers(self, input_shape):
"""Gets ResNet layers from ResNet50 model.
Args:
input_shape: tuple, input shape of images.
"""
# Load the ResNet50 model.
resnet50_model = tf.keras.applications.resnet50.ResNet50(
include_top=False,
weights=self.params["resnet_weights"],
input_shape=input_shape
)
resnet50_model.trainable = False
# Create a new Model based on original resnet50 model ended after the
# chosen residual block.
layer_name = self.params["resnet_layer_name"]
resnet50 = tf.keras.Model(
inputs=resnet50_model.input,
outputs=resnet50_model.get_layer(layer_name).output
)
# Add adaptive mean-spatial pooling after the new model.
adaptive_mean_spatial_layer = tf.keras.layers.GlobalAvgPool2D()
return resnet50, adaptive_mean_spatial_layer
def preprocess_image_batch(self, images):
"""Preprocesses batch of images.
Args:
images: tensor, rank 4 image tensor of shape
(batch_size, image_height, image_width, image_depth).
Returns:
Preprocessed images tensor.
"""
images = tf.cast(x=images, dtype=tf.float32)
if self.params["preprocess_input"]:
images = tf.keras.applications.resnet50.preprocess_input(x=images)
return images
def get_image_resnet_feature_vectors(self, images):
"""Gets image ResNet feature vectors.
Args:
images: tensor, rank 4 image tensor of shape
(batch_size, image_height, image_width, image_depth).
Returns:
Processed ResNet feature rank 1 tensor for each image.
"""
preprocessed_images = self.preprocess_image_batch(images=images)
resnet_feature_image = self.resnet_model(inputs=preprocessed_images)
resnet_feature_vector = self.pooling_layer(inputs=resnet_feature_image)
return resnet_feature_vector
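# --- Illustrative sketch, not part of the original module ---
# The params keys mirror exactly what this class reads; the layer name, weights
# choice and the random image batch are assumptions for demonstration only.
def _example_resnet_features():
    params = {
        "image_height": 224,
        "image_width": 224,
        "image_depth": 3,
        "resnet_weights": "imagenet",
        "resnet_layer_name": "conv4_block6_out",
        "preprocess_input": True,
    }
    resnet = ResNet(params=params)
    images = tf.random.uniform(shape=(8, 224, 224, 3), maxval=255.0)
    return resnet.get_image_resnet_feature_vectors(images=images)  # (8, 1024) features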
| import tensorflow as tf
class ResNet(object):
"""Class that contains methods that preprocess images through ResNet.
Attributes:
params: dict, user passed parameters.
"""
def __init__(self, params):
"""Initializes `ResNet` class instance.
Args:
params: dict, user passed parameters.
"""
self.params = params
self.resnet_model, self.pooling_layer = self.get_resnet_layers(
input_shape=(
self.params["image_height"],
self.params["image_width"],
self.params["image_depth"]
)
)
def get_resnet_layers(self, input_shape):
"""Gets ResNet layers from ResNet50 model.
Args:
input_shape: tuple, input shape of images.
"""
# Load the ResNet50 model.
resnet50_model = tf.keras.applications.resnet50.ResNet50(
include_top=False,
weights=self.params["resnet_weights"],
input_shape=input_shape
)
resnet50_model.trainable = False
# Create a new Model based on original resnet50 model ended after the
# chosen residual block.
layer_name = self.params["resnet_layer_name"]
resnet50 = tf.keras.Model(
inputs=resnet50_model.input,
outputs=resnet50_model.get_layer(layer_name).output
)
# Add adaptive mean-spatial pooling after the new model.
adaptive_mean_spatial_layer = tf.keras.layers.GlobalAvgPool2D()
return resnet50, adaptive_mean_spatial_layer
def preprocess_image_batch(self, images):
"""Preprocesses batch of images.
Args:
images: tensor, rank 4 image tensor of shape
(batch_size, image_height, image_width, image_depth).
Returns:
Preprocessed images tensor.
"""
images = tf.cast(x=images, dtype=tf.float32)
if self.params["preprocess_input"]:
images = tf.keras.applications.resnet50.preprocess_input(x=images)
return images
def get_image_resnet_feature_vectors(self, images):
"""Gets image ResNet feature vectors.
Args:
images: tensor, rank 4 image tensor of shape
(batch_size, image_height, image_width, image_depth).
Returns:
Processed ResNet feature rank 1 tensor for each image.
"""
preprocessed_images = self.preprocess_image_batch(images=images)
resnet_feature_image = self.resnet_model(inputs=preprocessed_images)
resnet_feature_vector = self.pooling_layer(inputs=resnet_feature_image)
return resnet_feature_vector | en | 0.787358 | Class that contains methods that preprocess images through ResNet. Attributes: params: dict, user passed parameters. Initializes `ResNet` class instance. Args: params: dict, user passed parameters. Gets ResNet layers from ResNet50 model. Args: input_shape: tuple, input shape of images. # Load the ResNet50 model. # Create a new Model based on original resnet50 model ended after the # chosen residual block. # Add adaptive mean-spatial pooling after the new model. Preprocesses batch of images. Args: images: tensor, rank 4 image tensor of shape (batch_size, image_height, image_width, image_depth). Returns: Preprocessed images tensor. Gets image ResNet feature vectors. Args: images: tensor, rank 4 image tensor of shape (batch_size, image_height, image_width, image_depth). Returns: Processed ResNet feature rank 1 tensor for each image. | 3.051592 | 3 |
pymutation/pymutation.py | kajchang/pymutation | 0 | 6632332 | from functools import reduce
from math import factorial
def pymutation(
elements: list,
event: list,
combination: bool = False
):
if len(event) > len(elements):
raise ValueError('event can\'t be longer than elements')
cache = {}
for condition in event:
if cache.get(condition) is not None:
continue
cache[condition] = {}
for element in elements:
if cache[condition].get(element) is None:
cache[condition][element] = 0
if callable(condition) and condition(element):
cache[condition][element] += 1
elif condition == element:
cache[condition][element] += 1
if combination:
func = nCr
else:
func = nPr
event_set = set(event)
return int(reduce((lambda x, y: x * y), map(lambda x: func(sum(cache[x].values()), event.count(x)), event_set)))
def nPr(n, r):
return factorial(n) / factorial(n - r)
def nCr(n, r):
return factorial(n) / (factorial(r) * factorial(n - r))
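# --- Illustrative sketch, not part of the original file ---
# Counting ordered and unordered draws from a small bag of marbles (made-up data).
if __name__ == '__main__':
    marbles = ['red', 'red', 'green', 'blue']
    # two reds can fill the 'red' slot and one green the 'green' slot -> 2 * 1 = 2
    print(pymutation(marbles, ['red', 'green']))                    # 2 permutations
    print(pymutation(marbles, ['red', 'red'], combination=True))    # 1 way to pick both reds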
| from functools import reduce
from math import factorial
def pymutation(
elements: list,
event: list,
combination: bool = False
):
if len(event) > len(elements):
raise ValueError('event can\'t be longer than elements')
cache = {}
for condition in event:
if cache.get(condition) is not None:
continue
cache[condition] = {}
for element in elements:
if cache[condition].get(element) is None:
cache[condition][element] = 0
if callable(condition) and condition(element):
cache[condition][element] += 1
elif condition == element:
cache[condition][element] += 1
if combination:
func = nCr
else:
func = nPr
event_set = set(event)
return int(reduce((lambda x, y: x * y), map(lambda x: func(sum(cache[x].values()), event.count(x)), event_set)))
def nPr(n, r):
return factorial(n) / factorial(n - r)
def nCr(n, r):
return factorial(n) / (factorial(r) * factorial(n - r))
| none | 1 | 3.011166 | 3 |
|
src/services/solutionsService.py | Hiestaa/ml-bench | 0 | 6632333 | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
from bson.objectid import ObjectId
from baseService import Service
"""
# solutions collection
Describes a solution computed by a solver to a problem, and stores
measurements performed as the algorithm was running and on the final solution,
allowing charts and tables to be drawn showing the relevance of each solution.
Schema:
* _id:ObjectId, id of the solution
* problemId:ObjectId, id of the problem this solution has been computed for
* solverId:ObjectId, id of the instance of solver that was used to compute
this solution.
* solution:list<float> this actual solution found as vectorized data.
Each problem should be able to interpret this vector and transform it into
a semantic, human-understandable solution.
* ... measurements ...
"""
class SolutionsService(Service):
def __init__(self, db):
super(SolutionsService, self).__init__(db, 'solutions')
def schema(self):
return {
'problemId': True,
'solverId': True,
'solution': True,
}
def insert(self, **kwargs):
"""
Insert a new document entry into the "solutions" collection.
The keywords arguments should match the schema defined for this
entry. If the optional `_id` keyword argument is given, the `_id` of
the document will not be automatically generated.
Note: the given `_id` (if any) will be converted to an ObjectId and has
to be a compatible string.
Note bis: the given "problemId" and "solverId" should be ObjectId
compatible strings as well.
"""
logging.debug("Saving new entry: %s" % ", ".join(
reduce(lambda name, value: "%s - %s" % (name, value),
kwargs.iteritems())))
post = self.schema()
for name, value in kwargs.iteritems():
post[name] = value
if name == 'problemId' or name == 'solverId':
post[name] = ObjectId(post[name])
if '_id' in post:
if not isinstance(post['_id'], ObjectId):
post['_id'] = ObjectId(post['_id'])
return self._collection.insert(self.validate(post))
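# --- Illustrative sketch, not part of the original file ---
# How an insert would typically be issued; the database handle, the hex ids and
# the solution vector are placeholder assumptions (baseService.Service not shown).
def _example_insert(db):
    service = SolutionsService(db)
    return service.insert(
        problemId='5f0c1c2e9d1e8a3f4b2a1c3d',
        solverId='5f0c1c2e9d1e8a3f4b2a1c3e',
        solution=[0.12, 3.4, -1.7],
    )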
| # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
from bson.objectid import ObjectId
from baseService import Service
"""
# solutions collection
Describes a solution computed by a solver to a problem, and stores
measurements performed as the algorithm was running and on the final solution,
allowing charts and tables to be drawn showing the relevance of each solution.
Schema:
* _id:ObjectId, id of the solution
* problemId:ObjectId, id of the problem this solution has been computed for
* solverId:ObjectId, id of the instance of solver that was used to compute
this solution.
* solution:list<float> this actual solution found as vectorized data.
Each problem should be able to interpret this vector and transform it into
a semantic, human-understandable solution.
* ... measurements ...
"""
class SolutionsService(Service):
def __init__(self, db):
super(SolutionsService, self).__init__(db, 'solutions')
def schema(self):
return {
'problemId': True,
'solverId': True,
'solution': True,
}
def insert(self, **kwargs):
"""
Insert a new document entry into the "solutions" collection.
The keywords arguments should match the schema defined for this
entry. If the optional `_id` keyword argument is given, the `_id` of
the document will not be automatically generated.
Note: the given `_id` (if any) will be converted to an ObjectId and has
to be a compatible string.
Note bis: the given "problemId" and "solverId" should be ObjectId
compatible strings as well.
"""
logging.debug("Saving new entry: %s" % ", ".join(
reduce(lambda name, value: "%s - %s" % (name, value),
kwargs.iteritems())))
post = self.schema()
for name, value in kwargs.iteritems():
post[name] = value
if name == 'problemId' or name == 'solverId':
post[name] = ObjectId(post[name])
if '_id' in post:
if not isinstance(post['_id'], ObjectId):
post['_id'] = ObjectId(post['_id'])
return self._collection.insert(self.validate(post))
| en | 0.875162 | # -*- coding: utf8 -*- # solutions collection Describes a solution computed by a solver to a problem, and contains stores performed as the algorithm was running and on the final solution allowing to draw chart and tables showing the relevance of each solution. Schema: * _id:ObjectId, id of the solution * problemId:ObjectId, id of the problem this solution has been computed for * solverId:ObjectId, id of the instance of solver that was used to compute this solution. * solution:list<float> this actual solution found as vectorized data. Each problem should be able to interpret this vector and transform is as a semantic, human-understandable solution. * ... measurements ... Insert a new document entry into the "solutions" collection. The keywords arguments should match the schema defined for this entry. If the optional `_id` keyword argument is given, the `_id` of the document will not be automatically generated. Note: the given `_id` (if any) will be converted to an ObjectId and has to be a compatible string. Note bis: the given "problemId" and "solverId" should be ObjectId compatible strings as well. | 2.673004 | 3 |
projetoBancoPoo/main.py | gfcarvalho2304/PythonExercicios | 0 | 6632334 | <gh_stars>0
from banco import Banco
from cliente import Cliente
from conta import ContaCorrente, ContaPoupanca
banco = Banco()
cliente1 = Cliente ('Gabriel', 32)
cliente2 = Cliente('Tamires', 31)
cliente3 = Cliente('<NAME>', 63)
conta1 = ContaPoupanca(1111, 230489, 0)
conta2 = ContaCorrente(2222, 111289, 0)
conta3 = ContaPoupanca(1212, 110101, 0)
cliente1.inserirConta(conta1)
cliente2.inserirConta(conta2)
cliente3.inserirConta(conta3)
banco.inserirCliente(cliente1)
banco.inserirConta(conta1)
banco.inserirCliente(cliente2)
banco.inserirConta(conta2)
if banco.autenticar(cliente1):
cliente1.conta.depositar(0)
cliente1.conta.sacar(20)
else:
print('Cliente não autenticado.')
print('#' * 30)
if banco.autenticar(cliente2):
cliente2.conta.depositar(0)
cliente2.conta.sacar(20)
else:
print('Cliente não autenticado.') | from banco import Banco
from cliente import Cliente
from conta import ContaCorrente, ContaPoupanca
banco = Banco()
cliente1 = Cliente ('Gabriel', 32)
cliente2 = Cliente('Tamires', 31)
cliente3 = Cliente('<NAME>', 63)
conta1 = ContaPoupanca(1111, 230489, 0)
conta2 = ContaCorrente(2222, 111289, 0)
conta3 = ContaPoupanca(1212, 110101, 0)
cliente1.inserirConta(conta1)
cliente2.inserirConta(conta2)
cliente3.inserirConta(conta3)
banco.inserirCliente(cliente1)
banco.inserirConta(conta1)
banco.inserirCliente(cliente2)
banco.inserirConta(conta2)
if banco.autenticar(cliente1):
cliente1.conta.depositar(0)
cliente1.conta.sacar(20)
else:
print('Cliente não autenticado.')
print('#' * 30)
if banco.autenticar(cliente2):
cliente2.conta.depositar(0)
cliente2.conta.sacar(20)
else:
print('Cliente não autenticado.') | none | 1 | 2.793387 | 3 |
|
nctuLab1/src/sender.py | CyCTW/NCTU-Computer-Network | 0 | 6632335 | <gh_stars>0
#!/usr/bin/env python
import argparse
from scapy.all import *
# Import protocol
from Protocol import Protocol
# Set source and destination IP address (Task 3.)
src_ip = '10.0.1.1'
dst_ip = '10.0.1.2'
# Set source and destination port (Task 3.)
src_port = 1024
dst_port = 80
'''
Main function
'''
def main():
# Define IP header (Task 3.)
ip = IP(src = src_ip, dst = dst_ip)
# Define customized header (Task 3.)
my_id = '0616225'
my_dept = 'cs'
my_gender = '2'
student = Protocol(id = my_id, dept = my_dept, gender = my_gender)
# Read file and store into list
count = 0
secret = []
tmp = ''.join(reversed(my_id))
with open('./data/secret.txt', 'r') as file:
for line in file:
line = tmp[count % 7] + line
secret.append(line)
count += 1
# Send packets
for i in range(0, len(secret)):
# TCP connection - SYN / SYN-ACK
tcp_syn = TCP(sport = src_port, dport = dst_port, flags = 'S', seq = 0)
packet = ip / tcp_syn
tcp_syn_ack = sr1(packet)
print '[INFO] Send SYN and receive SYN-ACK'
tcp_syn_ack.show()
# TCP connection - ACK (Task 3.)
ack = tcp_syn_ack.seq + 1
tcp_ack = TCP(sport = src_port, dport = dst_port, flags =
'A', seq = 1, ack = ack)
packet = ip / tcp_ack
send(packet)
print '[INFO] Send ACK'
# Send packet with customized header (Task 3.)
ack = tcp_ack.seq + 1
tcp = TCP(sport = src_port, dport = dst_port, flags = '',
seq = 2, ack = ack)
packet = ip / tcp / student
send(packet)
print '[INFO] Send packet with customized header'
# Send packet with secret payload (Task 3.)
ack = tcp.seq + 1
tcp = TCP(sport = src_port, dport = dst_port, flags = '',
seq = 3, ack = ack)
payload = Raw(secret[i])
packet = ip / tcp / payload
send(packet)
print '[INFO] Send packet with secret payload'
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import argparse
from scapy.all import *
# Import protocol
from Protocol import Protocol
# Set source and destination IP address (Task 3.)
src_ip = '10.0.1.1'
dst_ip = '10.0.1.2'
# Set source and destination port (Task 3.)
src_port = 1024
dst_port = 80
'''
Main function
'''
def main():
# Define IP header (Task 3.)
ip = IP(src = src_ip, dst = dst_ip)
# Define customized header (Task 3.)
my_id = '0616225'
my_dept = 'cs'
my_gender = '2'
student = Protocol(id = my_id, dept = my_dept, gender = my_gender)
# Read file and store into list
count = 0
secret = []
tmp = ''.join(reversed(my_id))
with open('./data/secret.txt', 'r') as file:
for line in file:
line = tmp[count % 7] + line
secret.append(line)
count += 1
# Send packets
for i in range(0, len(secret)):
# TCP connection - SYN / SYN-ACK
tcp_syn = TCP(sport = src_port, dport = dst_port, flags = 'S', seq = 0)
packet = ip / tcp_syn
tcp_syn_ack = sr1(packet)
print '[INFO] Send SYN and receive SYN-ACK'
tcp_syn_ack.show()
# TCP connection - ACK (Task 3.)
ack = tcp_syn_ack.seq + 1
tcp_ack = TCP(sport = src_port, dport = dst_port, flags =
'A', seq = 1, ack = ack)
packet = ip / tcp_ack
send(packet)
print '[INFO] Send ACK'
# Send packet with customized header (Task 3.)
ack = tcp_ack.seq + 1
tcp = TCP(sport = src_port, dport = dst_port, flags = '',
seq = 2, ack = ack)
packet = ip / tcp / student
send(packet)
print '[INFO] Send packet with customized header'
# Send packet with secret payload (Task 3.)
ack = tcp.seq + 1
tcp = TCP(sport = src_port, dport = dst_port, flags = '',
seq = 3, ack = ack)
payload = Raw(secret[i])
packet = ip / tcp / payload
send(packet)
print '[INFO] Send packet with secret payload'
if __name__ == '__main__':
main() | en | 0.67298 | #!/usr/bin/env python # Import protocol # Set source and destination IP address (Task 3.) # Set source and destination port (Task 3.) Main function # Define IP header (Task 3.) # Define customized header (Task 3.) # Read file and store into list # Send packets # TCP connection - SYN / SYN-ACK # TCP connection - ACK (Task 3.) # Send packet with customized header (Task 3.) # Send packet with secret payload (Task 3.) | 3.215603 | 3 |
Core/Solvers/MTSSP/PRDP_Data_Processing.py | zztcok/SNAC_PSNAC | 1 | 6632336 | <filename>Core/Solvers/MTSSP/PRDP_Data_Processing.py
import os
import sys
import pdb
import itertools
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)))
import MSSP.scenario_class as scenario_class
class MTSSP_PRDP_Data_Processing:
def __init__(self, model_data):
## List of parameters to fix
## Time independent
self.product = []
self.stage_gate = []
self.time_step = []
self.resource_type = []
self.resource_max = {}
self.gammaL = {}
self.gammaD = {}
self.duration = {}
self.trial_cost = {}
self.resource_required = {}
self.revenue_max = {}
self.success = {}
self.running_revenue = {}
self.open_revenue = {}
self.discounting_factor ={}
self.Scenario_Generation(model_data)
##Set product
self.product = model_data['product'][None]
##Set stage_gate
self.stage_gate = model_data['trial'][None]
## Set time step
self.time_step = model_data['time_step'][None]
##Set resource type
self.resource_type = model_data['resource_type'][None]
## Set resource_max
for items in model_data['max_resource']:
self.resource_max[items[0]] = model_data['max_resource'][items]
## Set GammaL
for items in model_data['gammaL']:
self.gammaL[items[0]] = model_data['gammaL'][items]
## Set GammaD
for items in model_data['gammaD']:
self.gammaD[items[0]] = model_data['gammaD'][items]
## Set duration
self.duration = model_data['trial_duration']
## Set trial cost
self.trial_cost = model_data['trial_cost']
## Set resources required
self.resource_required = model_data['resource_requirement']
## Set revenue_max
for items in model_data['maximum_revenue']:
self.revenue_max[items[0]] = model_data['maximum_revenue'][items]
## Set Last Time Step
self.Last_Time_Step = len(self.time_step)
## Set Last Trial
self.last_trial = len(self.stage_gate)
##Calculate Success matrix
self.success = self.calc_success(self.product, len(self.stage_gate), self.List_of_Scenarios)
## Calculate running rev
self.running_revenue = self.calc_rr(self.revenue_max,self.gammaL,self.duration, self.product, self.stage_gate, self.time_step)
##Calculate open rev
self.open_revenue = self.calc_openrev(self.revenue_max,self.gammaL,self.duration, self.product, self.stage_gate, self.time_step, self.Last_Time_Step)
##Calculate Discounting Factor
self.discounting_factor = self.calc_discounting_factor(self.revenue_max,self.gammaL,self.trial_cost, self.product, self.stage_gate, self.Last_Time_Step)
def calc_success(self,product, num_trial, List_of_Scenarios):
## Generates a matrix based on the success of each product in each scenario
success = {}
try:
len(List_of_Scenarios)
for scenario in List_of_Scenarios:
oc = 0
while oc < len(List_of_Scenarios[scenario].outcome):
coords = (product[oc], scenario)
if List_of_Scenarios[scenario].outcome[oc] == num_trial:
success[coords] = 1
else:
success[coords] = 0
oc += 1
except:
oc = 0
while oc < len(List_of_Scenarios.outcome):
coords = (product[oc])
if List_of_Scenarios.outcome[oc] == num_trial:
success[coords] = 1
else:
success[coords] = 0
oc += 1
return success
def calc_rr(self,revenue_max,gammaL,duration, product, trial, time_step):
##Calculates the Running Revenue according to the formulation given by Colvin
rr = {}
for i in product:
for j in trial:
for t in time_step:
rr[(i,j,t)] = revenue_max[i] - gammaL[i] * ( t + sum(duration[(i,k)] for k in trial if k >= j))
return rr
def calc_openrev(self,revenue_max,gammaL,duration, product, stage_gate, time_step, Last_Time_Step):
##Calculates the Open Revenue according to the formulation given by Colvin
opnrev = {}
for i in product:
for j in stage_gate:
opnrev[(i,j)] = revenue_max[i] - gammaL[i] * ( Last_Time_Step + sum(duration[(i,k)] for k in stage_gate if k >= j))
return opnrev
def calc_discounting_factor(self,revenue_max,gammaL,trial_cost, product, stage_gate, Last_Time_Step):
##Calculates the discounting factor according to the formulation given by Colvin
fij = {}
for i in product:
for j in stage_gate:
top = .9 * revenue_max[i] - gammaL[i]* Last_Time_Step - sum(trial_cost[(i,k)] for k in stage_gate if k >= j)
bottom = (revenue_max[i] - gammaL[i] * Last_Time_Step)
fij[(i,j)] = top/bottom
return fij
def Scenario_Generation(self,model_data):
### Determine the set size of the independent variables (products, trials, and time steps)
num_product = len(model_data['product'][None])
num_trial = len(model_data['trial'][None])
num_ts = len(model_data['time_step'][None])
### Generate Outcomes
self.Outcomes = itertools.product(range(num_trial + 1), repeat = num_product)
self.Outcomes = tuple(self.Outcomes)
### Generate Empty Variables
self.List_of_Scenarios = {}
self.SS=[]
prod = model_data['product'][None]
sg = model_data['trial'][None]
prob = model_data['probability']
### Initialize Scenario Counter
scenario = 1
### Name and generate Scenario Objects
for items in self.Outcomes:
scenario_name = scenario
self.List_of_Scenarios[scenario_name] = scenario_class.scenario(items,prob,prod,sg)
self.SS.append(scenario_name)
scenario += 1
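# Illustrative note on the enumeration above (hypothetical sizes): with 2 products and
# 3 trials, Scenario_Generation builds itertools.product(range(4), repeat=2), i.e.
# 4**2 = 16 outcome tuples; an outcome of (0, 3) means the first product fails its first
# trial while the second product completes all three.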
def PRDP_Realization(s,ts,model_data, results):
### Generate new empty Scenario Set
intermediate = []
scenario_set = []
#######################################
### Define Parameters from model_data
#######################################
### List of products
prod = model_data.Product
### List of stages
sg = model_data.Stage_Gate
### List of the duration of each trial
duration = model_data.Duration
### For all combinations of drug/trial pairs
for i in prod:
for j in sg:
### If the trial could have been started in the planning horizon
if ts-duration[(i,j)] >= 0:
### When would it have started
previous = ts-duration[(i,j)]
				### Define the indices of the drug trial pair
index = prod.index(i)
jndex = sg.index(j)
#Check to see if the trial was started at that point
try:
### Check to see if scenario set has scenarios in it
s[0]
### If the trial was started
					if results[s[0]][index][jndex][previous] == 1:
### If the new set is empty
if intermediate == []:
### Create New Outcome Sets
p = []
f = []
### For all scenarios in the scenario set
for items in s:
### If the trial is successful add to pass set
if model_data.List_of_Scenarios[items].outcome[index] > jndex:
p.append(items)
### Otherwise add to fail set
else:
f.append(items)
### Add subsets to the New Set
intermediate.append(p)
intermediate.append(f)
else:
### Duplicate the Intermediate Variable
intermediate2 = list(intermediate)
### See which Items need to be replaced
for items in intermediate2:
### Generate New Outcome Sets
p = []
f = []
### Determine the index of the scenario set
itemtoreplace = intermediate.index(items)
### Sort scenarios based on outcome
for k in items:
if model_data.List_of_Scenarios[k].outcome[index] > jndex:
p.append(k)
else:
f.append(k)
intermediate[itemtoreplace] = p
intermediate.append(f)
				except IndexError:
					# the scenario set is empty, so there is no realization to branch on
					pass
#Set the Scenario Subsets
if intermediate == []:
scenario_set.append(s)
else:
scenario_set += intermediate
return scenario_set
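# Illustrative note on the splitting above (hypothetical indices): if the realized trial is
# product index 0, trial index 1, every scenario whose outcome[0] > 1 goes into the "pass"
# subset p and the rest into the "fail" subset f, so one scenario set becomes two.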
def results_matrix_generator(mtssp_data):
## Generate Results Lists
Scenario_Results = {}
for items in mtssp_data.SS:
ibox = []
for i in mtssp_data.Product:
jbox = []
for j in mtssp_data.Stage_gate:
				tbox = [0] * len(mtssp_data.time_step)  # one slot per time step (num_ts is not defined in this scope)
jbox.append(tbox)
ibox.append(jbox)
Scenario_Results[items] = ibox
return Scenario_Results
def resource_utilization(ts,mtssp_data,Scenario_Results,scenarios):
### determine the number of scenarios in the current set
if len(scenarios) <= 1:
### If the length is less than or equal to one then we have a realization
pass
else:
### Get a scenario
sis = scenarios[0]
### Count the resource utilization
resource_count = {}
for r in mtssp_data.resource_type:
resource_count[r] = 0
for i in mtssp_data.Product:
for j in mtssp_data.Stage_Gate:
index = mtssp_data.Product.index(i)
jndex = mtssp_data.Stage_Gate.index(j)
tpr = 0
while tpr < ts:
if Scenario_Results[sis][index][jndex][tpr] == 1:
if tpr > ts - mtssp_data.duration[(i,j)]:
for r in mtssp_data.resource_type:
resource_count[r] += mtssp_data.resource_required[(i,j,r)]
tpr += 1
return resource_count
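# Illustrative note on the counting above (hypothetical numbers): a trial with duration 3
# that was started at time step 4 still occupies its resources at ts = 6, because 4 > 6 - 3;
# a trial started at time step 2 has already finished by then and is not counted.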
| <filename>Core/Solvers/MTSSP/PRDP_Data_Processing.py
import os
import sys
import pdb
import itertools
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)))
import MSSP.scenario_class as scenario_class
class MTSSP_PRDP_Data_Processing:
def __init__(self, model_data):
## List of parameters to fix
## Time independent
self.product = []
self.stage_gate = []
self.time_step = []
self.resource_type = []
self.resource_max = {}
self.gammaL = {}
self.gammaD = {}
self.duration = {}
self.trial_cost = {}
self.resource_required = {}
self.revenue_max = {}
self.success = {}
self.running_revenue = {}
self.open_revenue = {}
self.discounting_factor ={}
self.Scenario_Generation(model_data)
##Set product
self.product = model_data['product'][None]
##Set stage_gate
self.stage_gate = model_data['trial'][None]
## Set time step
self.time_step = model_data['time_step'][None]
##Set resource type
self.resource_type = model_data['resource_type'][None]
## Set resource_max
for items in model_data['max_resource']:
self.resource_max[items[0]] = model_data['max_resource'][items]
## Set GammaL
for items in model_data['gammaL']:
self.gammaL[items[0]] = model_data['gammaL'][items]
## Set GammaD
for items in model_data['gammaD']:
self.gammaD[items[0]] = model_data['gammaD'][items]
## Set duration
self.duration = model_data['trial_duration']
## Set trial cost
self.trial_cost = model_data['trial_cost']
## Set resources required
self.resource_required = model_data['resource_requirement']
## Set revenue_max
for items in model_data['maximum_revenue']:
self.revenue_max[items[0]] = model_data['maximum_revenue'][items]
## Set Last Time Step
self.Last_Time_Step = len(self.time_step)
## Set Last Trial
self.last_trial = len(self.stage_gate)
##Calculate Success matrix
self.success = self.calc_success(self.product, len(self.stage_gate), self.List_of_Scenarios)
## Calculate running rev
self.running_revenue = self.calc_rr(self.revenue_max,self.gammaL,self.duration, self.product, self.stage_gate, self.time_step)
##Calculate open rev
self.open_revenue = self.calc_openrev(self.revenue_max,self.gammaL,self.duration, self.product, self.stage_gate, self.time_step, self.Last_Time_Step)
##Calculate Discounting Factor
self.discounting_factor = self.calc_discounting_factor(self.revenue_max,self.gammaL,self.trial_cost, self.product, self.stage_gate, self.Last_Time_Step)
def calc_success(self,product, num_trial, List_of_Scenarios):
## Generates a matrix based on the success of each product in each scenario
success = {}
try:
len(List_of_Scenarios)
for scenario in List_of_Scenarios:
oc = 0
while oc < len(List_of_Scenarios[scenario].outcome):
coords = (product[oc], scenario)
if List_of_Scenarios[scenario].outcome[oc] == num_trial:
success[coords] = 1
else:
success[coords] = 0
oc += 1
		except TypeError:
			# List_of_Scenarios is a single scenario object rather than a dict of scenarios
oc = 0
while oc < len(List_of_Scenarios.outcome):
coords = (product[oc])
if List_of_Scenarios.outcome[oc] == num_trial:
success[coords] = 1
else:
success[coords] = 0
oc += 1
return success
def calc_rr(self,revenue_max,gammaL,duration, product, trial, time_step):
##Calculates the Running Revenue according to the formulation given by Colvin
rr = {}
for i in product:
for j in trial:
for t in time_step:
rr[(i,j,t)] = revenue_max[i] - gammaL[i] * ( t + sum(duration[(i,k)] for k in trial if k >= j))
return rr
def calc_openrev(self,revenue_max,gammaL,duration, product, stage_gate, time_step, Last_Time_Step):
##Calculates the Open Revenue according to the formulation given by Colvin
opnrev = {}
for i in product:
for j in stage_gate:
opnrev[(i,j)] = revenue_max[i] - gammaL[i] * ( Last_Time_Step + sum(duration[(i,k)] for k in stage_gate if k >= j))
return opnrev
def calc_discounting_factor(self,revenue_max,gammaL,trial_cost, product, stage_gate, Last_Time_Step):
##Calculates the discounting factor according to the formulation given by Colvin
fij = {}
for i in product:
for j in stage_gate:
top = .9 * revenue_max[i] - gammaL[i]* Last_Time_Step - sum(trial_cost[(i,k)] for k in stage_gate if k >= j)
bottom = (revenue_max[i] - gammaL[i] * Last_Time_Step)
fij[(i,j)] = top/bottom
return fij
def Scenario_Generation(self,model_data):
### Determine the set size of the independent variables (products, trials, and time steps)
num_product = len(model_data['product'][None])
num_trial = len(model_data['trial'][None])
num_ts = len(model_data['time_step'][None])
### Generate Outcomes
self.Outcomes = itertools.product(range(num_trial + 1), repeat = num_product)
self.Outcomes = tuple(self.Outcomes)
### Generate Empty Variables
self.List_of_Scenarios = {}
self.SS=[]
prod = model_data['product'][None]
sg = model_data['trial'][None]
prob = model_data['probability']
### Initialize Scenario Counter
scenario = 1
### Name and generate Scenario Objects
for items in self.Outcomes:
scenario_name = scenario
self.List_of_Scenarios[scenario_name] = scenario_class.scenario(items,prob,prod,sg)
self.SS.append(scenario_name)
scenario += 1
def PRDP_Realization(s,ts,model_data, results):
### Generate new empty Scenario Set
intermediate = []
scenario_set = []
#######################################
### Define Parameters from model_data
#######################################
### List of products
prod = model_data.Product
### List of stages
sg = model_data.Stage_Gate
### List of the duration of each trial
duration = model_data.Duration
### For all combinations of drug/trial pairs
for i in prod:
for j in sg:
### If the trial could have been started in the planning horizon
if ts-duration[(i,j)] >= 0:
### When would it have started
previous = ts-duration[(i,j)]
				### Define the indices of the drug trial pair
index = prod.index(i)
jndex = sg.index(j)
#Check to see if the trial was started at that point
try:
### Check to see if scenario set has scenarios in it
s[0]
### If the trial was started
					if results[s[0]][index][jndex][previous] == 1:
### If the new set is empty
if intermediate == []:
### Create New Outcome Sets
p = []
f = []
### For all scenarios in the scenario set
for items in s:
### If the trial is successful add to pass set
if model_data.List_of_Scenarios[items].outcome[index] > jndex:
p.append(items)
### Otherwise add to fail set
else:
f.append(items)
### Add subsets to the New Set
intermediate.append(p)
intermediate.append(f)
else:
### Duplicate the Intermediate Variable
intermediate2 = list(intermediate)
### See which Items need to be replaced
for items in intermediate2:
### Generate New Outcome Sets
p = []
f = []
### Determine the index of the scenario set
itemtoreplace = intermediate.index(items)
### Sort scenarios based on outcome
for k in items:
if model_data.List_of_Scenarios[k].outcome[index] > jndex:
p.append(k)
else:
f.append(k)
intermediate[itemtoreplace] = p
intermediate.append(f)
				except IndexError:
					# the scenario set is empty, so there is no realization to branch on
					pass
#Set the Scenario Subsets
if intermediate == []:
scenario_set.append(s)
else:
scenario_set += intermediate
return scenario_set
def results_matrix_generator(mtssp_data):
## Generate Results Lists
Scenario_Results = {}
for items in mtssp_data.SS:
ibox = []
for i in mtssp_data.Product:
jbox = []
for j in mtssp_data.Stage_gate:
				tbox = [0] * len(mtssp_data.time_step)  # one slot per time step (num_ts is not defined in this scope)
jbox.append(tbox)
ibox.append(jbox)
Scenario_Results[items] = ibox
return Scenario_Results
def resource_utilization(ts,mtssp_data,Scenario_Results,scenarios):
### determine the number of scenarios in the current set
if len(scenarios) <= 1:
### If the length is less than or equal to one then we have a realization
pass
else:
### Get a scenario
sis = scenarios[0]
### Count the resource utilization
resource_count = {}
for r in mtssp_data.resource_type:
resource_count[r] = 0
for i in mtssp_data.Product:
for j in mtssp_data.Stage_Gate:
index = mtssp_data.Product.index(i)
jndex = mtssp_data.Stage_Gate.index(j)
tpr = 0
while tpr < ts:
if Scenario_Results[sis][index][jndex][tpr] == 1:
if tpr > ts - mtssp_data.duration[(i,j)]:
for r in mtssp_data.resource_type:
resource_count[r] += mtssp_data.resource_required[(i,j,r)]
tpr += 1
return resource_count
| en | 0.728954 | ## List of parameters to fix ## Time independent ##Set product ##Set stage_gate ## Set time step ##Set resource type ## Set resource_max ## Set GammaL ## Set GammaD ## Set duration ## Set trial cost ## Set resources required ## Set revenue_max ## Set Last Time Step ## Set Last Trial ##Calculate Success matrix ## Calculate running rev ##Calculate open rev ##Calculate Discounting Factor ## Generates a matrix based on the success of each product in each scenario ##Calculates the Running Revenue according to the formulation given by Colvin ##Calculates the Open Revenue according to the formulation given by Colvin ##Calculates the discounting factor according to the formulation given by Colvin ### Determine the set size of the independent variables (products, trials, and time steps) ### Generate Outcomes ### Generate Empty Variables ### Initialize Scenario Counter ### Name and generate Scenario Objects ### Generate new empty Scenario Set ####################################### ### Define Parameters from model_data ####################################### ### List of products ### List of stages ### List of the duration of each trial ### For all combinations of drug/trial pairs ### If the trial could have been started in the planning horizon ### When would it have started ### Define the indicies of the drug trial pair #Check to see if the trial was started at that point ### Check to see if scenario set has scenarios in it ### If the trial was started ### If the new set is empty ### Create New Outcome Sets ### For all scenarios in the scenario set ### If the trial is successful add to pass set ### Otherwise add to fail set ### Add subsets to the New Set ### Duplicate the Intermediate Variable ### See which Items need to be replaced ### Generate New Outcome Sets ### Determine the index of the scenario set ### Sort scenarios based on outcome #Set the Scenario Subsets ## Generate Results Lists ### determine the number of scenarios in the current set ### If the length is less than or equal to one then we have a realization ### Get a scenario ### Count the resource utilization | 2.081779 | 2 |
__main__.py | WillKenzie/CaptureTheFlagServer | 0 | 6632337 | <reponame>WillKenzie/CaptureTheFlagServer<gh_stars>0
from flask import Flask, abort, request, Response
from faker import Faker
fake = Faker('en_US')
app = Flask(__name__)
@app.route('/users')
def users():
return("{{[for i in range(1000) {'email':fake.ascii_email(), 'password':fake.ascii()}]}}")
@app.route('/database')
def database():
    if request.authorization is None:
abort(503)
else:
return "[]"
@app.route('/posts', methods=['GET', 'PUT'])
def posts():
if request.cookies:
        return Response(status=200)  # a bare int is not a valid Flask view response
else:
abort(503)
@app.route('/admin')
def admin():
abort(503)
@app.route('/api')
def api():
    if request.authorization is None:
        abort(503)
    return Response(status=200)  # minimal success response once credentials are supplied
@app.route('/api/admin')
def apiadmin():
    abort(503)  # placeholder response, mirroring the /admin handler
@app.route('/')
def index():  # the root view needs its own function name; reusing users() would overwrite the /users endpoint
abort(404) | from flask import Flask, abort, request, Response
from faker import Faker
fake = Faker('en_US')
app = Flask(__name__)
@app.route('/users')
def users():
return("{{[for i in range(1000) {'email':fake.ascii_email(), 'password':fake.ascii()}]}}")
@app.route('/database')
def database():
    if request.authorization is None:
abort(503)
else:
return "[]"
@app.route('/posts', methods=['GET', 'PUT'])
def posts():
if request.cookies:
        return Response(status=200)  # a bare int is not a valid Flask view response
else:
abort(503)
@app.route('/admin')
def admin():
abort(503)
@app.route('/api')
def api():
    if request.authorization is None:
        abort(503)
    return Response(status=200)  # minimal success response once credentials are supplied
@app.route('/api/admin')
def apiadmin():
    abort(503)  # placeholder response, mirroring the /admin handler
@app.route('/')
def index():  # the root view needs its own function name; reusing users() would overwrite the /users endpoint
abort(404) | none | 1 | 2.492472 | 2 |
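A more conventional way to serve the fake-credential payload that the /users handler above tries to build inline is to return JSON via flask.jsonify. The sketch below is illustrative only (the record count is arbitrary and the snippet is not part of the original server); it uses only Faker providers that actually exist (ascii_email, password).
from flask import Flask, jsonify
from faker import Faker

fake = Faker('en_US')
app = Flask(__name__)

@app.route('/users')
def users():
    # return a JSON array of generated credentials
    return jsonify([{'email': fake.ascii_email(), 'password': fake.password()} for _ in range(1000)])

if __name__ == '__main__':
    app.run()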
|
app/trading/game.py | SteffenRossberg/pomalu-ale-jiste | 0 | 6632338 | import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
from app.utility.logger import Logger
from app.environment.enums import Actions
from datetime import timedelta
class Game:
def __init__(
self,
stock_exchange,
agent,
max_positions,
max_limit_positions,
all_quotes,
all_tickers,
start_date,
end_date,
capital,
spread,
order_fee,
capital_gains_tax,
solidarity_surcharge,
device,
days=5):
self.stock_exchange = stock_exchange
self.agent = agent
self.max_positions = max_positions
self.max_limit_positions = max_limit_positions
self.all_quotes = all_quotes
self.all_tickers = all_tickers
self.start_date = start_date
self.end_date = end_date
self.start_capital = capital
self.capital_gains_tax = capital_gains_tax
self.solidarity_surcharge = solidarity_surcharge
self.device = device
self.days = days
self.tax_rate = self.capital_gains_tax / 100.0
self.tax_rate *= self.solidarity_surcharge / 100.0 + 1.0
self.order_fee = order_fee
self.spread = spread / 100.0
def trade(
self,
run_id,
profit_taking_threshold=None,
buy_and_hold=False,
intra_day=False):
result = ''
print(f"Trade limited all stocks from {self.start_date} to {self.end_date} ...")
message, limit_all_investments, limit_all_gain_loss = \
self._trade(
quotes=self.all_quotes,
all_tickers=self.all_tickers,
profit_taking_threshold=profit_taking_threshold,
buy_and_hold=buy_and_hold,
report_each_trade=True,
tickers=self.stock_exchange.tickers,
max_positions=self.max_limit_positions)
result += f'\nTrade Portfolio (max {self.max_limit_positions} stocks): {message}'
print(f"Trade all stocks from {self.start_date} to {self.end_date} ...")
message, all_investments, all_gain_loss = \
self._trade(
quotes=self.all_quotes,
all_tickers=self.all_tickers,
profit_taking_threshold=profit_taking_threshold,
buy_and_hold=buy_and_hold,
report_each_trade=True,
tickers=self.stock_exchange.tickers,
max_positions=self.max_positions)
result += f'\nTrade All ({self.max_positions} stocks): {message}'
print(f"Buy and hold all stocks from {self.start_date} to {self.end_date} ...")
message = \
self._buy_and_hold(
self.all_quotes,
self.all_tickers,
False,
self.stock_exchange.tickers)
        result += f'\nBuy & Hold All ({len(self.stock_exchange.tickers)} stocks): {message}'
print(result)
Logger.log(run_id, result)
index_ticker = 'URTH'
index_title = self.stock_exchange.etf_tickers[index_ticker]
if intra_day:
compare_index = self.stock_exchange.load_intra_day(index_ticker, self.start_date, self.end_date, True)
else:
compare_index = self.stock_exchange.load(index_ticker, self.start_date, self.end_date, True)
all_title = f'All stocks ({self.max_positions} positions)'
limit_all_title = f'All stocks (max. {self.max_limit_positions} positions at once)'
gain_loss_all_title = f'Return all stocks ({self.max_positions} positions)'
gain_loss_limit_all_title = f'Return all stocks (max. {self.max_limit_positions} positions at once)'
length = (len(compare_index)
if len(compare_index) < len(all_investments)
else len(all_investments))
resulting_frame = pd.DataFrame(
data={
'index': range(length),
'date': np.array(compare_index['date'].values[-length:]),
index_title: np.array(compare_index['adj_close'].values[-length:]),
all_title: np.array(all_investments[-length:]),
limit_all_title: np.array(limit_all_investments[-length:]),
gain_loss_all_title: np.array(all_gain_loss[-length:]) + self.start_capital,
gain_loss_limit_all_title: np.array(limit_all_gain_loss[-length:]) + self.start_capital
})
all_columns = [
index_title,
all_title,
limit_all_title,
gain_loss_all_title,
gain_loss_limit_all_title
]
for column in all_columns:
change_column = f'Change {column}'
resulting_frame[change_column] = resulting_frame[column].pct_change(1).fillna(0.0) * 100.0
resulting_frame[column] = \
resulting_frame.apply(
lambda row: resulting_frame[change_column].values[0:int(row['index'] + 1)].sum(),
axis=1)
resulting_frame.set_index(resulting_frame['date'], inplace=True)
fig, axis = plt.subplots(nrows=2)
investment_columns = [
all_title,
limit_all_title
]
resulting_frame[index_title].plot.area(ax=axis[0], stacked=False)
resulting_frame[investment_columns].plot(
ax=axis[0],
figsize=(20, 10),
linewidth=2,
title=f'Investment vs {index_title}')
gain_loss_columns = [
gain_loss_all_title,
gain_loss_limit_all_title
]
resulting_frame[index_title].plot.area(ax=axis[1], stacked=False)
resulting_frame[gain_loss_columns].plot(
ax=axis[1],
figsize=(20, 10),
linewidth=2,
title=f'Portfolio Changes vs {index_title}')
x_min, x_max = self._get_x_min_max(resulting_frame)
axis[0].set_xlim(x_min, x_max)
axis[1].set_xlim(x_min, x_max)
y_min, y_max = self._get_y_min_max(resulting_frame, investment_columns, index_title)
axis[0].set_ylim(y_min, y_max)
y_min, y_max = self._get_y_min_max(resulting_frame, gain_loss_columns, index_title)
axis[1].set_ylim(y_min, y_max)
results = resulting_frame[gain_loss_columns].copy()
results.to_csv(f'data/{run_id}.trading.gain_loss.csv')
self._colorize_plot(fig, axis)
plt.savefig(f'data/{run_id}.chart.png')
plt.show()
plt.close()
@classmethod
def _get_x_min_max(cls, resulting_frame):
x_min = resulting_frame.index.min() - timedelta(days=10)
x_max = resulting_frame.index.max() + timedelta(days=10)
return x_min, x_max
@classmethod
def _get_y_min_max(cls, resulting_frame, columns, index_column):
columns = columns + [index_column]
y_min = resulting_frame[columns].values.min() - 10
y_max = resulting_frame[columns].values.max() + 10
return y_min, y_max
@classmethod
def _colorize_plot(cls, fig, axis):
text_color = '#cfcfcf'
rc('font', weight='bold')
rc('text', color=text_color)
fig.patch.set_facecolor('#1d1d1d')
for ax in axis:
ax.grid(which='major', axis='both')
ax.spines['bottom'].set_color(text_color)
ax.spines['top'].set_color(text_color)
ax.spines['right'].set_color(text_color)
ax.spines['left'].set_color(text_color)
ax.tick_params(axis='both', which='major', colors=text_color, labelsize='large', grid_alpha=0.2)
ax.set_facecolor('#1f1f1f')
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
ax.title.set_color(text_color)
ax.title.set_weight('bold')
legend = ax.legend(
facecolor='#333333',
framealpha=0.4,
ncol=1)
for text in legend.get_texts():
text.set_color(text_color)
def _buy_and_hold(
self,
quotes,
all_tickers,
report_each_trade=True,
tickers=None):
if tickers is None:
tickers = self.stock_exchange.tickers
tickers = {ticker: self.stock_exchange.tickers[ticker] for ticker in tickers.keys() if ticker in all_tickers}
start_investment = self.start_capital / len(tickers)
capital = 0.0
for ticker, company in tickers.items():
investment = start_investment
ticker_quotes = quotes[quotes[f'{ticker}_close'] > 0.0]
row = ticker_quotes.iloc[0]
price = row[f'{ticker}_close']
count = int((investment - self.order_fee) / price)
if count > 0:
buy_price = price * (self.spread + 1.0)
investment -= self.order_fee
                investment -= count * buy_price  # pay the spread-adjusted price, consistent with the earnings basis below
message = f'Buy & Hold - {row["date"]} - {ticker:5} - buy '
message += f'{count:5} x ${price:7.2f} = ${count * price:10.2f}'
self.report(message, report_each_trade)
row = ticker_quotes.iloc[-1]
price = row[f'{ticker}_close']
investment -= self.order_fee
investment += count * price
earnings = (count * price) - (count * buy_price)
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
message = f'Buy & Hold - {row["date"]} - {ticker:5} - sell '
message += f'{count:5} x ${price:7.2f} = ${count * price:10.2f}'
self.report(message, report_each_trade)
capital += investment
message = f'Buy & Hold - Total '
message += f'${self.start_capital:10.2f} => ${capital:10.2f} = ${capital - self.start_capital:10.2f}'
self.report(message, True)
return message
def _trade(
self,
quotes,
all_tickers,
profit_taking_threshold,
buy_and_hold,
report_each_trade=True,
tickers=None,
max_positions=None):
tickers = self.stock_exchange.tickers if tickers is None else tickers
tickers = {ticker: self.stock_exchange.tickers[ticker] for ticker in tickers.keys() if ticker in all_tickers}
max_positions = len(tickers) if max_positions is None else max_positions
investment = self.start_capital
portfolio = {}
investments = []
gain_loss = []
total_gain_loss = 0.0
actions = None
for index, row in quotes.iterrows():
if actions is not None:
self.update_last_prices(row, portfolio)
if buy_and_hold:
self.prepare_buy_and_hold(actions)
                if profit_taking_threshold is not None and profit_taking_threshold > 0.0:
self.prepare_profit_taking(row, portfolio, actions, profit_taking_threshold)
investment, earnings = self.sell(row, investment, portfolio, actions, report_each_trade)
investment = self.buy(row, investment, portfolio, actions, max_positions)
total_gain_loss += earnings
new_investment = self.calculate_current_investment(investment, portfolio, row)
investments.append(new_investment)
gain_loss.append(total_gain_loss)
actions = self.calculate_actions(tickers, portfolio, quotes, row, index)
investment = self.start_capital + total_gain_loss
investments.append(investment)
gain_loss.append(total_gain_loss)
message = f'Total '
message += f'${self.start_capital:10.2f} => ${investment:10.2f} = ${total_gain_loss:10.2f}'
self.report(message, True)
return message, investments, gain_loss
def buy(self, row, investment, portfolio, actions, max_positions):
def action_filter(t):
if t in portfolio:
return False
action = actions[t]
if action['index'] != Actions.Buy:
return False
if action['value'] < 0.99:
return False
if action['predictions'][Actions.SkipOrHold] > 0.5:
return False
if action['predictions'][Actions.Sell] > 0.5:
return False
return True
def action_sort(t):
return actions[t]['value']
possible_position_count = max_positions - len(portfolio)
if possible_position_count <= 0:
return investment
tickers = [ticker for ticker in sorted(filter(action_filter, actions.keys()), key=action_sort, reverse=True)]
tickers = tickers[:possible_position_count]
possible_investment = investment / possible_position_count
for ticker in tickers:
price = row[f'{ticker}_close'] * (self.spread + 1.0)
if possible_investment < price + self.order_fee:
continue
investment -= self.order_fee
count = int((possible_investment - self.order_fee) / price)
investment -= count * price
portfolio[ticker] = {
'buy_date': row['date'],
'buy_price': price,
'count': count,
'last_price': price
}
return investment
def sell(self, row, investment, portfolio, actions, report_each_trade, clear_positions=False):
total_earnings = 0.0
for ticker, action in actions.items():
if ticker not in portfolio or not portfolio[ticker]['count'] > 0:
continue
price = row[f'{ticker}_close']
if not price > 0.0:
price = portfolio[ticker]['last_price']
action['index'] = Actions.Sell
if action['index'] == Actions.Sell:
count = portfolio[ticker]['count']
buy_price = portfolio[ticker]['buy_price']
investment -= self.order_fee
investment += count * price
earnings = (count * price) - (count * buy_price)
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
earnings -= tax
total_earnings += earnings
message = f'{portfolio[ticker]["buy_date"]} - {row["date"]} - {ticker:5} - '
message += f'${buy_price:.2f} -> ${price:.2f}{" ... clear positions" if clear_positions else ""}'
self.report(message, report_each_trade)
del portfolio[ticker]
return investment, total_earnings
@classmethod
def prepare_buy_and_hold(cls, actions):
for ticker in actions.keys():
if actions[ticker]['index'] == Actions.Sell:
actions[ticker] = {'index': Actions.SkipOrHold}
def prepare_profit_taking(self, row, portfolio, actions, threshold=5.0):
for ticker in portfolio.keys():
position = portfolio[ticker]
if self.calculate_position_gain_loss(ticker, position, row) >= threshold:
actions[ticker] = {'index': Actions.Sell}
@classmethod
def update_last_prices(cls, row, portfolio):
for ticker in portfolio.keys():
if row[f'{ticker}_close'] > 0.0:
portfolio[ticker]['last_price'] = row[f'{ticker}_close']
def calculate_position_gain_loss(
self,
ticker,
position,
row):
price = row[f'{ticker}_close']
if not price > 0.0:
price = position['last_price']
count = position['count']
buy_price = position['buy_price']
buy_in = count * buy_price
sell_out = count * price
earnings = sell_out - buy_in
if earnings > 0.0:
earnings -= earnings * self.tax_rate
gain_loss = earnings
gain_loss -= self.order_fee
returns = (((gain_loss + buy_in) / buy_in) - 1.0) * 100.0
return returns
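    # Illustrative check of the return computed above (all numbers are made up, assuming a
    # 26.375 % tax rate and a $10 order fee): 10 shares bought at $100 and quoted at $110
    # earn $100; after tax ($26.38) and the fee the net gain is about $63.6, a return of roughly 6.36 %.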
def calculate_actions(
self,
tickers,
portfolio,
quotes,
row,
index):
features, eval_tickers = self.calculate_features(tickers, portfolio, quotes, row, index)
if eval_tickers is None:
return None
prediction = self.agent(features).cpu().detach().numpy()
action_indexes = np.argmax(prediction, axis=1)
action_values = np.amax(prediction, axis=1)
actions = {
eval_tickers[i]: {
'index': action_indexes[i],
'value': action_values[i],
'predictions': prediction[i]
}
for i in range(len(eval_tickers))
}
return actions
def calculate_features(
self,
tickers,
portfolio,
quotes,
row,
index):
evaluated_tickers = []
feature_batch = []
for ticker in tickers.keys():
window = row[f'{ticker}_window']
if (window is np.nan or
window is None or
np.isnan(window).any() or
np.sum(window) == 0.0):
continue
day_yield = quotes[f'{ticker}_close'][index] / quotes[f'{ticker}_close'][index - 1] - 1.0
has_stocks = 1.0 if ticker in portfolio and portfolio[ticker]['count'] > 0 else 0.0
price_window = np.array(window, dtype=np.float32).flatten()
features = np.append(price_window, np.array([day_yield, has_stocks], dtype=np.float32))
feature_batch.append(features)
evaluated_tickers.append(ticker)
if len(evaluated_tickers) == 0:
return None, None
feature_batch = torch.tensor(feature_batch, dtype=torch.float32)
feature_batch = feature_batch.reshape(feature_batch.shape[0], feature_batch.shape[-1])
feature_batch = feature_batch.to(self.device)
return feature_batch, evaluated_tickers
def calculate_current_investment(
self,
investment,
portfolio,
row):
for ticker, position in portfolio.items():
price = row[f'{ticker}_close']
if not price > 0.0:
price = portfolio[ticker]['last_price']
investment += position['count'] * price
investment -= self.order_fee
earnings = (position['count'] * price) - (position['count'] * position['buy_price'])
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
earnings -= tax
return investment
@staticmethod
def report(
message,
verbose):
if not verbose:
return
print(message)
| import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
from app.utility.logger import Logger
from app.environment.enums import Actions
from datetime import timedelta
class Game:
def __init__(
self,
stock_exchange,
agent,
max_positions,
max_limit_positions,
all_quotes,
all_tickers,
start_date,
end_date,
capital,
spread,
order_fee,
capital_gains_tax,
solidarity_surcharge,
device,
days=5):
self.stock_exchange = stock_exchange
self.agent = agent
self.max_positions = max_positions
self.max_limit_positions = max_limit_positions
self.all_quotes = all_quotes
self.all_tickers = all_tickers
self.start_date = start_date
self.end_date = end_date
self.start_capital = capital
self.capital_gains_tax = capital_gains_tax
self.solidarity_surcharge = solidarity_surcharge
self.device = device
self.days = days
self.tax_rate = self.capital_gains_tax / 100.0
self.tax_rate *= self.solidarity_surcharge / 100.0 + 1.0
self.order_fee = order_fee
self.spread = spread / 100.0
def trade(
self,
run_id,
profit_taking_threshold=None,
buy_and_hold=False,
intra_day=False):
result = ''
print(f"Trade limited all stocks from {self.start_date} to {self.end_date} ...")
message, limit_all_investments, limit_all_gain_loss = \
self._trade(
quotes=self.all_quotes,
all_tickers=self.all_tickers,
profit_taking_threshold=profit_taking_threshold,
buy_and_hold=buy_and_hold,
report_each_trade=True,
tickers=self.stock_exchange.tickers,
max_positions=self.max_limit_positions)
result += f'\nTrade Portfolio (max {self.max_limit_positions} stocks): {message}'
print(f"Trade all stocks from {self.start_date} to {self.end_date} ...")
message, all_investments, all_gain_loss = \
self._trade(
quotes=self.all_quotes,
all_tickers=self.all_tickers,
profit_taking_threshold=profit_taking_threshold,
buy_and_hold=buy_and_hold,
report_each_trade=True,
tickers=self.stock_exchange.tickers,
max_positions=self.max_positions)
result += f'\nTrade All ({self.max_positions} stocks): {message}'
print(f"Buy and hold all stocks from {self.start_date} to {self.end_date} ...")
message = \
self._buy_and_hold(
self.all_quotes,
self.all_tickers,
False,
self.stock_exchange.tickers)
        result += f'\nBuy & Hold All ({len(self.stock_exchange.tickers)} stocks): {message}'
print(result)
Logger.log(run_id, result)
index_ticker = 'URTH'
index_title = self.stock_exchange.etf_tickers[index_ticker]
if intra_day:
compare_index = self.stock_exchange.load_intra_day(index_ticker, self.start_date, self.end_date, True)
else:
compare_index = self.stock_exchange.load(index_ticker, self.start_date, self.end_date, True)
all_title = f'All stocks ({self.max_positions} positions)'
limit_all_title = f'All stocks (max. {self.max_limit_positions} positions at once)'
gain_loss_all_title = f'Return all stocks ({self.max_positions} positions)'
gain_loss_limit_all_title = f'Return all stocks (max. {self.max_limit_positions} positions at once)'
length = (len(compare_index)
if len(compare_index) < len(all_investments)
else len(all_investments))
resulting_frame = pd.DataFrame(
data={
'index': range(length),
'date': np.array(compare_index['date'].values[-length:]),
index_title: np.array(compare_index['adj_close'].values[-length:]),
all_title: np.array(all_investments[-length:]),
limit_all_title: np.array(limit_all_investments[-length:]),
gain_loss_all_title: np.array(all_gain_loss[-length:]) + self.start_capital,
gain_loss_limit_all_title: np.array(limit_all_gain_loss[-length:]) + self.start_capital
})
all_columns = [
index_title,
all_title,
limit_all_title,
gain_loss_all_title,
gain_loss_limit_all_title
]
for column in all_columns:
change_column = f'Change {column}'
resulting_frame[change_column] = resulting_frame[column].pct_change(1).fillna(0.0) * 100.0
resulting_frame[column] = \
resulting_frame.apply(
lambda row: resulting_frame[change_column].values[0:int(row['index'] + 1)].sum(),
axis=1)
resulting_frame.set_index(resulting_frame['date'], inplace=True)
fig, axis = plt.subplots(nrows=2)
investment_columns = [
all_title,
limit_all_title
]
resulting_frame[index_title].plot.area(ax=axis[0], stacked=False)
resulting_frame[investment_columns].plot(
ax=axis[0],
figsize=(20, 10),
linewidth=2,
title=f'Investment vs {index_title}')
gain_loss_columns = [
gain_loss_all_title,
gain_loss_limit_all_title
]
resulting_frame[index_title].plot.area(ax=axis[1], stacked=False)
resulting_frame[gain_loss_columns].plot(
ax=axis[1],
figsize=(20, 10),
linewidth=2,
title=f'Portfolio Changes vs {index_title}')
x_min, x_max = self._get_x_min_max(resulting_frame)
axis[0].set_xlim(x_min, x_max)
axis[1].set_xlim(x_min, x_max)
y_min, y_max = self._get_y_min_max(resulting_frame, investment_columns, index_title)
axis[0].set_ylim(y_min, y_max)
y_min, y_max = self._get_y_min_max(resulting_frame, gain_loss_columns, index_title)
axis[1].set_ylim(y_min, y_max)
results = resulting_frame[gain_loss_columns].copy()
results.to_csv(f'data/{run_id}.trading.gain_loss.csv')
self._colorize_plot(fig, axis)
plt.savefig(f'data/{run_id}.chart.png')
plt.show()
plt.close()
@classmethod
def _get_x_min_max(cls, resulting_frame):
x_min = resulting_frame.index.min() - timedelta(days=10)
x_max = resulting_frame.index.max() + timedelta(days=10)
return x_min, x_max
@classmethod
def _get_y_min_max(cls, resulting_frame, columns, index_column):
columns = columns + [index_column]
y_min = resulting_frame[columns].values.min() - 10
y_max = resulting_frame[columns].values.max() + 10
return y_min, y_max
@classmethod
def _colorize_plot(cls, fig, axis):
text_color = '#cfcfcf'
rc('font', weight='bold')
rc('text', color=text_color)
fig.patch.set_facecolor('#1d1d1d')
for ax in axis:
ax.grid(which='major', axis='both')
ax.spines['bottom'].set_color(text_color)
ax.spines['top'].set_color(text_color)
ax.spines['right'].set_color(text_color)
ax.spines['left'].set_color(text_color)
ax.tick_params(axis='both', which='major', colors=text_color, labelsize='large', grid_alpha=0.2)
ax.set_facecolor('#1f1f1f')
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
ax.title.set_color(text_color)
ax.title.set_weight('bold')
legend = ax.legend(
facecolor='#333333',
framealpha=0.4,
ncol=1)
for text in legend.get_texts():
text.set_color(text_color)
def _buy_and_hold(
self,
quotes,
all_tickers,
report_each_trade=True,
tickers=None):
if tickers is None:
tickers = self.stock_exchange.tickers
tickers = {ticker: self.stock_exchange.tickers[ticker] for ticker in tickers.keys() if ticker in all_tickers}
start_investment = self.start_capital / len(tickers)
capital = 0.0
for ticker, company in tickers.items():
investment = start_investment
ticker_quotes = quotes[quotes[f'{ticker}_close'] > 0.0]
row = ticker_quotes.iloc[0]
price = row[f'{ticker}_close']
count = int((investment - self.order_fee) / price)
if count > 0:
buy_price = price * (self.spread + 1.0)
investment -= self.order_fee
                investment -= count * buy_price  # pay the spread-adjusted price, consistent with the earnings basis below
message = f'Buy & Hold - {row["date"]} - {ticker:5} - buy '
message += f'{count:5} x ${price:7.2f} = ${count * price:10.2f}'
self.report(message, report_each_trade)
row = ticker_quotes.iloc[-1]
price = row[f'{ticker}_close']
investment -= self.order_fee
investment += count * price
earnings = (count * price) - (count * buy_price)
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
message = f'Buy & Hold - {row["date"]} - {ticker:5} - sell '
message += f'{count:5} x ${price:7.2f} = ${count * price:10.2f}'
self.report(message, report_each_trade)
capital += investment
message = f'Buy & Hold - Total '
message += f'${self.start_capital:10.2f} => ${capital:10.2f} = ${capital - self.start_capital:10.2f}'
self.report(message, True)
return message
def _trade(
self,
quotes,
all_tickers,
profit_taking_threshold,
buy_and_hold,
report_each_trade=True,
tickers=None,
max_positions=None):
tickers = self.stock_exchange.tickers if tickers is None else tickers
tickers = {ticker: self.stock_exchange.tickers[ticker] for ticker in tickers.keys() if ticker in all_tickers}
max_positions = len(tickers) if max_positions is None else max_positions
investment = self.start_capital
portfolio = {}
investments = []
gain_loss = []
total_gain_loss = 0.0
actions = None
for index, row in quotes.iterrows():
if actions is not None:
self.update_last_prices(row, portfolio)
if buy_and_hold:
self.prepare_buy_and_hold(actions)
                if profit_taking_threshold is not None and profit_taking_threshold > 0.0:
self.prepare_profit_taking(row, portfolio, actions, profit_taking_threshold)
investment, earnings = self.sell(row, investment, portfolio, actions, report_each_trade)
investment = self.buy(row, investment, portfolio, actions, max_positions)
total_gain_loss += earnings
new_investment = self.calculate_current_investment(investment, portfolio, row)
investments.append(new_investment)
gain_loss.append(total_gain_loss)
actions = self.calculate_actions(tickers, portfolio, quotes, row, index)
investment = self.start_capital + total_gain_loss
investments.append(investment)
gain_loss.append(total_gain_loss)
message = f'Total '
message += f'${self.start_capital:10.2f} => ${investment:10.2f} = ${total_gain_loss:10.2f}'
self.report(message, True)
return message, investments, gain_loss
def buy(self, row, investment, portfolio, actions, max_positions):
def action_filter(t):
if t in portfolio:
return False
action = actions[t]
if action['index'] != Actions.Buy:
return False
if action['value'] < 0.99:
return False
if action['predictions'][Actions.SkipOrHold] > 0.5:
return False
if action['predictions'][Actions.Sell] > 0.5:
return False
return True
def action_sort(t):
return actions[t]['value']
possible_position_count = max_positions - len(portfolio)
if possible_position_count <= 0:
return investment
tickers = [ticker for ticker in sorted(filter(action_filter, actions.keys()), key=action_sort, reverse=True)]
tickers = tickers[:possible_position_count]
possible_investment = investment / possible_position_count
for ticker in tickers:
price = row[f'{ticker}_close'] * (self.spread + 1.0)
if possible_investment < price + self.order_fee:
continue
investment -= self.order_fee
count = int((possible_investment - self.order_fee) / price)
investment -= count * price
portfolio[ticker] = {
'buy_date': row['date'],
'buy_price': price,
'count': count,
'last_price': price
}
return investment
def sell(self, row, investment, portfolio, actions, report_each_trade, clear_positions=False):
total_earnings = 0.0
for ticker, action in actions.items():
if ticker not in portfolio or not portfolio[ticker]['count'] > 0:
continue
price = row[f'{ticker}_close']
if not price > 0.0:
price = portfolio[ticker]['last_price']
action['index'] = Actions.Sell
if action['index'] == Actions.Sell:
count = portfolio[ticker]['count']
buy_price = portfolio[ticker]['buy_price']
investment -= self.order_fee
investment += count * price
earnings = (count * price) - (count * buy_price)
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
earnings -= tax
total_earnings += earnings
message = f'{portfolio[ticker]["buy_date"]} - {row["date"]} - {ticker:5} - '
message += f'${buy_price:.2f} -> ${price:.2f}{" ... clear positions" if clear_positions else ""}'
self.report(message, report_each_trade)
del portfolio[ticker]
return investment, total_earnings
@classmethod
def prepare_buy_and_hold(cls, actions):
for ticker in actions.keys():
if actions[ticker]['index'] == Actions.Sell:
actions[ticker] = {'index': Actions.SkipOrHold}
def prepare_profit_taking(self, row, portfolio, actions, threshold=5.0):
for ticker in portfolio.keys():
position = portfolio[ticker]
if self.calculate_position_gain_loss(ticker, position, row) >= threshold:
actions[ticker] = {'index': Actions.Sell}
@classmethod
def update_last_prices(cls, row, portfolio):
for ticker in portfolio.keys():
if row[f'{ticker}_close'] > 0.0:
portfolio[ticker]['last_price'] = row[f'{ticker}_close']
def calculate_position_gain_loss(
self,
ticker,
position,
row):
price = row[f'{ticker}_close']
if not price > 0.0:
price = position['last_price']
count = position['count']
buy_price = position['buy_price']
buy_in = count * buy_price
sell_out = count * price
earnings = sell_out - buy_in
if earnings > 0.0:
earnings -= earnings * self.tax_rate
gain_loss = earnings
gain_loss -= self.order_fee
returns = (((gain_loss + buy_in) / buy_in) - 1.0) * 100.0
return returns
def calculate_actions(
self,
tickers,
portfolio,
quotes,
row,
index):
features, eval_tickers = self.calculate_features(tickers, portfolio, quotes, row, index)
if eval_tickers is None:
return None
prediction = self.agent(features).cpu().detach().numpy()
action_indexes = np.argmax(prediction, axis=1)
action_values = np.amax(prediction, axis=1)
actions = {
eval_tickers[i]: {
'index': action_indexes[i],
'value': action_values[i],
'predictions': prediction[i]
}
for i in range(len(eval_tickers))
}
return actions
def calculate_features(
self,
tickers,
portfolio,
quotes,
row,
index):
evaluated_tickers = []
feature_batch = []
for ticker in tickers.keys():
window = row[f'{ticker}_window']
if (window is np.nan or
window is None or
np.isnan(window).any() or
np.sum(window) == 0.0):
continue
day_yield = quotes[f'{ticker}_close'][index] / quotes[f'{ticker}_close'][index - 1] - 1.0
has_stocks = 1.0 if ticker in portfolio and portfolio[ticker]['count'] > 0 else 0.0
price_window = np.array(window, dtype=np.float32).flatten()
features = np.append(price_window, np.array([day_yield, has_stocks], dtype=np.float32))
feature_batch.append(features)
evaluated_tickers.append(ticker)
if len(evaluated_tickers) == 0:
return None, None
feature_batch = torch.tensor(feature_batch, dtype=torch.float32)
feature_batch = feature_batch.reshape(feature_batch.shape[0], feature_batch.shape[-1])
feature_batch = feature_batch.to(self.device)
return feature_batch, evaluated_tickers
def calculate_current_investment(
self,
investment,
portfolio,
row):
for ticker, position in portfolio.items():
price = row[f'{ticker}_close']
if not price > 0.0:
price = portfolio[ticker]['last_price']
investment += position['count'] * price
investment -= self.order_fee
earnings = (position['count'] * price) - (position['count'] * position['buy_price'])
if earnings > 0.0:
tax = earnings * self.tax_rate
investment -= tax
earnings -= tax
return investment
@staticmethod
def report(
message,
verbose):
if not verbose:
return
print(message)
| none | 1 | 2.633512 | 3 |
|
src/data_processing/interface/inline_print.py | kaderghal/ADNI_Data_processing | 5 | 6632339 | <reponame>kaderghal/ADNI_Data_processing
#!/usr/bin/python
import config.config_read as rsd
import config.ColorPrompt as CP
#------------------------------------------------------------------------------------------
# Display Data: to print data (Terminal)
#------------------------------------------------------------------------------------------
def print_author_info():
print(CP.style.BRIGHT + CP.fg.GREEN + "Author Information: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_author_info().items():
print('\t[' + k + ']: ' + str(v))
print ("\n")
def print_global_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_global_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_adni_datasets_path():
print(CP.style.BRIGHT + CP.fg.GREEN + "Datasets Images: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_adni_datasets().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_classes_datasets_path():
print(CP.style.BRIGHT + CP.fg.GREEN + "Classes Datasets Paths: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_classes_datasets().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_augmentation_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Augmentation parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_augmentation_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_split_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Splitting dataset parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_split_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_global():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_global().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_hippocampus():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Hippocampus parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_hippocampus().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_posterior_cc():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Posterior CC parameters :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_posterior_cc().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_label_binary_codes():
print(CP.style.BRIGHT + CP.fg.GREEN + "Labels Binary Codes :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_label_binary_codes().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_all_params_data():
print (CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_all_data_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_all_params_data_v2(data):
print(CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in data.items():
print('\t {} : {}'.format(k, v))
print("\n")
def print_dimensions_cubes_HIPP(l, r):
print(CP.style.BRIGHT + CP.fg.GREEN + "Hippocampus Cube (ROI) dimenssion after the extracting process :" + CP.fg.RESET + CP.style.RESET_ALL)
print('\tHippocampus L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4]))
print('\tHippocampus R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4]))
print("\n")
def print_dimensions_cubes_PPC(l, r):
print(CP.style.BRIGHT + CP.fg.GREEN + "Posterior CC Cube (ROI) dimenssion after the extracting process :" + CP.fg.RESET + CP.style.RESET_ALL)
print('\tPosterior_CC L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4]))
print('\tPosterior_CC R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4]))
print("\n")
def print_adni_desc(adni1):
print("\t------------------------------------------------------")
print("\t| ADNI Datasets |")
print("\t------------------------------------------------------")
print("\t---------- AD | MCI | NC ------")
print("\t------------------------------------------------------")
print("\t| ADNI 1 | {} | {} | {} ------".format(len(adni1[0]), len(adni1[1]), len(adni1[2])))
print("\t------------------------------------------------------")
def print_augmentation_table(data):
print(CP.style.BRIGHT + CP.fg.RED + "----------------------------------------------------------------------------------")
print("| Augmentation description ")
print("----------------------------------------------------------------------------------")
print("| | AD | MCI | NC | ")
print("----------------------------------------------------------------------------------")
print("| Train | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[0][0],
data[0][1],
data[0][2],
data[0][3],
data[0][0] + data[0][1] + data[0][2],
data[0][3]*3))
print("----------------------------------------------------------------------------------")
print("| Valid | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[1][0],
data[1][1],
data[1][2],
data[1][3],
data[1][0] + data[1][1] + data[1][2],
data[1][3]*3 ))
print("----------------------------------------------------------------------------------")
print("| Test | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[2][0],
data[2][1],
data[2][2],
data[2][3],
data[2][0] + data[2][1] + data[2][2],
data[2][3]*3))
print("----------------------------------------------------------------------------------")
print("| | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(
data[0][0] + data[1][0] + data[2][0],
data[0][1] + data[1][1] + data[2][1],
data[0][2] + data[1][2] + data[2][2],
data[0][3] + data[1][3] + data[2][3],
(data[0][0] + data[1][0] + data[2][0] +
data[0][1] + data[1][1] + data[2][1] +
data[0][2] + data[1][2] + data[2][2]),
(data[0][3] + data[1][3] + data[2][3])*3,
))
print("----------------------------------------------------------------------------------" + CP.fg.RESET + CP.style.RESET_ALL)
def print_datasetDescription(data):
print(CP.style.BRIGHT + CP.fg.CYAN + "----------------------------------------------------------------------------------------------------------")
print("| ADNI-1 description |")
print("----------------------------------------------------------------------------------------------------------")
print("| #Subject | Sex (F/M) | Age [min, max]/mean(std) | MMSE [min, max]mean/std |")
print("----------------------------------------------------------------------------------------------------------")
print("| AD | {} | {} | {} | {} |".format(data[0][1], data[0][2], data[0][3], data[0][4]))
print("----------------------------------------------------------------------------------------------------------")
print("| MCI | {} | {} | {} | {} |".format(data[1][1], data[1][2], data[1][3], data[1][4]))
print("----------------------------------------------------------------------------------------------------------")
print("| NC | {} | {} | {} | {} |".format(data[2][1], data[2][2], data[2][3], data[2][4]))
print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL)
print(CP.style.BRIGHT + CP.fg.CYAN + "\n----------------------------------------------------------------------------------------------------------")
print("| suite >> ADNI-1 description |")
print("----------------------------------------------------------------------------------------------------------")
print("| | Global GDS [min, max]/mean(std) | Global CDR [min, max]mean/std |")
print("----------------------------------------------------------------------------------------------------------")
print("| AD | {} | {} |".format(data[0][5], data[0][6]))
print("----------------------------------------------------------------------------------------------------------")
print("| MCI | {} | {} |".format(data[1][5], data[1][6]))
print("----------------------------------------------------------------------------------------------------------")
print("| NC | {} | {} |".format(data[2][5], data[2][6]))
print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL)
# def print_2D_or_3D_data():
# selected_decision = raw_input("Do you want create 3D Data roi or 2D slices ? \n - [0] 3D - [1] 2D \n ")
# return True if int(selected_decision) == 0 else False
| #!/usr/bin/python
import config.config_read as rsd
import config.ColorPrompt as CP
#------------------------------------------------------------------------------------------
# Display Data: to print data (Terminal)
#------------------------------------------------------------------------------------------
def print_author_info():
print(CP.style.BRIGHT + CP.fg.GREEN + "Author Information: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_author_info().items():
print('\t[' + k + ']: ' + str(v))
print ("\n")
def print_global_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_global_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_adni_datasets_path():
print(CP.style.BRIGHT + CP.fg.GREEN + "Datasets Images: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_adni_datasets().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_classes_datasets_path():
print(CP.style.BRIGHT + CP.fg.GREEN + "Classes Datasets Paths: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_classes_datasets().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_augmentation_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Augmentation parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_augmentation_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_split_params():
print(CP.style.BRIGHT + CP.fg.GREEN + "Splitting dataset parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_split_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_global():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Global parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_global().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_hippocampus():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Hippocampus parameters: " + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_hippocampus().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_roi_params_posterior_cc():
print(CP.style.BRIGHT + CP.fg.GREEN + "Roi Posterior CC parameters :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_roi_params_posterior_cc().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_label_binary_codes():
print(CP.style.BRIGHT + CP.fg.GREEN + "Labels Binary Codes :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_label_binary_codes().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_all_params_data():
print (CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in rsd.get_all_data_params().items():
print('\t[' + k + ']: ' + str(v))
print("\n")
def print_all_params_data_v2(data):
print(CP.style.BRIGHT + CP.fg.GREEN + "All parameters Data :" + CP.fg.RESET + CP.style.RESET_ALL)
for k, v in data.items():
print('\t {} : {}'.format(k, v))
print("\n")
def print_dimensions_cubes_HIPP(l, r):
print(CP.style.BRIGHT + CP.fg.GREEN + "Hippocampus Cube (ROI) dimenssion after the extracting process :" + CP.fg.RESET + CP.style.RESET_ALL)
print('\tHippocampus L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4]))
print('\tHippocampus R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4]))
print("\n")
def print_dimensions_cubes_PPC(l, r):
print(CP.style.BRIGHT + CP.fg.GREEN + "Posterior CC Cube (ROI) dimenssion after the extracting process :" + CP.fg.RESET + CP.style.RESET_ALL)
print('\tPosterior_CC L : ({}, {}, {})'.format(l[1] - l[0], l[3] - l[2], l[5] - l[4]))
print('\tPosterior_CC R : ({}, {}, {})'.format(r[1] - r[0], r[3] - r[2], r[5] - r[4]))
print("\n")
def print_adni_desc(adni1):
print("\t------------------------------------------------------")
print("\t| ADNI Datasets |")
print("\t------------------------------------------------------")
print("\t---------- AD | MCI | NC ------")
print("\t------------------------------------------------------")
print("\t| ADNI 1 | {} | {} | {} ------".format(len(adni1[0]), len(adni1[1]), len(adni1[2])))
print("\t------------------------------------------------------")
def print_augmentation_table(data):
print(CP.style.BRIGHT + CP.fg.RED + "----------------------------------------------------------------------------------")
print("| Augmentation description ")
print("----------------------------------------------------------------------------------")
print("| | AD | MCI | NC | ")
print("----------------------------------------------------------------------------------")
print("| Train | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[0][0],
data[0][1],
data[0][2],
data[0][3],
data[0][0] + data[0][1] + data[0][2],
data[0][3]*3))
print("----------------------------------------------------------------------------------")
print("| Valid | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[1][0],
data[1][1],
data[1][2],
data[1][3],
data[1][0] + data[1][1] + data[1][2],
data[1][3]*3 ))
print("----------------------------------------------------------------------------------")
print("| Test | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(data[2][0],
data[2][1],
data[2][2],
data[2][3],
data[2][0] + data[2][1] + data[2][2],
data[2][3]*3))
print("----------------------------------------------------------------------------------")
print("| | {0} -> ({3}) | {1} -> ({3}) | {2} -> ({3}) | {4} -> ({5}) ".format(
data[0][0] + data[1][0] + data[2][0],
data[0][1] + data[1][1] + data[2][1],
data[0][2] + data[1][2] + data[2][2],
data[0][3] + data[1][3] + data[2][3],
(data[0][0] + data[1][0] + data[2][0] +
data[0][1] + data[1][1] + data[2][1] +
data[0][2] + data[1][2] + data[2][2]),
(data[0][3] + data[1][3] + data[2][3])*3,
))
print("----------------------------------------------------------------------------------" + CP.fg.RESET + CP.style.RESET_ALL)
def print_datasetDescription(data):
print(CP.style.BRIGHT + CP.fg.CYAN + "----------------------------------------------------------------------------------------------------------")
print("| ADNI-1 description |")
print("----------------------------------------------------------------------------------------------------------")
print("| #Subject | Sex (F/M) | Age [min, max]/mean(std) | MMSE [min, max]mean/std |")
print("----------------------------------------------------------------------------------------------------------")
print("| AD | {} | {} | {} | {} |".format(data[0][1], data[0][2], data[0][3], data[0][4]))
print("----------------------------------------------------------------------------------------------------------")
print("| MCI | {} | {} | {} | {} |".format(data[1][1], data[1][2], data[1][3], data[1][4]))
print("----------------------------------------------------------------------------------------------------------")
print("| NC | {} | {} | {} | {} |".format(data[2][1], data[2][2], data[2][3], data[2][4]))
print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL)
print(CP.style.BRIGHT + CP.fg.CYAN + "\n----------------------------------------------------------------------------------------------------------")
print("| suite >> ADNI-1 description |")
print("----------------------------------------------------------------------------------------------------------")
print("| | Global GDS [min, max]/mean(std) | Global CDR [min, max]mean/std |")
print("----------------------------------------------------------------------------------------------------------")
print("| AD | {} | {} |".format(data[0][5], data[0][6]))
print("----------------------------------------------------------------------------------------------------------")
print("| MCI | {} | {} |".format(data[1][5], data[1][6]))
print("----------------------------------------------------------------------------------------------------------")
print("| NC | {} | {} |".format(data[2][5], data[2][6]))
print("----------------------------------------------------------------------------------------------------------\n" + CP.fg.RESET + CP.style.RESET_ALL)
# def print_2D_or_3D_data():
# selected_decision = raw_input("Do you want create 3D Data roi or 2D slices ? \n - [0] 3D - [1] 2D \n ")
# return True if int(selected_decision) == 0 else False | en | 0.21913 | #!/usr/bin/python #------------------------------------------------------------------------------------------ # Display Data: to print data (Terminal) #------------------------------------------------------------------------------------------ #Subject | Sex (F/M) | Age [min, max]/mean(std) | MMSE [min, max]mean/std |") # def print_2D_or_3D_data(): # selected_decision = raw_input("Do you want create 3D Data roi or 2D slices ? \n - [0] 3D - [1] 2D \n ") # return True if int(selected_decision) == 0 else False | 2.339134 | 2 |
netcrafter/__init__.py | cj-atmoscape/netcrafter | 0 | 6632340 | from netcrafter.netcrafter import netcraft
__all__ = (
    'netcraft',
) | from netcrafter.netcrafter import netcraft
__all__ = (
    'netcraft',
) | none | 1 | 1.123834 | 1 |
|
2021/day_06/solution.py | dandiez/AdventOfCode | 0 | 6632341 | from collections import Counter, defaultdict
from unittest import TestCase
def read_input(filename="input"):
with open(filename) as f:
lines = [line.strip() for line in f.readlines() if line.strip()]
inp = [int(val) for val in lines[0].split(',')] # parse here...
return inp
def part_1(inp):
return simulate_many(inp, 80)
def part_2(inp):
return simulate_many(inp, 256)
def simulate_many(inp, cycles):
inp = Counter(inp)
for n in range(cycles):
inp = simulate_one_dict(inp)
return sum(inp.values())
def simulate_one_dict(inp):
new_inp = defaultdict(int)
for k, v in inp.items():
if k == 0:
new_inp[8] += v
new_inp[6] += v
else:
new_inp[k - 1] += v
return new_inp
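# Worked example of a single simulate_one_dict() step, where keys are lanternfish timer
# values and values are how many fish share that timer:
#   {0: 2, 3: 1} -> {8: 2, 6: 2, 2: 1}
# Fish at timer 0 each spawn a new fish at timer 8 and reset to 6; every other timer just
# decreases by one. Tracking only the counts is what keeps 256 cycles (part 2) cheap.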
def main(input_file):
"""Solve puzzle and connect part 1 with part 2 if needed."""
# part 1
inp = read_input(input_file)
p1 = part_1(inp)
print(f"Solution to part 1: {p1}")
# part 2
inp = read_input(input_file)
p2 = part_2(inp)
print(f"Solution to part 2: {p2}")
return p1, p2
def test_sample_1(self):
inp = read_input("sample_1")
self.assertEqual(5934, part_1(inp))
if __name__ == "__main__":
print('*** solving tests ***')
test_sample_1(TestCase())
print('*** solving main ***')
main("input")
| from collections import Counter, defaultdict
from unittest import TestCase
def read_input(filename="input"):
with open(filename) as f:
lines = [line.strip() for line in f.readlines() if line.strip()]
inp = [int(val) for val in lines[0].split(',')] # parse here...
return inp
def part_1(inp):
return simulate_many(inp, 80)
def part_2(inp):
return simulate_many(inp, 256)
def simulate_many(inp, cycles):
inp = Counter(inp)
for n in range(cycles):
inp = simulate_one_dict(inp)
return sum(inp.values())
def simulate_one_dict(inp):
new_inp = defaultdict(int)
for k, v in inp.items():
if k == 0:
new_inp[8] += v
new_inp[6] += v
else:
new_inp[k - 1] += v
return new_inp
def main(input_file):
"""Solve puzzle and connect part 1 with part 2 if needed."""
# part 1
inp = read_input(input_file)
p1 = part_1(inp)
print(f"Solution to part 1: {p1}")
# part 2
inp = read_input(input_file)
p2 = part_2(inp)
print(f"Solution to part 2: {p2}")
return p1, p2
def test_sample_1(self):
inp = read_input("sample_1")
self.assertEqual(5934, part_1(inp))
if __name__ == "__main__":
print('*** solving tests ***')
test_sample_1(TestCase())
print('*** solving main ***')
main("input") | en | 0.817077 | # parse here... Solve puzzle and connect part 1 with part 2 if needed. # part 1 # part 2 | 3.485162 | 3 |
src/constants.py | PIVX-Project/PET4L | 9 | 6632342 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2019 Random.Zebra (https://github.com/random-zebra/)
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
from queue import Queue
wqueue = Queue() # type: Queue[str]
APPDATA_DIRNAME = ".PET4L-DATA"
MPATH_LEDGER = "44'/77'/"
MPATH_TREZOR = "44'/119'/"
MPATH_TESTNET = "44'/1'/"
WIF_PREFIX = 212 # 212 = d4
MAGIC_BYTE = 30
STAKE_MAGIC_BYTE = 63
TESTNET_WIF_PREFIX = 239
TESTNET_MAGIC_BYTE = 139
TESTNET_STAKE_MAGIC_BYTE = 73
DEFAULT_PROTOCOL_VERSION = 70915
MINIMUM_FEE = 0.0001 # minimum PIV/kB
SECONDS_IN_2_MONTHS = 60 * 24 * 60 * 60
MAX_INPUTS_NO_WARNING = 75
starting_width = 1033
starting_height = 585
home_dir = os.path.expanduser('~')
user_dir = os.path.join(home_dir, APPDATA_DIRNAME)
log_File = os.path.join(user_dir, 'debug.log')
database_File = os.path.join(user_dir, 'application.db')
DefaultCache = {
"lastAddress": "",
"window_width": starting_width,
"window_height": starting_height,
"splitter_x": 342,
"splitter_y": 133,
"console_hidden": False,
"selectedHW_index": 0,
"selectedRPC_index": 0,
"isTestnetRPC": False,
"hwAcc": 0,
"spathFrom": 0,
"spathTo": 10,
"intExt": 0
}
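# Minimal sketch of how defaults like these are typically consumed; illustrative only,
# since the real lookup code lives elsewhere in the application and `saved_settings` is a
# placeholder name:
#   cache = DefaultCache.copy()
#   cache.update(saved_settings)   # values previously persisted in application.db
#   width = cache["window_width"]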
trusted_RPC_Servers = [
["https", "amsterdam.randomzebra.party:8080", "spmtUser_ams", "WUss6sr8956S5Paex254"],
["https", "losangeles.randomzebra.party:8080", "spmtUser_la", "8X88u7TuefPm7mQaJY52"],
["https", "singapore.randomzebra.party:8080", "spmtUser_sing", "ZyD936tm9dvqmMP8A777"]]
HW_devices = [
# (model name, api index)
("LEDGER Nano", 0),
("TREZOR One", 1),
("TREZOR Model T", 1)
]
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2019 Random.Zebra (https://github.com/random-zebra/)
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
import os
from queue import Queue
wqueue = Queue() # type: Queue[str]
APPDATA_DIRNAME = ".PET4L-DATA"
MPATH_LEDGER = "44'/77'/"
MPATH_TREZOR = "44'/119'/"
MPATH_TESTNET = "44'/1'/"
WIF_PREFIX = 212 # 212 = d4
MAGIC_BYTE = 30
STAKE_MAGIC_BYTE = 63
TESTNET_WIF_PREFIX = 239
TESTNET_MAGIC_BYTE = 139
TESTNET_STAKE_MAGIC_BYTE = 73
DEFAULT_PROTOCOL_VERSION = 70915
MINIMUM_FEE = 0.0001 # minimum PIV/kB
SECONDS_IN_2_MONTHS = 60 * 24 * 60 * 60
MAX_INPUTS_NO_WARNING = 75
starting_width = 1033
starting_height = 585
home_dir = os.path.expanduser('~')
user_dir = os.path.join(home_dir, APPDATA_DIRNAME)
log_File = os.path.join(user_dir, 'debug.log')
database_File = os.path.join(user_dir, 'application.db')
DefaultCache = {
"lastAddress": "",
"window_width": starting_width,
"window_height": starting_height,
"splitter_x": 342,
"splitter_y": 133,
"console_hidden": False,
"selectedHW_index": 0,
"selectedRPC_index": 0,
"isTestnetRPC": False,
"hwAcc": 0,
"spathFrom": 0,
"spathTo": 10,
"intExt": 0
}
trusted_RPC_Servers = [
["https", "amsterdam.randomzebra.party:8080", "spmtUser_ams", "WUss6sr8956S5Paex254"],
["https", "losangeles.randomzebra.party:8080", "spmtUser_la", "8X88u7TuefPm7mQaJY52"],
["https", "singapore.randomzebra.party:8080", "spmtUser_sing", "ZyD936tm9dvqmMP8A777"]]
HW_devices = [
# (model name, api index)
("LEDGER Nano", 0),
("TREZOR One", 1),
("TREZOR Model T", 1)
]
| en | 0.611553 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2017-2019 Random.Zebra (https://github.com/random-zebra/) # Distributed under the MIT software license, see the accompanying # file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php. # type: Queue[str] # 212 = d4 # minimum PIV/kB # (model name, api index) | 1.782812 | 2 |
atx/record/android_hooks.py | jamjven/ATX | 1,132 | 6632343 | # -*- coding: utf-8 -*-
# An android event hook via getevent.
# Only ABS_MT_POSITION_X(Y) events are handled.
#
# Basic input: TouchDown(D), TouchUp(U), TouchMove(M)
# Basic timeouts: TouchPressTimeout(P), TouchFollowTimeout(F), TouchMoveStopTimeout(S)
# gestures are defined as follows:
# Tap/Touch/Click: DM?UF
# TapFollow: (DM?U)+DM?UF
# LongPress: DP, may be followed by Drag or Swipe
# Drag: D?M+S, may be followed by Drag or Swipe
# Swipe/Fling: D?M+U, difference with `Drag` is that `TouchMoveStopTimeout` cannot be fired.
# 2-Finger-Pinch: distance changing
# 2-Finger-Drag: distance hold while moving
# where '?' after M means a little movement and '+' means a large one.
# other gestures are ignored.
import re
import math
import time
import numpy as np
import subprocess
import threading
import Queue
import traceback
__all__ = ['AndroidInputHookManager', 'HookManager', 'HookConstants']
# global, max MultiTap count. Set to 1 to disable MultiTap, 0 for infinite.
_MULTI_TAP_NUM = 3
def set_multitap(count):
if count < 0:
print 'Cannot set to negative count.'
return
global _MULTI_TAP_NUM
_MULTI_TAP_NUM = int(count)
class HookConstants:
# basic events
TOUCH_ANY = 1 << 3
TOUCH_DOWN = 1 << 3 ^ 1
TOUCH_UP = 1 << 3 ^ 2
TOUCH_MOVE = 1 << 3 ^ 3
# only used for gesture analyze
TOUCH_PRESS_TIMEOUT = 1 << 3 ^ 4
TOUCH_FOLLOW_TIMEOUT = 1 << 3 ^ 5
TOUCH_MOVESTOP_TIMEOUT = 1 << 3 ^ 6
# DOWN is odd, UP is even & DONW + 1 == UP
KEY_ANY = 1 << 4
KEY_HOME_DOWN = 1 << 4 ^ 1
KEY_HOME_UP = 1 << 4 ^ 2
KEY_BACK_DOWN = 1 << 4 ^ 3
KEY_BACK_UP = 1 << 4 ^ 4
KEY_MENU_DOWN = 1 << 4 ^ 5
KEY_MENU_UP = 1 << 4 ^ 6
KEY_POWER_DOWN = 1 << 4 ^ 7
KEY_POWER_UP = 1 << 4 ^ 8
KEY_VOLUMEDOWN_DOWN = 1 << 4 ^ 9
KEY_VOLUMEDOWN_UP = 1 << 4 ^ 10
KEY_VOLUMEUP_DOWN = 1 << 4 ^ 11
KEY_VOLUMEUP_UP = 1 << 4 ^ 12
# gestures
GST_TAP = 1 << 5 ^ 1
GST_MULTI_TAP = 1 << 5 ^ 2
GST_LONG_PRESS = 1 << 5 ^ 3
GST_LONG_PRESS_RELEASE = 1 << 5 ^ 4
GST_DRAG = 1 << 5 ^ 5
GST_SWIPE = 1 << 5 ^ 6
GST_PINCH_IN = 1 << 5 ^ 7
GST_PINCH_OUT = 1 << 5 ^ 8
HC = HookConstants
HCREPR = {
HC.TOUCH_DOWN : 'D',
HC.TOUCH_UP : 'U',
HC.TOUCH_MOVE : 'M',
HC.TOUCH_PRESS_TIMEOUT : 'P',
HC.TOUCH_FOLLOW_TIMEOUT : 'F',
HC.TOUCH_MOVESTOP_TIMEOUT : 'S',
HC.GST_TAP: 'Tap',
HC.GST_MULTI_TAP: 'MultiTap',
HC.GST_LONG_PRESS: 'LongPress',
HC.GST_LONG_PRESS_RELEASE: 'PressRelease',
HC.GST_DRAG: 'Drag',
HC.GST_SWIPE: 'Swipe',
HC.GST_PINCH_IN: 'PinchIn',
HC.GST_PINCH_OUT: 'PinchOut',
}
class Event(object):
def __init__(self, time, msg):
self.time = time
self.msg = msg
def __str__(self):
return '%s_%s' % (self.__class__.__name__, HCREPR.get(self.msg, self.msg))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(['%s=%s' % (k, v) for k, v in self.__dict__.iteritems()]))
class KeyEvent(Event):
def __init__(self, time, msg, key):
super(KeyEvent, self).__init__(time, msg)
# convert to KEYCODE_xxx for 'adb input keyevent xxx'
if key.startswith('KEY_'):
key = 'KEYCODE_' + key[4:]
self.key = key
class TouchEvent(Event):
def __init__(self, time, msg, slotid, x, y, pressure, touch_major, **extra):
super(TouchEvent, self).__init__(time, msg)
self.slotid = slotid
self.x = x
self.y = y
self.pressure = pressure
self.touch_major = touch_major
self.__dict__.update(extra)
class TouchTimeoutEvent(Event):
def __init__(self, time, msg, slotid):
super(TouchTimeoutEvent, self).__init__(time, msg)
self.slotid = slotid
class GestureEvent(Event):
def __init__(self, msg, track):
# suffixes: s for start, e for end.
        # two-finger gestures need two tracks
if msg in (HC.GST_PINCH_IN, HC.GST_PINCH_OUT):
t1, t2 = track[0], track[1]
ts = min(t1[0].time, t2[0].time)
te = max(t1[-1].time, t2[-1].time)
else:
es, ee = track[0], track[-1]
ts, te = track[0].time, track[-1].time
print 'Gesture', HCREPR.get(msg, msg), ''.join([HCREPR.get(e.msg, e.msg) for e in track]), (es.x, es.y), (ee.x, ee.y)
if msg in (HC.GST_SWIPE, HC.GST_DRAG):
# TODO: check for corners for complicated trace
self.points = [(es.x, es.y), (ee.x, ee.y)]
else:
self.points = [(es.x, es.y), (ee.x, ee.y)]
super(GestureEvent, self).__init__(ts, msg)
self.duration = te - ts
SLOT_NUM = 5
_X, _Y, _VR, _VA, _MJ, _PR, FIELD_NUM = range(7)
INF = 9999
class InputParser(object):
_pat = re.compile('\[\s*(?P<time>[0-9.]+)\] (?P<device>/dev/.*): +(?P<type>\w+) +(?P<code>\w+) +(?P<value>\w+)')
_move_radius = 10
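    # `adb shell getevent -lt` lines that this regex is meant to match look roughly like
    # the following (device path and hex values vary per phone):
    #   [   12345.678901] /dev/input/event2: EV_ABS  ABS_MT_POSITION_X  0000032f
    #   [   12345.678901] /dev/input/event2: EV_SYN  SYN_REPORT         00000000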
def __init__(self, queue):
self.timediff = None
self.queue = queue
# the 'standard' status temp_status is compared to.
# if changes are great enough, new event are emitted.
# velocity will be calculated for touch-move events.
self._status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
self._status_time = 0
# realtime status, minor changes are cumulated
self._temp_status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
self._temp_status_time = 0
self._touch_batch = []
self._curr_slot = 0
def feed(self, line):
# print line
m = self._pat.search(line)
if not m:
return
_time, _device, _type, _code, _value = m.groups()
_time = float(_time)
if self.timediff is None:
self.timediff = time.time() - _time
_time = self.timediff + _time
try:
_value = int(_value, 16)
except:
pass
if _type == 'EV_SYN':
if _code in ('SYN_REPORT', 'SYN_MT_REPORT'):
try:
self._process_touch_batch()
except IndexError: # there might be a 6th finger, ignore that.
self._touch_batch = []
elif _code == 'SYN_DROPPED':
self._touch_batch = []
else:
# print 'unknown syn code', _code
pass
elif _type == 'EV_KEY':
self.emit_key_event(_time, _code, _value)
elif _type == 'EV_ABS':
self._touch_batch.append((_time, _device, _type, _code, _value))
else:
# print 'unknown input event type', _type
pass
def emit_key_event(self, _time, _code, _value):
name = '%s_%s' % (_code, _value)
msg = getattr(HC, name, None)
if msg is None:
return
event = KeyEvent(_time, msg, _code)
self.queue.put(event)
def emit_touch_event(self, event):
self.queue.put(event)
def _process_touch_batch(self):
'''a batch syncs in about 0.001 seconds.'''
if not self._touch_batch:
return
_time = self._temp_status_time
changed = False
for (_time, _device, _type, _code, _value) in self._touch_batch:
if _code == 'ABS_MT_TRACKING_ID':
if _value == 0xffffffff:
self._temp_status[self._curr_slot] = -INF
changed = True
else:
pass
elif _code == 'ABS_MT_SLOT':
self._curr_slot = _value
else:
if _code == 'ABS_MT_POSITION_X':
self._temp_status[self._curr_slot,_X] = _value
changed = True
elif _code == 'ABS_MT_POSITION_Y':
self._temp_status[self._curr_slot,_Y] = _value
changed = True
elif _code == 'ABS_MT_PRESSURE':
self._temp_status[self._curr_slot,_PR] = _value
elif _code == 'ABS_MT_TOUCH_MAJOR':
self._temp_status[self._curr_slot,_MJ] = _value
else:
print 'Unknown code', _code
self._temp_status_time = _time
self._touch_batch = []
if not changed:
return
# check differences, if position changes are big enough then emit events
diff = self._temp_status - self._status
dt = self._temp_status_time - self._status_time
emitted = False
for i in range(SLOT_NUM):
arr = self._temp_status[i]
oldarr = self._status[i]
dx, dy = diff[i,_X], diff[i,_Y]
if dx > INF or dy > INF:
# touch begin
event = TouchEvent(_time, HC.TOUCH_DOWN, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ])
self.emit_touch_event(event)
emitted = True
elif dx < -INF or dy < -INF:
# touch end
event = TouchEvent(_time, HC.TOUCH_UP, i, oldarr[_X], oldarr[_Y], oldarr[_PR], oldarr[_MJ])
self.emit_touch_event(event)
emitted = True
else:
r, a = radang(float(dx), float(dy))
if r > self._move_radius:
v = r / dt
event = TouchEvent(_time, HC.TOUCH_MOVE, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ], angle=a, velocity=v)
self.emit_touch_event(event)
emitted = True
if not emitted:
return
self._status = self._temp_status.copy()
self._status_time = self._temp_status_time
def radang(x, y):
'''return (radius, angle) of a vector(x, y)'''
if x == 0:
if y == 0:
return 0, 0
return abs(y), 90+180*(y<0)
if y == 0:
return abs(x), 180*(x<0)
r = math.sqrt(x*x+y*y)
a = math.degrees(math.atan(y/x))
if x < 0:
a += 180
elif y < 0:
a += 360
return r, a
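# Sample values, with the angle in degrees normalized to [0, 360):
#   radang(3.0, 4.0)   -> (5.0, 53.13...)
#   radang(-3.0, 4.0)  -> (5.0, 126.86...)
#   radang(0.0, -2.0)  -> (2.0, 270)
#   radang(-1.0, 0.0)  -> (1.0, 180)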
class GestureRecognizer(object):
double_tap_delay = 0.5
long_press_delay = 1
move_stop_delay = 0.2
pinch_difference_square = 3000
def __init__(self, queue):
self.queue = queue
self.dispatch_map = {}
self.running = False
self.touches = [None] * SLOT_NUM
# used for recognition
self.tracks = [None for i in range(SLOT_NUM)]
self.track_slots = set()
def register(self, keycode, func):
self.dispatch_map[keycode] = func
def start(self):
if self.running:
return
self.running = True
t = threading.Thread(target=self.process)
t.setDaemon(True)
t.start()
def stop(self):
self.running = False
def process(self):
'''handle events and trigger time-related events'''
timediff = 0
while True:
try:
time.sleep(0.001)
event = self.queue.get_nowait()
self.handle_event(event)
if event.msg & HC.KEY_ANY:
continue
if timediff == 0:
timediff = time.time() - event.time
self.touches[event.slotid] = event
except Queue.Empty:
if not self.running:
break
now = time.time() - timediff
for i in range(SLOT_NUM):
e = self.touches[i]
if e is None:
continue
if e.msg == HC.TOUCH_DOWN and now - e.time > self.long_press_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_PRESS_TIMEOUT, i))
self.touches[i] = None
elif e.msg == HC.TOUCH_UP and now - e.time > self.double_tap_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_FOLLOW_TIMEOUT, i))
self.touches[i] = None
elif e.msg == HC.TOUCH_MOVE and now - e.time > self.move_stop_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_MOVESTOP_TIMEOUT, i))
self.touches[i] = None
except:
traceback.print_exc()
print 'process done.'
def handle_event(self, event):
self.dispatch_event(event.msg, event)
if event.msg & HC.KEY_ANY:
self.dispatch_event(HC.KEY_ANY, event)
else:
self.dispatch_event(HC.TOUCH_ANY, event)
self.analyze_tracks(event)
def dispatch_event(self, msg, event):
func = self.dispatch_map.get(msg)
if func is None:
return
try:
func(event)
except:
traceback.print_exc()
def analyze_tracks(self, event):
pass
def handle_gesture(self, msg, tracks):
event = GestureEvent(msg, tracks)
func = self.dispatch_map.get(msg)
if func is None:
return
try:
func(event)
except:
traceback.print_exc()
## NOT COMPLETED ##
class SimpleGestureRecognizer(GestureRecognizer):
N_FINGER = 2
def analyze_tracks(self, event):
# handle one-finger and two-finger gestures only
# means a third finger will be ignored even if one of the
# first two fingers leaves the screen.
i = event.slotid
        # begin gesture when touch down
if event.msg == HC.TOUCH_DOWN:
if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
return
if self.tracks[i] is None:
self.tracks[i] = []
self.track_slots.add(i)
self.tracks[i].append(event)
return
if self.tracks[i] is None:
return
if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
self.tracks[i] = []
elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
# print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'long press'
self.tracks[i] = []
elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
# print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'drag'
self.tracks[i] = []
if len(self.track_slots) == 2:
for s in self.track_slots:
print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
print
elif event.msg == HC.TOUCH_UP:
self.tracks[i].append(event)
if len(self.track_slots) == 2:
for s in self.track_slots:
print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
print
self.tracks[i] = None
self.track_slots.discard(i)
else: # TOUCH_MOVE
self.tracks[i].append(event)
return
# check for pinch/pan
if len(self.track_slots) == 2:
t1, t2 = [self.tracks[s] for s in self.track_slots]
if len(t1) == 0 or len(t2) == 0 or len(t1) + len(t2) < 6:
return
# make copy and check distance changing
t1, t2 = t1[:], t2[:]
dists = []
while len(dists) < 5:
e1, e2 = t1[-1], t2[-1]
dx, dy = e1.x-e2.x, e1.y-e2.y
dists.append(dx*dx+dy*dy)
if e1.time < e2.time:
if len(t2) == 1:
break
else:
t2.pop()
else:
if len(t1) == 1:
break
else:
t1.pop()
print [dists[j+1]-dists[j] for j in range(len(dists)-1)]
# just keep latest position
for s in self.track_slots:
self.tracks[s] = self.tracks[s][-1:]
class RegexpGestureRecognizer(GestureRecognizer):
N_FINGER = 1
def analyze_tracks(self, event):
# handle one-finger gestures only
i = event.slotid
        # begin gesture when touch down
if event.msg == HC.TOUCH_DOWN:
if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
return
if not self.tracks[i]:
self.tracks[i] = []
self.track_slots.add(i)
self.tracks[i].append(event)
return
if self.tracks[i] is None:
return
s = ''.join([HCREPR.get(e.msg) for e in self.tracks[i]])
if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
if re.match('^DM?U$', s):
self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
elif re.match('^(DM?U)+DM?U$', s):
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
self.tracks[i] = None
self.track_slots.discard(i)
elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
if re.match('^D?MM+$', s):
self.handle_gesture(HC.GST_DRAG, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
if s == 'D':
self.handle_gesture(HC.GST_LONG_PRESS, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_UP:
self.tracks[i].append(event) # note: it's not the same with s after add
if s == '':
self.handle_gesture(HC.GST_LONG_PRESS_RELEASE, [event])
elif re.match('^D?MM+$', s):
self.handle_gesture(HC.GST_SWIPE, self.tracks[i][:])
self.tracks[i] = []
elif _MULTI_TAP_NUM == 1 and re.match('^DM?$', s):
self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
self.tracks[i] = []
elif _MULTI_TAP_NUM > 1 and re.match('^(DM?U){%d}DM?$' % (_MULTI_TAP_NUM-1,), s):
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_MOVE:
if re.match('^(DU)+D$', s):
if s == 'DUD':
self.handle_gesture(HC.GST_TAP, self.tracks[i][:-1])
else:
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:-1])
self.tracks[i] = self.tracks[i][-1:]
self.tracks[i].append(event)
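    # Example of how the track strings drive the regexes above: a quick tap arrives as
    # D (down), an optional tiny M (move), then U (up); when the follow timeout fires the
    # accumulated string is "DU" or "DMU", which matches '^DM?U$' and is reported as GST_TAP.
    # A fast flick accumulates "DMM...M" and the final TOUCH_UP matches '^D?MM+$', which is
    # reported as GST_SWIPE.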
NOTACTIVE, ACTIVE, STAGE_1, STAGE_2, TRIGGERED = range(5)
## NOT COMPLETED ##
class StateMachineGestureRecognizer(GestureRecognizer):
state_map = {
HC.GST_TAP: {
NOTACTIVE: { HC.TOUCH_DOWN : ACTIVE },
ACTIVE: {
HC.TOUCH_MOVE: STAGE_1,
HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
},
STAGE_1: {
HC.TOUCH_MOVE: NOTACTIVE,
HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
}
},
HC.GST_SWIPE: {
NOTACTIVE: { HC.TOUCH_DOWN: ACTIVE },
ACTIVE: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_1},
STAGE_1: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_2 },
STAGE_2: { HC.TOUCH_UP: TRIGGERED, HC.TOUCH_MOVESTOP_TIMEOUT: TRIGGERED},
},
}
def __init__(self, queue):
super(self.__class__, self).__init__(queue)
self.state = {}
for k in self.state_map:
self.state[k] = NOTACTIVE
print self.state_map
def analyze_tracks(self, event):
for k, v in self.state.iteritems():
s = self.state_map.get(k, {}).get(v, {}).get(event.msg)
if s is not None:
self.state[k] = s
triggered = False
for k, v in self.state.iteritems():
if v == TRIGGERED:
print 'trigger event', k
triggered = True
if triggered:
for k in self.state:
self.state[k] = NOTACTIVE
class AndroidInputHookManager(object):
def __init__(self, serial=None, processor_class=RegexpGestureRecognizer):
self._serial = serial
self.running = False
self._queue = Queue.Queue()
self._listener = None
self._parser = InputParser(self._queue)
self._processor = processor_class(self._queue)
def set_serial(self, serial):
self._serial = serial
def register(self, keycode, func):
'''register hook function'''
self._processor.register(keycode, func)
def hook(self):
self._processor.start()
self.running = True
t = threading.Thread(target=self._run_hook)
t.setDaemon(True)
t.start()
def _run_hook(self):
cmd = ['adb']
if self._serial:
cmd.extend(['-s', self._serial])
cmd.extend(['shell', 'getevent', '-lt'])
while True:
# start listener
self._listener = p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
try:
line = p.stdout.readline().strip()
if not line:
if p.poll() is not None:
break
continue
self._parser.feed(line)
except KeyboardInterrupt:
p.kill()
except:
p.kill()
traceback.print_exc()
if not self.running:
break
state = subprocess.check_output(['adb', '-s', self._serial, 'get-state']).strip()
if state != 'device':
print 'adb status(%s) wrong! stop hook.' % (state,)
break
print 'adb getevent died, reconnecting...'
time.sleep(1)
def unhook(self):
self.running = False
self._processor.stop()
if self._listener:
self._listener.kill()
HookManager = AndroidInputHookManager
if __name__ == '__main__':
hm = AndroidInputHookManager(processor_class=RegexpGestureRecognizer)
hm.hook()
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
hm.unhook()
| # -*- coding: utf-8 -*-
# An android event hook via getevent.
# Only ABS_MT_POSITION_X(Y) events are handled.
#
# Basic input: TouchDown(D), TouchUp(U), TouchMove(M)
# Basic timeouts: TouchPressTimeout(P), TouchFollowTimeout(F), TouchMoveStopTimeout(S)
# gestures are defined as follows:
# Tap/Touch/Click: DM?UF
# TapFollow: (DM?U)+DM?UF
# LongPress: DP, may be followed by Drag or Swipe
# Drag: D?M+S, may be followed by Drag or Swipe
# Swipe/Fling: D?M+U, difference with `Drag` is that `TouchMoveStopTimeout` cannot be fired.
# 2-Finger-Pinch: distance changing
# 2-Finger-Drag: distance hold while moving
# where '?' after M means a little movement and '+' means a large one.
# other gestures are ignored.
import re
import math
import time
import numpy as np
import subprocess
import threading
import Queue
import traceback
__all__ = ['AndroidInputHookManager', 'HookManager', 'HookConstants']
# global, max MultiTap count. Set to 1 to disable MultiTap, 0 for infinite.
_MULTI_TAP_NUM = 3
def set_multitap(count):
if count < 0:
print 'Cannot set to negative count.'
return
global _MULTI_TAP_NUM
_MULTI_TAP_NUM = int(count)
class HookConstants:
# basic events
TOUCH_ANY = 1 << 3
TOUCH_DOWN = 1 << 3 ^ 1
TOUCH_UP = 1 << 3 ^ 2
TOUCH_MOVE = 1 << 3 ^ 3
# only used for gesture analyze
TOUCH_PRESS_TIMEOUT = 1 << 3 ^ 4
TOUCH_FOLLOW_TIMEOUT = 1 << 3 ^ 5
TOUCH_MOVESTOP_TIMEOUT = 1 << 3 ^ 6
# DOWN is odd, UP is even & DONW + 1 == UP
KEY_ANY = 1 << 4
KEY_HOME_DOWN = 1 << 4 ^ 1
KEY_HOME_UP = 1 << 4 ^ 2
KEY_BACK_DOWN = 1 << 4 ^ 3
KEY_BACK_UP = 1 << 4 ^ 4
KEY_MENU_DOWN = 1 << 4 ^ 5
KEY_MENU_UP = 1 << 4 ^ 6
KEY_POWER_DOWN = 1 << 4 ^ 7
KEY_POWER_UP = 1 << 4 ^ 8
KEY_VOLUMEDOWN_DOWN = 1 << 4 ^ 9
KEY_VOLUMEDOWN_UP = 1 << 4 ^ 10
KEY_VOLUMEUP_DOWN = 1 << 4 ^ 11
KEY_VOLUMEUP_UP = 1 << 4 ^ 12
# gestures
GST_TAP = 1 << 5 ^ 1
GST_MULTI_TAP = 1 << 5 ^ 2
GST_LONG_PRESS = 1 << 5 ^ 3
GST_LONG_PRESS_RELEASE = 1 << 5 ^ 4
GST_DRAG = 1 << 5 ^ 5
GST_SWIPE = 1 << 5 ^ 6
GST_PINCH_IN = 1 << 5 ^ 7
GST_PINCH_OUT = 1 << 5 ^ 8
HC = HookConstants
HCREPR = {
HC.TOUCH_DOWN : 'D',
HC.TOUCH_UP : 'U',
HC.TOUCH_MOVE : 'M',
HC.TOUCH_PRESS_TIMEOUT : 'P',
HC.TOUCH_FOLLOW_TIMEOUT : 'F',
HC.TOUCH_MOVESTOP_TIMEOUT : 'S',
HC.GST_TAP: 'Tap',
HC.GST_MULTI_TAP: 'MultiTap',
HC.GST_LONG_PRESS: 'LongPress',
HC.GST_LONG_PRESS_RELEASE: 'PressRelease',
HC.GST_DRAG: 'Drag',
HC.GST_SWIPE: 'Swipe',
HC.GST_PINCH_IN: 'PinchIn',
HC.GST_PINCH_OUT: 'PinchOut',
}
class Event(object):
def __init__(self, time, msg):
self.time = time
self.msg = msg
def __str__(self):
return '%s_%s' % (self.__class__.__name__, HCREPR.get(self.msg, self.msg))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(['%s=%s' % (k, v) for k, v in self.__dict__.iteritems()]))
class KeyEvent(Event):
def __init__(self, time, msg, key):
super(KeyEvent, self).__init__(time, msg)
# convert to KEYCODE_xxx for 'adb input keyevent xxx'
if key.startswith('KEY_'):
key = 'KEYCODE_' + key[4:]
self.key = key
class TouchEvent(Event):
def __init__(self, time, msg, slotid, x, y, pressure, touch_major, **extra):
super(TouchEvent, self).__init__(time, msg)
self.slotid = slotid
self.x = x
self.y = y
self.pressure = pressure
self.touch_major = touch_major
self.__dict__.update(extra)
class TouchTimeoutEvent(Event):
def __init__(self, time, msg, slotid):
super(TouchTimeoutEvent, self).__init__(time, msg)
self.slotid = slotid
class GestureEvent(Event):
def __init__(self, msg, track):
# suffixes: s for start, e for end.
        # two-finger gestures need two tracks
if msg in (HC.GST_PINCH_IN, HC.GST_PINCH_OUT):
t1, t2 = track[0], track[1]
ts = min(t1[0].time, t2[0].time)
te = max(t1[-1].time, t2[-1].time)
else:
es, ee = track[0], track[-1]
ts, te = track[0].time, track[-1].time
print 'Gesture', HCREPR.get(msg, msg), ''.join([HCREPR.get(e.msg, e.msg) for e in track]), (es.x, es.y), (ee.x, ee.y)
if msg in (HC.GST_SWIPE, HC.GST_DRAG):
# TODO: check for corners for complicated trace
self.points = [(es.x, es.y), (ee.x, ee.y)]
else:
self.points = [(es.x, es.y), (ee.x, ee.y)]
super(GestureEvent, self).__init__(ts, msg)
self.duration = te - ts
SLOT_NUM = 5
_X, _Y, _VR, _VA, _MJ, _PR, FIELD_NUM = range(7)
INF = 9999
class InputParser(object):
_pat = re.compile('\[\s*(?P<time>[0-9.]+)\] (?P<device>/dev/.*): +(?P<type>\w+) +(?P<code>\w+) +(?P<value>\w+)')
_move_radius = 10
def __init__(self, queue):
self.timediff = None
self.queue = queue
# the 'standard' status temp_status is compared to.
# if changes are great enough, new event are emitted.
# velocity will be calculated for touch-move events.
self._status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
self._status_time = 0
# realtime status, minor changes are cumulated
self._temp_status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
self._temp_status_time = 0
self._touch_batch = []
self._curr_slot = 0
def feed(self, line):
# print line
m = self._pat.search(line)
if not m:
return
_time, _device, _type, _code, _value = m.groups()
_time = float(_time)
if self.timediff is None:
self.timediff = time.time() - _time
_time = self.timediff + _time
try:
_value = int(_value, 16)
except:
pass
if _type == 'EV_SYN':
if _code in ('SYN_REPORT', 'SYN_MT_REPORT'):
try:
self._process_touch_batch()
except IndexError: # there might be a 6th finger, ignore that.
self._touch_batch = []
elif _code == 'SYN_DROPPED':
self._touch_batch = []
else:
# print 'unknown syn code', _code
pass
elif _type == 'EV_KEY':
self.emit_key_event(_time, _code, _value)
elif _type == 'EV_ABS':
self._touch_batch.append((_time, _device, _type, _code, _value))
else:
# print 'unknown input event type', _type
pass
def emit_key_event(self, _time, _code, _value):
name = '%s_%s' % (_code, _value)
msg = getattr(HC, name, None)
if msg is None:
return
event = KeyEvent(_time, msg, _code)
self.queue.put(event)
def emit_touch_event(self, event):
self.queue.put(event)
def _process_touch_batch(self):
'''a batch syncs in about 0.001 seconds.'''
if not self._touch_batch:
return
_time = self._temp_status_time
changed = False
for (_time, _device, _type, _code, _value) in self._touch_batch:
if _code == 'ABS_MT_TRACKING_ID':
if _value == 0xffffffff:
self._temp_status[self._curr_slot] = -INF
changed = True
else:
pass
elif _code == 'ABS_MT_SLOT':
self._curr_slot = _value
else:
if _code == 'ABS_MT_POSITION_X':
self._temp_status[self._curr_slot,_X] = _value
changed = True
elif _code == 'ABS_MT_POSITION_Y':
self._temp_status[self._curr_slot,_Y] = _value
changed = True
elif _code == 'ABS_MT_PRESSURE':
self._temp_status[self._curr_slot,_PR] = _value
elif _code == 'ABS_MT_TOUCH_MAJOR':
self._temp_status[self._curr_slot,_MJ] = _value
else:
print 'Unknown code', _code
self._temp_status_time = _time
self._touch_batch = []
if not changed:
return
# check differences, if position changes are big enough then emit events
diff = self._temp_status - self._status
dt = self._temp_status_time - self._status_time
emitted = False
for i in range(SLOT_NUM):
arr = self._temp_status[i]
oldarr = self._status[i]
dx, dy = diff[i,_X], diff[i,_Y]
if dx > INF or dy > INF:
# touch begin
event = TouchEvent(_time, HC.TOUCH_DOWN, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ])
self.emit_touch_event(event)
emitted = True
elif dx < -INF or dy < -INF:
# touch end
event = TouchEvent(_time, HC.TOUCH_UP, i, oldarr[_X], oldarr[_Y], oldarr[_PR], oldarr[_MJ])
self.emit_touch_event(event)
emitted = True
else:
r, a = radang(float(dx), float(dy))
if r > self._move_radius:
v = r / dt
event = TouchEvent(_time, HC.TOUCH_MOVE, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ], angle=a, velocity=v)
self.emit_touch_event(event)
emitted = True
if not emitted:
return
self._status = self._temp_status.copy()
self._status_time = self._temp_status_time
def radang(x, y):
'''return (radius, angle) of a vector(x, y)'''
if x == 0:
if y == 0:
return 0, 0
return abs(y), 90+180*(y<0)
if y == 0:
return abs(x), 180*(x<0)
r = math.sqrt(x*x+y*y)
a = math.degrees(math.atan(y/x))
if x < 0:
a += 180
elif y < 0:
a += 360
return r, a
class GestureRecognizer(object):
double_tap_delay = 0.5
long_press_delay = 1
move_stop_delay = 0.2
pinch_difference_square = 3000
def __init__(self, queue):
self.queue = queue
self.dispatch_map = {}
self.running = False
self.touches = [None] * SLOT_NUM
# used for recognition
self.tracks = [None for i in range(SLOT_NUM)]
self.track_slots = set()
def register(self, keycode, func):
self.dispatch_map[keycode] = func
def start(self):
if self.running:
return
self.running = True
t = threading.Thread(target=self.process)
t.setDaemon(True)
t.start()
def stop(self):
self.running = False
def process(self):
'''handle events and trigger time-related events'''
timediff = 0
while True:
try:
time.sleep(0.001)
event = self.queue.get_nowait()
self.handle_event(event)
if event.msg & HC.KEY_ANY:
continue
if timediff == 0:
timediff = time.time() - event.time
self.touches[event.slotid] = event
except Queue.Empty:
if not self.running:
break
now = time.time() - timediff
for i in range(SLOT_NUM):
e = self.touches[i]
if e is None:
continue
if e.msg == HC.TOUCH_DOWN and now - e.time > self.long_press_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_PRESS_TIMEOUT, i))
self.touches[i] = None
elif e.msg == HC.TOUCH_UP and now - e.time > self.double_tap_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_FOLLOW_TIMEOUT, i))
self.touches[i] = None
elif e.msg == HC.TOUCH_MOVE and now - e.time > self.move_stop_delay:
self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_MOVESTOP_TIMEOUT, i))
self.touches[i] = None
except:
traceback.print_exc()
print 'process done.'
def handle_event(self, event):
self.dispatch_event(event.msg, event)
if event.msg & HC.KEY_ANY:
self.dispatch_event(HC.KEY_ANY, event)
else:
self.dispatch_event(HC.TOUCH_ANY, event)
self.analyze_tracks(event)
def dispatch_event(self, msg, event):
func = self.dispatch_map.get(msg)
if func is None:
return
try:
func(event)
except:
traceback.print_exc()
def analyze_tracks(self, event):
pass
def handle_gesture(self, msg, tracks):
event = GestureEvent(msg, tracks)
func = self.dispatch_map.get(msg)
if func is None:
return
try:
func(event)
except:
traceback.print_exc()
## NOT COMPLETED ##
class SimpleGestureRecognizer(GestureRecognizer):
N_FINGER = 2
def analyze_tracks(self, event):
# handle one-finger and two-finger gestures only
# means a third finger will be ignored even if one of the
# first two fingers leaves the screen.
i = event.slotid
        # begin gesture when touch down
if event.msg == HC.TOUCH_DOWN:
if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
return
if self.tracks[i] is None:
self.tracks[i] = []
self.track_slots.add(i)
self.tracks[i].append(event)
return
if self.tracks[i] is None:
return
if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
self.tracks[i] = []
elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
# print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'long press'
self.tracks[i] = []
elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
# print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'drag'
self.tracks[i] = []
if len(self.track_slots) == 2:
for s in self.track_slots:
print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
print
elif event.msg == HC.TOUCH_UP:
self.tracks[i].append(event)
if len(self.track_slots) == 2:
for s in self.track_slots:
print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
print
self.tracks[i] = None
self.track_slots.discard(i)
else: # TOUCH_MOVE
self.tracks[i].append(event)
return
# check for pinch/pan
if len(self.track_slots) == 2:
t1, t2 = [self.tracks[s] for s in self.track_slots]
if len(t1) == 0 or len(t2) == 0 or len(t1) + len(t2) < 6:
return
# make copy and check distance changing
t1, t2 = t1[:], t2[:]
dists = []
while len(dists) < 5:
e1, e2 = t1[-1], t2[-1]
dx, dy = e1.x-e2.x, e1.y-e2.y
dists.append(dx*dx+dy*dy)
if e1.time < e2.time:
if len(t2) == 1:
break
else:
t2.pop()
else:
if len(t1) == 1:
break
else:
t1.pop()
print [dists[j+1]-dists[j] for j in range(len(dists)-1)]
# just keep latest position
for s in self.track_slots:
self.tracks[s] = self.tracks[s][-1:]
class RegexpGestureRecognizer(GestureRecognizer):
N_FINGER = 1
def analyze_tracks(self, event):
# handle one-finger gestures only
i = event.slotid
        # begin gesture when touch down
if event.msg == HC.TOUCH_DOWN:
if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
return
if not self.tracks[i]:
self.tracks[i] = []
self.track_slots.add(i)
self.tracks[i].append(event)
return
if self.tracks[i] is None:
return
s = ''.join([HCREPR.get(e.msg) for e in self.tracks[i]])
if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
if re.match('^DM?U$', s):
self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
elif re.match('^(DM?U)+DM?U$', s):
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
self.tracks[i] = None
self.track_slots.discard(i)
elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
if re.match('^D?MM+$', s):
self.handle_gesture(HC.GST_DRAG, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
if s == 'D':
self.handle_gesture(HC.GST_LONG_PRESS, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_UP:
self.tracks[i].append(event) # note: it's not the same with s after add
if s == '':
self.handle_gesture(HC.GST_LONG_PRESS_RELEASE, [event])
elif re.match('^D?MM+$', s):
self.handle_gesture(HC.GST_SWIPE, self.tracks[i][:])
self.tracks[i] = []
elif _MULTI_TAP_NUM == 1 and re.match('^DM?$', s):
self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
self.tracks[i] = []
elif _MULTI_TAP_NUM > 1 and re.match('^(DM?U){%d}DM?$' % (_MULTI_TAP_NUM-1,), s):
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
self.tracks[i] = []
elif event.msg == HC.TOUCH_MOVE:
if re.match('^(DU)+D$', s):
if s == 'DUD':
self.handle_gesture(HC.GST_TAP, self.tracks[i][:-1])
else:
self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:-1])
self.tracks[i] = self.tracks[i][-1:]
self.tracks[i].append(event)
NOTACTIVE, ACTIVE, STAGE_1, STAGE_2, TRIGGERED = range(5)
## NOT COMPLETED ##
class StateMachineGestureRecognizer(GestureRecognizer):
state_map = {
HC.GST_TAP: {
NOTACTIVE: { HC.TOUCH_DOWN : ACTIVE },
ACTIVE: {
HC.TOUCH_MOVE: STAGE_1,
HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
},
STAGE_1: {
HC.TOUCH_MOVE: NOTACTIVE,
HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
}
},
HC.GST_SWIPE: {
NOTACTIVE: { HC.TOUCH_DOWN: ACTIVE },
ACTIVE: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_1},
STAGE_1: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_2 },
STAGE_2: { HC.TOUCH_UP: TRIGGERED, HC.TOUCH_MOVESTOP_TIMEOUT: TRIGGERED},
},
}
def __init__(self, queue):
super(self.__class__, self).__init__(queue)
self.state = {}
for k in self.state_map:
self.state[k] = NOTACTIVE
print self.state_map
def analyze_tracks(self, event):
for k, v in self.state.iteritems():
s = self.state_map.get(k, {}).get(v, {}).get(event.msg)
if s is not None:
self.state[k] = s
triggered = False
for k, v in self.state.iteritems():
if v == TRIGGERED:
print 'trigger event', k
triggered = True
if triggered:
for k in self.state:
self.state[k] = NOTACTIVE
class AndroidInputHookManager(object):
def __init__(self, serial=None, processor_class=RegexpGestureRecognizer):
self._serial = serial
self.running = False
self._queue = Queue.Queue()
self._listener = None
self._parser = InputParser(self._queue)
self._processor = processor_class(self._queue)
def set_serial(self, serial):
self._serial = serial
def register(self, keycode, func):
'''register hook function'''
self._processor.register(keycode, func)
def hook(self):
self._processor.start()
self.running = True
t = threading.Thread(target=self._run_hook)
t.setDaemon(True)
t.start()
def _run_hook(self):
cmd = ['adb']
if self._serial:
cmd.extend(['-s', self._serial])
cmd.extend(['shell', 'getevent', '-lt'])
while True:
# start listener
self._listener = p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
try:
line = p.stdout.readline().strip()
if not line:
if p.poll() is not None:
break
continue
self._parser.feed(line)
except KeyboardInterrupt:
p.kill()
except:
p.kill()
traceback.print_exc()
if not self.running:
break
state = subprocess.check_output(['adb', '-s', self._serial, 'get-state']).strip()
if state != 'device':
print 'adb status(%s) wrong! stop hook.' % (state,)
break
print 'adb getevent died, reconnecting...'
time.sleep(1)
def unhook(self):
self.running = False
self._processor.stop()
if self._listener:
self._listener.kill()
HookManager = AndroidInputHookManager
if __name__ == '__main__':
hm = AndroidInputHookManager(processor_class=RegexpGestureRecognizer)
hm.hook()
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
hm.unhook()
| en | 0.776815 | # -*- coding: utf-8 -*- # An android event hook via getevent. # Only ABS_MT_POSITION_X(Y) events are handled. # # Basic input: TouchDown(D), TouchUp(U), TouchMove(M) # Basic timeouts: TouchPressTimeout(P), TouchFollowTimeout(F), TouchMoveStopTimeout(S) # guestures are defined as follows: # Tap/Touch/Click: DM?UF # TapFollow: (DM?U)+DM?UF # LongPress: DP, may be followed by Drag or Swipe # Drag: D?M+S, may be followed by Drag or Swipe # Swipe/Fling: D?M+U, difference with `Drag` is that `TouchMoveStopTimeout` cannot be fired. # 2-Finger-Pinch: distance changing # 2-Finger-Drag: distance hold while moving # where '?' after M means a little movement and '+' means a large one. # other guestures are ignored. # global, max MultiTap count. Set to 1 to disable MultiTap, 0 for infinite. # basic events # only used for gesture analyze # DOWN is odd, UP is even & DONW + 1 == UP # gestures # convert to KEYCODE_xxx for 'adb input keyevent xxx' # suffixes: s for start, e for end. # two-finger guestures need two tracks # TODO: check for corners for complicated trace # the 'standard' status temp_status is compared to. # if changes are great enough, new event are emitted. # velocity will be calculated for touch-move events. # realtime status, minor changes are cumulated # print line # there might be a 6th finger, ignore that. # print 'unknown syn code', _code # print 'unknown input event type', _type a batch syncs in about 0.001 seconds. # check differences, if position changes are big enough then emit events # touch begin # touch end return (radius, angle) of a vector(x, y) # used for recognition handle events and trigger time-related events ## NOT COMPLEMENTED ## # handle one-finger and two-finger gestures only # means a third finger will be ignored even if one of the # first two fingers leaves the screen. # begin guesture when touch down # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'long press' # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'drag' # TOUCH_MOVE # check for pinch/pan # make copy and check distance changing # just keep latest position # handle one-finger gestures only # begin guesture when touch down # note: it's not the same with s after add ## NOT COMPLEMENTED ## register hook function # start listener | 2.725555 | 3 |
swexpert/d2/sw_5177.py | ruslanlvivsky/python-algorithm | 3 | 6632344 | # 이진 힙
test_cases = int(input())
for t in range(1, test_cases + 1):
n = int(input())
nums = list(map(int, input().split()))
heap = [0]
for num in nums:
heap.append(num)
idx = len(heap) - 1
parent = idx // 2
while parent > 0:
if heap[idx] < heap[parent]:
heap[idx], heap[parent] = heap[parent], heap[idx]
idx = parent
parent = idx // 2
result = 0
parent = (len(heap) - 1) // 2
while parent > 0:
result += heap[parent]
parent = parent // 2
print('#{} {}'.format(t, result))
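    # The original header comment "이진 힙" is Korean for "binary heap". The loop above
    # builds a min-heap in heap[1:] by sifting each new value up, then sums the values on
    # the path from the last node's parent up to the root. Small worked example:
    # nums = [3, 1, 2] -> heap becomes [0, 1, 3, 2]; the last node sits at index 3 and its
    # only ancestor is heap[1] = 1, so result = 1.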
| # 이진 힙
test_cases = int(input())
for t in range(1, test_cases + 1):
n = int(input())
nums = list(map(int, input().split()))
heap = [0]
for num in nums:
heap.append(num)
idx = len(heap) - 1
parent = idx // 2
while parent > 0:
if heap[idx] < heap[parent]:
heap[idx], heap[parent] = heap[parent], heap[idx]
idx = parent
parent = idx // 2
result = 0
parent = (len(heap) - 1) // 2
while parent > 0:
result += heap[parent]
parent = parent // 2
print('#{} {}'.format(t, result))
| none | 1 | 3.073281 | 3 |
|
pylabs/lab1-4.py | Vizald/labs | 0 | 6632345 | from math import *
arrayG = []
arrayF = []
arrayY = []
arrayA = []
arrayX = []
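# English translations of the Russian prompts and messages used below:
#   "Введите a:" / "Введите x:"         -> "Enter a:" / "Enter x:"
#   "Какую переменную ищем?"            -> "Which variable are we computing? (1-G, 2-F, 3-Y)"
#   "Какой шаг?" / "Сколько шагов?"     -> "What step size?" / "How many steps?"
#   "На ноль делить нельзя"             -> "Division by zero is not allowed"
#   "Минимальное/Максимальное значение" -> "Minimum/Maximum value"
#   "Log отрицательный"                 -> "The log argument is negative"
#   "Продолжить?(да/нет)"               -> "Continue? (yes/no)"; the comparison expects "да" ("yes")
#   "Вы ввели неправильную переменную"  -> "You entered an invalid variable"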
while True:
a = int(input("Введите a:"))
x = int(input("Введите x:"))
number = int(input("Какую переменную ищем?(1-3; 1-G, 2-F, 3-Y): "))
step = int(input("Какой шаг?: "))
count = int(input("Сколько шагов?: "))
if number == 1:
for i in range(count):
if ((45 * a * a - 29 * a * x + 4 * x * x) != 0):
g = - (16 * a * a + 24 * a * x - 27 * x * x) / (45 * a * a - 29 * a * x + 4 * x * x)
arrayG.append(g)
arrayA.append(a)
arrayX.append(x)
print("G =", arrayG[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
else:
print("На ноль делить нельзя ^-^")
print("Минимальное значение:", min(arrayG), "Максимальное значение:", max(arrayG))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayG.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
elif number == 2:
for i in range(count):
f = -atan(10 * a * a + 13 * a * a - 30 * x * x)
arrayF.append(f)
arrayA.append(a)
arrayX.append(x)
print("F =", arrayF[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
print("Минимальное значение:", min(arrayF), "Максимальное значение:", max(arrayF))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayF.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
elif number == 3:
for i in range(count):
if (2 * a * a + 19 * a * x + 9 * x * x + 1) >= 0:
y = log(2 * a * a + 19 * a * x + 9 * x * x + 1) / log(10)
arrayY.append(y)
arrayA.append(a)
arrayX.append(x)
print("Y =", arrayY[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
else:
print("Log отрицательный =(")
print("Минимальное значение:", min(arrayY), "Максимальное значение:", max(arrayY))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayY.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
else:
print("Вы ввели неправильную переменную :(")
| from math import *
arrayG = []
arrayF = []
arrayY = []
arrayA = []
arrayX = []
while True:
a = int(input("Введите a:"))
x = int(input("Введите x:"))
number = int(input("Какую переменную ищем?(1-3; 1-G, 2-F, 3-Y): "))
step = int(input("Какой шаг?: "))
count = int(input("Сколько шагов?: "))
if number == 1:
for i in range(count):
if ((45 * a * a - 29 * a * x + 4 * x * x) != 0):
g = - (16 * a * a + 24 * a * x - 27 * x * x) / (45 * a * a - 29 * a * x + 4 * x * x)
arrayG.append(g)
arrayA.append(a)
arrayX.append(x)
print("G =", arrayG[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
else:
print("На ноль делить нельзя ^-^")
print("Минимальное значение:", min(arrayG), "Максимальное значение:", max(arrayG))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayG.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
elif number == 2:
for i in range(count):
f = -atan(10 * a * a + 13 * a * a - 30 * x * x)
arrayF.append(f)
arrayA.append(a)
arrayX.append(x)
print("F =", arrayF[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
print("Минимальное значение:", min(arrayF), "Максимальное значение:", max(arrayF))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayF.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
elif number == 3:
for i in range(count):
if (2 * a * a + 19 * a * x + 9 * x * x + 1) >= 0:
y = log(2 * a * a + 19 * a * x + 9 * x * x + 1) / log(10)
arrayY.append(y)
arrayA.append(a)
arrayX.append(x)
print("Y =", arrayY[i], "a =", arrayA[i], "x =", arrayX[i])
a += step
x += step
else:
print("Log отрицательный =(")
print("Минимальное значение:", min(arrayY), "Максимальное значение:", max(arrayY))
continuation = str(input("Продолжить?(да/нет): "))
if continuation == "да":
arrayY.clear()
arrayA.clear()
arrayX.clear()
continue
else:
break
else:
print("Вы ввели неправильную переменную :(")
| none | 1 | 3.724279 | 4 |
|
connect/cli/plugins/product/sync/params.py | maxipavlovic/connect-cli | 0 | 6632346 | # -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import json
import re
from collections import namedtuple
from json.decoder import JSONDecodeError
from tqdm import trange
from openpyxl.styles import Alignment
from connect.cli.core.constants import DEFAULT_BAR_FORMAT
from connect.cli.plugins.product.constants import (
PARAM_TYPES,
PARAMS_COLS_HEADERS,
)
from connect.cli.plugins.product.sync.base import ProductSynchronizer
from connect.cli.plugins.product.utils import get_json_object_for_param, ParamSwitchNotSupported
from connect.client import ClientError
fields = (v.replace(' ', '_').lower() for v in PARAMS_COLS_HEADERS.values())
_RowData = namedtuple('RowData', fields)
class ParamsSynchronizer(ProductSynchronizer):
def __init__(self, client, silent):
self._param_type = None
self._worksheet_name = None
super(ParamsSynchronizer, self).__init__(client, silent)
def open(self, input_file, worksheet):
if worksheet == "Ordering Parameters":
self._param_type = 'ordering'
elif worksheet == "Fulfillment Parameters":
self._param_type = 'fulfillment'
elif worksheet == "Configuration Parameters":
self._param_type = 'configuration'
self._worksheet_name = worksheet
return super(ParamsSynchronizer, self).open(input_file, worksheet)
def sync(self): # noqa: CCR001
ws = self._wb[self._worksheet_name]
errors = {}
skipped_count = 0
created_items = []
updated_items = []
deleted_items = []
row_indexes = trange(
2, ws.max_row + 1, disable=self._silent, leave=True, bar_format=DEFAULT_BAR_FORMAT,
)
for row_idx in row_indexes:
data = _RowData(*[ws.cell(row_idx, col_idx).value for col_idx in range(1, 15)])
row_indexes.set_description(f'Processing param {data.id}')
if data.action == '-':
skipped_count += 1
continue
row_errors = self._validate_row(data)
if row_errors:
errors[row_idx] = row_errors
continue
if data.action == 'delete':
try:
self._client.products[self._product_id].parameters[data.verbose_id].delete()
except ClientError:
pass
deleted_items.append(data)
continue
param_payload = {}
if data.json_properties:
param_payload = json.loads(data.json_properties)
param_payload['name'] = data.id
param_payload['title'] = data.title
param_payload['description'] = data.description
param_payload['phase'] = data.phase
param_payload['scope'] = data.scope
param_payload['type'] = data.type
if 'constraints' not in param_payload:
param_payload['constraints'] = {}
param_payload['constraints']['required'] = False if data.required == '-' else True
param_payload['constraints']['unique'] = False if data.unique == '-' else True
param_payload['constraints']['hidden'] = False if data.hidden == '-' else True
if data.action == 'update':
try:
original_param = self._client.products[self._product_id].parameters[
data.verbose_id
].get()
self._compare_param(original_param, data)
param = self._client.products[self._product_id].parameters[
data.verbose_id
].update(
param_payload,
)
self._update_sheet_row(ws, row_idx, param)
updated_items.append(param)
except Exception as e:
errors[row_idx] = [str(e)]
if data.action == 'create':
try:
param = self._client.products[self._product_id].parameters.create(
param_payload,
)
self._update_sheet_row(ws, row_idx, param)
created_items.append(param)
except Exception as e:
errors[row_idx] = [str(e)]
return (
skipped_count,
len(created_items),
len(updated_items),
len(deleted_items),
errors,
)
@staticmethod
def _update_sheet_row(ws, row_idx, param):
ws.cell(row_idx, 1, value=param['id']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 12, value=get_json_object_for_param(param))
ws.cell(row_idx, 13, value=param['events']['created']['at']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx,
14,
value=param['events'].get('updated', {}).get('at'),
).alignment = Alignment(
horizontal='left',
vertical='top',
)
@staticmethod
def _compare_param(original, data):
if original['type'] != data.type:
raise ParamSwitchNotSupported('Switching parameter type is not supported')
if original['scope'] != data.scope:
raise ParamSwitchNotSupported('switching scope is not supported')
if original['phase'] != data.phase:
raise ParamSwitchNotSupported('switching phase is not supported')
def _validate_row(self, data): # noqa: CCR001
errors = []
if not data.id:
errors.append(
'Parameter must have an id',
)
return errors
id_pattern = "^[A-Za-z0-9_-]*$"
if not bool(re.match(id_pattern, data.id)):
errors.append(
                f'Parameter ID must contain only letters, numbers, `_` and `-`, provided {data.id}',
)
elif data.phase != self._param_type:
errors.append(
                f'Only parameters of type {self._param_type} are supported when processing '
                f'{self._worksheet_name}; the provided value was {data.phase}.',
)
elif data.action in ('update', 'delete') and (
not data.verbose_id or not data.verbose_id.startswith('PRM-')
):
errors.append(
'Verbose ID is required on update and delete actions.',
)
elif data.type not in PARAM_TYPES:
errors.append(
f'Parameter type {data.type} is not one of the supported ones:'
f'{",".join(PARAM_TYPES)}',
)
elif self._param_type in ('ordering', 'fulfillment') and data.scope not in (
'asset', 'tier1', 'tier2',
):
errors.append(
f'Only asset, tier1 and tier2 scopes are supported for {self._worksheet_name}',
)
elif self._param_type == 'configuration' and data.scope not in (
'item', 'item_marketplace', 'marketplace', 'product',
):
errors.append(
'Only item, item_marketplace, marketplace and product scopes are supported for '
f'{self._worksheet_name}',
)
elif data.required not in (True, 'True', '-'):
errors.append(
'Required must be either True or `-`',
)
elif data.unique not in (True, 'True', '-'):
errors.append(
'Unique must be either True or `-`',
)
elif data.hidden not in (True, 'True', '-'):
errors.append(
'Hidden must be either True or `-`',
)
if len(errors) > 0:
return errors
if data.json_properties:
try:
json.loads(data.json_properties)
except JSONDecodeError:
errors.append(
'JSON properties must have json format',
)
return errors
| # -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import json
import re
from collections import namedtuple
from json.decoder import JSONDecodeError
from tqdm import trange
from openpyxl.styles import Alignment
from connect.cli.core.constants import DEFAULT_BAR_FORMAT
from connect.cli.plugins.product.constants import (
PARAM_TYPES,
PARAMS_COLS_HEADERS,
)
from connect.cli.plugins.product.sync.base import ProductSynchronizer
from connect.cli.plugins.product.utils import get_json_object_for_param, ParamSwitchNotSupported
from connect.client import ClientError
fields = (v.replace(' ', '_').lower() for v in PARAMS_COLS_HEADERS.values())
_RowData = namedtuple('RowData', fields)
class ParamsSynchronizer(ProductSynchronizer):
def __init__(self, client, silent):
self._param_type = None
self._worksheet_name = None
super(ParamsSynchronizer, self).__init__(client, silent)
def open(self, input_file, worksheet):
if worksheet == "Ordering Parameters":
self._param_type = 'ordering'
elif worksheet == "Fulfillment Parameters":
self._param_type = 'fulfillment'
elif worksheet == "Configuration Parameters":
self._param_type = 'configuration'
self._worksheet_name = worksheet
return super(ParamsSynchronizer, self).open(input_file, worksheet)
def sync(self): # noqa: CCR001
ws = self._wb[self._worksheet_name]
errors = {}
skipped_count = 0
created_items = []
updated_items = []
deleted_items = []
row_indexes = trange(
2, ws.max_row + 1, disable=self._silent, leave=True, bar_format=DEFAULT_BAR_FORMAT,
)
for row_idx in row_indexes:
data = _RowData(*[ws.cell(row_idx, col_idx).value for col_idx in range(1, 15)])
row_indexes.set_description(f'Processing param {data.id}')
if data.action == '-':
skipped_count += 1
continue
row_errors = self._validate_row(data)
if row_errors:
errors[row_idx] = row_errors
continue
if data.action == 'delete':
try:
self._client.products[self._product_id].parameters[data.verbose_id].delete()
except ClientError:
pass
deleted_items.append(data)
continue
param_payload = {}
if data.json_properties:
param_payload = json.loads(data.json_properties)
param_payload['name'] = data.id
param_payload['title'] = data.title
param_payload['description'] = data.description
param_payload['phase'] = data.phase
param_payload['scope'] = data.scope
param_payload['type'] = data.type
if 'constraints' not in param_payload:
param_payload['constraints'] = {}
param_payload['constraints']['required'] = False if data.required == '-' else True
param_payload['constraints']['unique'] = False if data.unique == '-' else True
param_payload['constraints']['hidden'] = False if data.hidden == '-' else True
if data.action == 'update':
try:
original_param = self._client.products[self._product_id].parameters[
data.verbose_id
].get()
self._compare_param(original_param, data)
param = self._client.products[self._product_id].parameters[
data.verbose_id
].update(
param_payload,
)
self._update_sheet_row(ws, row_idx, param)
updated_items.append(param)
except Exception as e:
errors[row_idx] = [str(e)]
if data.action == 'create':
try:
param = self._client.products[self._product_id].parameters.create(
param_payload,
)
self._update_sheet_row(ws, row_idx, param)
created_items.append(param)
except Exception as e:
errors[row_idx] = [str(e)]
return (
skipped_count,
len(created_items),
len(updated_items),
len(deleted_items),
errors,
)
@staticmethod
def _update_sheet_row(ws, row_idx, param):
ws.cell(row_idx, 1, value=param['id']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(row_idx, 12, value=get_json_object_for_param(param))
ws.cell(row_idx, 13, value=param['events']['created']['at']).alignment = Alignment(
horizontal='left',
vertical='top',
)
ws.cell(
row_idx,
14,
value=param['events'].get('updated', {}).get('at'),
).alignment = Alignment(
horizontal='left',
vertical='top',
)
@staticmethod
def _compare_param(original, data):
if original['type'] != data.type:
raise ParamSwitchNotSupported('Switching parameter type is not supported')
if original['scope'] != data.scope:
raise ParamSwitchNotSupported('switching scope is not supported')
if original['phase'] != data.phase:
raise ParamSwitchNotSupported('switching phase is not supported')
def _validate_row(self, data): # noqa: CCR001
errors = []
if not data.id:
errors.append(
'Parameter must have an id',
)
return errors
id_pattern = "^[A-Za-z0-9_-]*$"
if not bool(re.match(id_pattern, data.id)):
errors.append(
f'Parameter ID must contain only letters, numbers, `_` and `-`, provided {data.id}',
)
elif data.phase != self._param_type:
errors.append(
f'Parameters of type {self._param_type} are only supported when processing '
f'{self._worksheet_name}. Has been provided {data.phase}.',
)
elif data.action in ('update', 'delete') and (
not data.verbose_id or not data.verbose_id.startswith('PRM-')
):
errors.append(
'Verbose ID is required on update and delete actions.',
)
elif data.type not in PARAM_TYPES:
errors.append(
f'Parameter type {data.type} is not one of the supported ones:'
f'{",".join(PARAM_TYPES)}',
)
elif self._param_type in ('ordering', 'fulfillment') and data.scope not in (
'asset', 'tier1', 'tier2',
):
errors.append(
f'Only asset, tier1 and tier2 scopes are supported for {self._worksheet_name}',
)
elif self._param_type == 'configuration' and data.scope not in (
'item', 'item_marketplace', 'marketplace', 'product',
):
errors.append(
'Only item, item_marketplace, marketplace and product scopes are supported for '
f'{self._worksheet_name}',
)
elif data.required not in (True, 'True', '-'):
errors.append(
'Required must be either True or `-`',
)
elif data.unique not in (True, 'True', '-'):
errors.append(
'Unique must be either True or `-`',
)
elif data.hidden not in (True, 'True', '-'):
errors.append(
'Hidden must be either True or `-`',
)
if len(errors) > 0:
return errors
if data.json_properties:
try:
json.loads(data.json_properties)
except JSONDecodeError:
errors.append(
'JSON properties must have json format',
)
return errors
| en | 0.760775 | # -*- coding: utf-8 -*- # This file is part of the Ingram Micro Cloud Blue Connect connect-cli. # Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved. # noqa: CCR001 # noqa: CCR001 | 2.001308 | 2 |
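The synchronizer above turns spreadsheet cells into boolean constraint flags, treating a literal '-' as False and anything else as True. A minimal stand-alone sketch of that convention follows; the to_flag helper is hypothetical and not part of connect-cli.

def to_flag(cell_value):
    """Return False for a '-' spreadsheet cell, True for anything else (sketch only)."""
    return cell_value != '-'

constraints = {
    'required': to_flag('-'),      # -> False
    'unique': to_flag('True'),     # -> True
    'hidden': to_flag('-'),        # -> False
}
print(constraints)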
towhee/engine/task_queue.py | yanliang567/towhee | 1 | 6632347 | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
from towhee.engine.task import Task
class TaskQueue:
"""
A queue for `Task` objects exposed to the `TaskScheduler` which allows the scheduler
to assign tasks to individual instances of `TaskExecutor`.
Args:
size:
Maximum number of allowable queue elements.
"""
def __init__(self, size: int = 0):
self._queue = queue.Queue(size)
@property
def empty(self) -> bool:
"""Indicates whether `TaskQueue` is empty. Returns true if the queue has no
tasks.
"""
return self._queue.empty()
@property
def full(self) -> bool:
"""Indicates whether or not the `TaskQueue` is at its maximum capacity.
"""
return self._queue.full()
@property
def size(self) -> int:
"""Returns the number of tasks in the TaskQueue.
"""
return self._queue.qsize()
def push(self, task: Task) -> bool:
"""Pushes a `Task` object to the end of the queue. Returns `True` if the
operation was successful and `False` otherwise. A return value of `False` most
likely indicates that the queue has reached its maximum capacity.
Args:
task: (`towhee.engine.Task`)
`Task` object to add to the end of the queue.
"""
try:
self._queue.put_nowait(task)
except queue.Full:
return False
return True
def pop(self) -> Task:
"""Attempts to acquire the first item off of the queue.
Returns:
(`towhee.engine.Task`)
First `Task` object available on the queue, or None if the queue is
empty
"""
try:
return self._queue.get_nowait()
except queue.Empty:
return None
| # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
from towhee.engine.task import Task
class TaskQueue:
"""
A queue for `Task` objects exposed to the `TaskScheduler` which allows the scheduler
to assign tasks to individual instances of `TaskExecutor`.
Args:
size:
Maximum number of allowable queue elements.
"""
def __init__(self, size: int = 0):
self._queue = queue.Queue(size)
@property
def empty(self) -> bool:
"""Indicates whether `TaskQueue` is empty. Returns true if the queue has no
tasks.
"""
return self._queue.empty()
@property
def full(self) -> bool:
"""Indicates whether or not the `TaskQueue` is at its maximum capacity.
"""
return self._queue.full()
@property
def size(self) -> int:
"""Returns the number of tasks in the TaskQueue.
"""
return self._queue.qsize()
def push(self, task: Task) -> bool:
"""Pushes a `Task` object to the end of the queue. Returns `True` if the
operation was successful and `False` otherwise. A return value of `False` most
likely indicates that the queue has reached its maximum capacity.
Args:
task: (`towhee.engine.Task`)
`Task` object to add to the end of the queue.
"""
try:
self._queue.put_nowait(task)
except queue.Full:
return False
return True
def pop(self) -> Task:
"""Attempts to acquire the first item off of the queue.
Returns:
(`towhee.engine.Task`)
First `Task` object available on the queue, or None if the queue is
empty
"""
try:
return self._queue.get_nowait()
except queue.Empty:
return None
| en | 0.853085 | # Copyright 2021 Zilliz. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A queue for `Task` objects exposed to the `TaskScheduler` which allows the scheduler to assign tasks to individual instances of `TaskExecutor`. Args: size: Maximum number of allowable queue elements. Indicates whether `TaskQueue` is empty. Returns true if the queue has no tasks. Indicates whether or not the `TaskQueue` is at its maximum capacity. Returns the number of tasks in the TaskQueue. Pushes a `Task` object to the end of the queue. Returns `True` if the operation was successful and `False` otherwise. A return value of `False` most likely indicates that the queue has reached its maximum capacity. Args: task: (`towhee.engine.Task`) `Task` object to add to the end of the queue. Attempts to acquire the first item off of the queue. Returns: (`towhee.engine.Task`) First `Task` object available on the queue, or None if the queue is empty | 2.729226 | 3 |
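A short usage sketch of the queue API documented above, assuming TaskQueue is importable from towhee.engine.task_queue; a plain placeholder object stands in for Task, whose constructor is not shown in this row.

from towhee.engine.task_queue import TaskQueue

class _PlaceholderTask:
    """Stand-in for towhee.engine.task.Task, used only to illustrate queue behavior."""
    def __init__(self, name):
        self.name = name

tq = TaskQueue(size=1)
print(tq.push(_PlaceholderTask('t1')))   # True: accepted
print(tq.push(_PlaceholderTask('t2')))   # False: queue is at capacity
print(tq.pop().name)                     # 't1'
print(tq.pop())                          # None: queue is now empty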
examples/test_camera.py | Mavengers/smarthelmet | 1 | 6632348 | <filename>examples/test_camera.py<gh_stars>1-10
import cv2
import time
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
img = cv2.flip(img, -1)
cv2.imshow("img", img)
if cv2.waitKey(1) & 0xFF == 27:
break
cap.release()
cv2.destroyAllWindows()
| <filename>examples/test_camera.py<gh_stars>1-10
import cv2
import time
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
img = cv2.flip(img, -1)
cv2.imshow("img", img)
if cv2.waitKey(1) & 0xFF == 27:
break
cap.release()
cv2.destroyAllWindows()
| none | 1 | 2.758761 | 3 |
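For reference, the flip code -1 used in the capture loop above mirrors the frame on both axes. A tiny sketch of the three flip codes, assuming OpenCV and NumPy are installed:

import numpy as np
import cv2

frame = np.arange(6, dtype=np.uint8).reshape(2, 3)
print(cv2.flip(frame, 0))    # flip vertically (around the x-axis)
print(cv2.flip(frame, 1))    # flip horizontally (around the y-axis)
print(cv2.flip(frame, -1))   # flip both axes, as in the test script above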
blog/apps/comment/filters.py | panchaoco/blog-py | 0 | 6632349 | <reponame>panchaoco/blog-py
# -*- coding: utf-8 -*-
__author__ = 'panchao'
import django_filters
from .models import Comment
class CommentFilter(django_filters.rest_framework.FilterSet):
article_id = django_filters.NumberFilter(field_name='article_id', lookup_expr='exact')
class Meta:
model = Comment
fields = ['article_id'] | # -*- coding: utf-8 -*-
__author__ = 'panchao'
import django_filters
from .models import Comment
class CommentFilter(django_filters.rest_framework.FilterSet):
article_id = django_filters.NumberFilter(field_name='article_id', lookup_expr='exact')
class Meta:
model = Comment
fields = ['article_id'] | en | 0.769321 | # -*- coding: utf-8 -*- | 1.758359 | 2 |
back-end/grocerybot/spiders/albertheijn_spider.py | TvanSchagen/grocerybot | 1 | 6632350 | import scrapy
import json
from datetime import datetime as dt
from grocerybot.items import create_grocery_bot_item
from grocerybot.spiders.models.page_attributes import PageAttributes
from grocerybot.helpers.weight_standardizer import WeightStandardizer
def parse_json_response(response):
# convert repsonse to json
return json.loads(response.body)
class ProductsSpider(scrapy.Spider):
name = 'ah'
root_url = 'https://www.ah.nl/service/rest/delegate'
start_urls = [f'{root_url}?url=%2Fproducten']
def parse(self, response):
# self.logger.info('main: %s' % response.url)
json_res = parse_json_response(response)
# select the json node where the categories are loaded
json_res = json_res[u'_embedded'][u'lanes'][0]['_embedded']['items']
for item in json_res:
# Visit only the product categories
if item['type'] == 'ProductCategory':
# from each category, extract the href
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_categories)
def parse_categories(self, response):
json_res = parse_json_response(response)
json_res = json_res[u'_embedded'][u'lanes']
# Find the lanes that correspond to filters - these are the ones containing links to subcategory requests
filter_lane = next(lane for lane in json_res if lane['id'] == 'Filters')
subcat_filters = next(
(sub for sub in filter_lane['_embedded']['items'][0]['_embedded']['filters'] if sub['label'] == 'Soort'),
-1)
# If no 'Soort' section is found, then there are no more filters to be applied, so we crawl the product.
if subcat_filters == -1:
# Find the lane that corresponds to products.
product_lane = next(lane for lane in json_res if lane['type'] == 'ProductLane')
for item in product_lane['_embedded']['items']:
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_products)
# If a Soort section is found, we follow this link recursively.
else:
for item in subcat_filters['_embedded']['filterItems']:
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_categories)
def parse_products(self, response):
json_res = parse_json_response(response)
page_title = json_res['title']
json_res = json_res[u'_embedded'][u'lanes']
# Get the ProductDetailLane
product_lane = next(lane for lane in json_res if lane['type'] == 'ProductDetailLane')
product_details = product_lane['_embedded']['items'][0]
if product_details:
product_name = product_details['_embedded']['product']['description']
description = product_details['_embedded']['product']['details']['summary']
description = description.replace('[list]', '')
description = description.replace('[*]', '')
size_or_weight = product_details['_embedded']['product']['unitSize']
if size_or_weight is not None:
if "stuk" in size_or_weight:
size = size_or_weight
weight_q = None
weight_ind = None
else:
size = None
weight_q = WeightStandardizer.standardize_quantity(size_or_weight)
weight_ind = WeightStandardizer.standardize_indicator(size_or_weight)
else:
size = None
weight_q = None
weight_ind = None
images = product_details['_embedded']['product']['images']
img_src = None
if images:
first_image = images[0]
if first_image:
img_src = first_image['link']['href']
price = product_details['_embedded']['product']['priceLabel']['now']
# filename = f'data/ah/{title}.html'
# with open(filename, 'wb') as f:
# f.write(response.body)
yield create_grocery_bot_item(product_name, page_title, description,
'<NAME>', response.url, dt.now(), weight_q, weight_ind, size, '',
price, img_src)
| import scrapy
import json
from datetime import datetime as dt
from grocerybot.items import create_grocery_bot_item
from grocerybot.spiders.models.page_attributes import PageAttributes
from grocerybot.helpers.weight_standardizer import WeightStandardizer
def parse_json_response(response):
# convert repsonse to json
return json.loads(response.body)
class ProductsSpider(scrapy.Spider):
name = 'ah'
root_url = 'https://www.ah.nl/service/rest/delegate'
start_urls = [f'{root_url}?url=%2Fproducten']
def parse(self, response):
# self.logger.info('main: %s' % response.url)
json_res = parse_json_response(response)
# select the json node where the categories are loaded
json_res = json_res[u'_embedded'][u'lanes'][0]['_embedded']['items']
for item in json_res:
# Visit only the product categories
if item['type'] == 'ProductCategory':
# from each category, extract the href
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_categories)
def parse_categories(self, response):
json_res = parse_json_response(response)
json_res = json_res[u'_embedded'][u'lanes']
# Find the lanes that correspond to filters - these are the ones containing links to subcategory requests
filter_lane = next(lane for lane in json_res if lane['id'] == 'Filters')
subcat_filters = next(
(sub for sub in filter_lane['_embedded']['items'][0]['_embedded']['filters'] if sub['label'] == 'Soort'),
-1)
# If no 'Soort' section is found, then there are no more filters to be applied, so we crawl the product.
if subcat_filters == -1:
# Find the lane that corresponds to products.
product_lane = next(lane for lane in json_res if lane['type'] == 'ProductLane')
for item in product_lane['_embedded']['items']:
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_products)
# If a Soort section is found, we follow this link recursively.
else:
for item in subcat_filters['_embedded']['filterItems']:
href = str(item[u'navItem'][u'link'][u'href'])
yield response.follow(f'{self.root_url}?url={href}', self.parse_categories)
def parse_products(self, response):
json_res = parse_json_response(response)
page_title = json_res['title']
json_res = json_res[u'_embedded'][u'lanes']
# Get the ProductDetailLane
product_lane = next(lane for lane in json_res if lane['type'] == 'ProductDetailLane')
product_details = product_lane['_embedded']['items'][0]
if product_details:
product_name = product_details['_embedded']['product']['description']
description = product_details['_embedded']['product']['details']['summary']
description = description.replace('[list]', '')
description = description.replace('[*]', '')
size_or_weight = product_details['_embedded']['product']['unitSize']
if size_or_weight is not None:
if "stuk" in size_or_weight:
size = size_or_weight
weight_q = None
weight_ind = None
else:
size = None
weight_q = WeightStandardizer.standardize_quantity(size_or_weight)
weight_ind = WeightStandardizer.standardize_indicator(size_or_weight)
else:
size = None
weight_q = None
weight_ind = None
images = product_details['_embedded']['product']['images']
img_src = None
if images:
first_image = images[0]
if first_image:
img_src = first_image['link']['href']
price = product_details['_embedded']['product']['priceLabel']['now']
# filename = f'data/ah/{title}.html'
# with open(filename, 'wb') as f:
# f.write(response.body)
yield create_grocery_bot_item(product_name, page_title, description,
'<NAME>', response.url, dt.now(), weight_q, weight_ind, size, '',
price, img_src)
| en | 0.747312 | # convert repsonse to json # self.logger.info('main: %s' % response.url) # select the json node where the categories are loaded # Visit only the product categories # from each category, extract the href # Find the lanes that correspond to filters - these are the ones containing links to subcategory requests # If no 'Soort' section is found, then there are no more filters to be applied, so we crawl the product. # Find the lane that corresponds to products. # If a Soort section is found, we follow this link recursively. # Get the ProductDetailLane # filename = f'data/ah/{title}.html' # with open(filename, 'wb') as f: # f.write(response.body) | 2.61824 | 3 |
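The category parser above relies on next() over a generator to pick the first matching lane, with a default of -1 signalling that no 'Soort' filter exists. A self-contained sketch of that pattern using made-up sample data:

lanes = [
    {'id': 'Filters', 'type': 'FilterLane'},
    {'id': 'Products', 'type': 'ProductLane'},
]

filter_lane = next(lane for lane in lanes if lane['id'] == 'Filters')
product_lane = next((lane for lane in lanes if lane['type'] == 'ProductLane'), -1)
soort = next((lane for lane in lanes if lane['id'] == 'Soort'), -1)

print(filter_lane['type'])   # FilterLane
print(product_lane['id'])    # Products
print(soort)                 # -1: no such lane, mirroring the "no more filters" branch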