repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
evansosenko/spin-lifetime-analysis | analysis/data.py | 1 | 3319 |
import os
import numpy
import scipy_data_fitting
class Fig4(scipy_data_fitting.Data):
"""
Use this to load the data from Figure 4 in PhysRevLett.105.167202.
Should not be used directly, but only subclassed.
"""
def __init__(self, subfig):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig
self.genfromtxt_args['delimiter'] = "\t"
self.genfromtxt_args['skip_header'] = 1
self.path = os.path.join('data', 'PhysRevLett.105.167202',
'figure_4' + subfig + '.tsv')
if subfig == 'd': self.scale = (1, 'milli')
class Fig4Parallel(Fig4):
"""
The parallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig)
self.name = self.name + '_parallel'
self.genfromtxt_args['usecols'] = (0, 1)
if subfig == 'c': self.path = self.path.replace('.tsv', '.1.tsv')
class Fig4Antiparallel(Fig4):
"""
The antiparallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig)
self.name = self.name + '_antiparallel'
if subfig == 'c':
self.path = self.path.replace('.tsv', '.2.tsv')
else:
self.genfromtxt_args['usecols'] = (0, 2)
class Fig4Difference(scipy_data_fitting.Data):
"""
The difference of the parallel and antiparallel field data
from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig + '_difference'
parallel = Fig4Parallel(self.subfig)
antiparallel = Fig4Antiparallel(self.subfig)
self.array = numpy.array([
parallel.array[0],
abs(parallel.array[1] - antiparallel.array[1])
])
class Fig4Normalized(scipy_data_fitting.Data):
"""
The normalized field data from Figure 4 in PhysRevLett.105.167202.
Should not be used directly, but only subclassed.
"""
def __init__(self, subfig, data_class):
super().__init__()
self.subfig = subfig
self.name = 'fig_4' + self.subfig + '_normalized'
self.unnormalized = data_class(self.subfig).array
self.array = numpy.array([
self.unnormalized[0],
self.unnormalized[1] / max(abs(self.unnormalized[1]))
])
class Fig4NormalizedParallel(Fig4Normalized):
"""
The normalized parallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Parallel)
self.name = self.name + '_parallel'
class Fig4NormalizedAntiparallel(Fig4Normalized):
"""
The normalized antiparallel field data from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Antiparallel)
self.name = self.name + '_antiparallel'
class Fig4NormalizedDifference(Fig4Normalized):
"""
The difference of the normalized parallel and antiparallel field data
from Figure 4 in PhysRevLett.105.167202.
"""
def __init__(self, subfig):
super().__init__(subfig, Fig4Difference)
self.name = self.name + '_difference'
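# Usage sketch (hypothetical, not part of the original module): assuming the TSV files
# exist under data/PhysRevLett.105.167202/, each class is instantiated with a subfigure
# letter and exposes the loaded values through scipy_data_fitting's `array` attribute:
#
#   parallel = Fig4Parallel('a')            # loads figure_4a.tsv, columns 0 and 1
#   normalized = Fig4NormalizedDifference('a')
#   fields, signal = normalized.array       # numpy rows: field values, normalized signal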
| mit | 4,957,334,593,424,651,000 | 30.018692 | 83 | 0.6047 | false |
geomagpy/MARTAS | app/file_download.py | 1 | 42049 |
#!/usr/bin/env python
"""
Get files from a remote server (to be reached by nfs, samba, ftp, html or local directory)
file content is directly added to a data bank (or local file if preferred).
"""
from __future__ import print_function
from magpy.stream import *
from magpy.database import *
from magpy.opt import cred as mpcred
import getopt
import fnmatch
import pwd
import zipfile
import tempfile
from dateutil import parser
from shutil import copyfile
import subprocess
import socket
# Relative import of core methods as long as martas is not configured as package
scriptpath = os.path.dirname(os.path.realpath(__file__))
coredir = os.path.abspath(os.path.join(scriptpath, '..', 'core'))
sys.path.insert(0, coredir)
from martas import martaslog as ml
from acquisitionsupport import GetConf2 as GetConf2
"""
DESCRIPTION
Downloads data by default in to an archive "raw" structure
like /srv/archive/STATIONID/SENSORID/raw
Adds data into a MagPy database (if writedatabase is True)
Adds data into a basic archive structure (if writearchive is True)
The application requires credentials of remote source and local database created by addcred
APPLICATION
1) Getting binary data from an FTP source every scheduled day
python3 collectfile-new.py -c ../conf/collect-ftpsource.cfg
in config "collect-ftpsource.cfg":
sourcedatapath : /remote/data
filenamestructure : *%s.bin
2) Getting binary data from an FTP source every scheduled day, using the secondary time column and an offset of 2.3 seconds
python3 collectfile-new.py -c ../conf/collect-ftpsource.cfg
in config "collect-ftpsource.cfg":
sourcedatapath : /remote/data
filenamestructure : *%s.bin
LEMI025_28_0002 : defaulttimecolumn:sectime;time:-2.3
3) Just download raw data to archive
python3 collectfile-new.py -c ../conf/collect-ftpsource.cfg
in config "collect-ftpsource.cfg":
writedatabase : False
writearchive : False
4) Rsync from a ssh server (passwordless access to remote machine is necessary, cred file does not need to contain a pwd)
python3 collectfile-new.py -c ../conf/collect-ftpsource.cfg
in config "collect-ftpsource.cfg":
protocol : rsync
writedatabase : False
writearchive : False
5) Uploading raw data from local raw archive
python3 collectfile-new.py -c ../conf/collect-localsource.cfg
in config "collect-localsource.cfg":
protocol :
sourcedatapath : /srv/archive/DATA/SENSOR/raw
writedatabase : False
writearchive : True
forcerevision : 0001
python3 collectfile-new.py -c ../conf/collect-source.cfg -d 10 -e 2020-10-20
Debugging option:
python3 collectfile-new.py -c ../conf/collect-source.cfg -D
Legacy examples (old collectfile.py syntax):
1. get data from ftp server and add to database
   python collectfile.py -r "/data/magnetism/gam" -c cobsdb -e zamg -p ftp -t GAM -d 2 -f *%s.zip -a "%Y-%m-%d"
2. get data from local directory and add to database
   python collectfile.py -c cobsdb -r "/srv/data/" -s LEMI036_1_0001 -t WIC -a "%Y-%m-%d" -f "LEMI036_1_0001_%s.bin"
3. get data from local directory and add to database, add raw data to archive
   python collectfile.py -c cobsdb -r "/Observatory/archive/WIK/DIDD_3121331_0002/DIDD_3121331_0002_0001/" -s DIDD_3121331_0002 -t WIK -b "2012-06-01" -d 100 -a "%Y-%m-%d" -f "DIDD_3121331_0002_0001_%s.cdf" -l "/srv/archive"
4. get data from remote by ssh and store in local archive
   python collectfile.py -e phobostilt -r "/srv/gwr/" -p scp -s GWRSG_12345_0002 -t SGO -b "2012-06-01" -d 30 -a "%y%m%d" -f "G1%s.025" -l "/srv/archive"
5. get recently created files from remote by ssh and store in local archive
   python collectfile.py -e themisto -r "/srv/" -p scp -t SGO -d 2 -a ctime -l "/srv/archive"
CONFIGURATION FILE
one for each source e.g. collect-janus.cfg
# Data source
# -----------------
# Credentials
sourcecredentials : janus
# Path to the data to be collected
sourcedatapath : /srv/mqtt
# Protocol for data access (ftp,rsync,scp)
protocol : ftp
# Optional - ID of the sensor (required if not contained in the data)
#sensorid : xxx_xxx_0001
# Optional - ID of the station i.e. the Observatory code (required if')
#stationid : wic
# Dateformat in files to be read
# like "%Y-%m-%d" for 2014-02-01
# "%Y%m%d" for 20140201
# "ctime" or "mtime" for using timestamp of file
dateformat : %Y-%m-%d
# filename of data file to be read.
# Add %s as placeholder for date
# examples: "WIC_%s.bin"
# "*%s*"
# "WIC_%s.*"
# "WIC_2013.all" - no dateformat -> single file will be read
filenamestructure : *%s*
# Timerange
defaultdepth : 2
# Sensor specific modifications - defaulttimecolumn, offsets by KEY:value pairs
SENSORID : defaulttimecolumn:sectime;sectime:2.3
# Perform as user - uncomment if not used
# necessary for cron and other root jobs
defaultuser : cobs
# Walk through subdirectories
# if selected all subdirectories below remote path will be searched for
# filename pattern. Only works for local directories and scp.
walksubdirs : False
# Sensors present in path to be skipped (beginning of the sensor name is enough)
#blacklist : None
# Collecting server
# -----------------
# Rawdatapath:
# two subdirectories will be created if not existing - based on stationID and sensorID
# e.g. WIC/LEMI025_22_0003
rawpath : /srv/archive
# If forcedirectory, then rawpath is used for saving data
forcedirectory : False
# Zip data in archive directory
zipdata : False
# delete from remote source after successful transfer
# (doesn't work with scp)
deleteremote : False
# Force data to the given revision number
#forcerevision : 0001
# Database (Credentials makes use of addcred.py)
dbcredentials : cobsdb
# Disable proxy settings of the system (seems to be unused - check)
disableproxy : False
writedatabase : True
# Create a basic archiving file without database if True
# basic path is /STATIONID/SENSORID/SENSORID_0001/
writearchive : False
archiveformat : PYCDF
# Logging
# -----------------
# Logging parameter
# ################
# path to log file
logpath : /var/log/magpy/archivestatus.log
# log,email,telegram
notification : telegram
# configuration for notification
notificationconf : /myconfpath/mynotificationtype.cfg
Changelog:
2014-08-02: RL removed break when no data was found (could happen if no data is available on a selected day; all other days still need to be collected, however)
2014-10-22: RL updated the description
2014-11-04: RL added the inserttable option to force data upload to a specific table (e.g. for rcs conrad data which has a variable sampling rate)
2015-10-20: RL changes for fast ndarrays and zip option
2016-10-10: RL updated imports, improved help and checked for pure file access
2017-03-10: RL activated force option
2018-10-22: RL changed all routines considerably
2020-10-01: RL included and tested rsync option (not perfect, but well, it's working)
2021-02-09: RL rewriting with config (MARCOS sister script of file_upload) --- file_download with add to MagPy database option, column selectors and header updates (e.g. secondary time + offset option)
- database writing as option (fixed table or not)
- simple creation of MagPy archive without database tables (but making use of DB meta info)
- just download files using ftp, scp, rsync etc
- use a configuration file
"""
def GetBool(string):
if string in ['true','True','Yes','yes','y','TRUE',True]:
return True
else:
return False
def walk_dir(directory_path, filename, date, dateformat):
"""
Method to extract filename with wildcards or date patterns by walking through a local directory structure
"""
# Walk through files in directory_path, including subdirectories
pathlist = []
if filename == '':
filename = '*'
if dateformat in ['','ctime','mtime']:
filepat = filename
else:
filepat = filename % date
#print ("Checking directory {} for files with {}".format(directory_path, filepat))
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
if fnmatch.fnmatch(filename, filepat):
file_path = os.path.join(root,filename)
if dateformat in ['ctime','mtime']:
if dateformat == 'ctime':
tcheck = datetime.fromtimestamp(os.path.getctime(file_path))
if dateformat == 'mtime':
tcheck = datetime.fromtimestamp(os.path.getmtime(file_path))
if tcheck.date() == date.date():
pathlist.append(file_path)
else:
pathlist.append(file_path)
return pathlist
def dir_extract(lines, filename, date, dateformat):
"""
Method to extract filename with wildcards or date patterns from a directory listing
"""
pathlist = []
if filename == '':
filename = '*'
if dateformat in ['','ctime','mtime']:
filepat = filename
else:
filepat = filename % date
for line in lines:
#print ("Checking line {}".format(line))
tokens = line.split()
# Not interested in directories
if not tokens[0][0] == "d" and len(tokens)==9:
time_str = tokens[5] + " " + tokens[6] + " " + tokens[7]
if dateformat in ['ctime','mtime']:
# cannot distinguish between mtime and ctime here
time = parser.parse(time_str)
#print (time.date())
if time.date() == date.date():
pathlist.append(tokens[8])
else:
if fnmatch.fnmatch(tokens[8], filepat):
file_path = tokens[8]
#print (tokens[8])
pathlist.append(file_path)
return pathlist
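# Illustration (assumed input, not taken from the original source): dir_extract expects
# unix-style listing lines as produced by ftplib's FTP.dir(), e.g.
#
#   -rw-r--r--   1 ftp   ftp   524288 Oct 20 2020 LEMI025_22_0003_2020-10-20.bin
#
# Token 0 is the mode string (entries starting with 'd' are skipped), tokens 5-7 hold the
# timestamp used for ctime/mtime matching, and token 8 is the file name that gets returned.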
def die(child, errstr):
print (errstr)
print (child.before, child.after)
child.terminate()
exit(1)
def ssh_getlist(source, filename, date, dateformat, maxdate, cred=[], pwd_required=True, timeout=60):
"""
Method to extract filename with wildcards or date patterns from a directory listing
"""
pathlist = []
filename = filename.replace('*','')
if dateformat in ['','ctime','mtime']:
filepat = filename
else:
filepat = filename % date
if not dateformat in ['','ctime','mtime']:
searchstr = 'find %s -type f | grep "%s"' % (source,filepat)
elif dateformat in ['ctime','mtime']:
mindate = (datetime.utcnow() - date).days
maxdate = (datetime.utcnow() - maxdate).days
if maxdate == 0:
searchstr = 'find {} -type f -{} -{} | grep "{}"'.format(source,dateformat, mindate,filepat)
else:
searchstr = 'find {} -type f -{} -{} -{} +{} | grep "{}"'.format(source,dateformat,mindate,dateformat,(mindate-maxdate),filepat)
else:
searchstr = 'find {} -type f | grep "{}"'.format(source,filepat)
COMMAND= "ssh %s@%s '%s';" % (cred[0],cred[2],searchstr)
child = pexpect.spawn(COMMAND)
if timeout:
child.timeout=timeout
if pwd_required:
i = child.expect([pexpect.TIMEOUT, 'assword: '])
child.sendline(cred[1])
i = child.expect([pexpect.TIMEOUT, 'Permission denied', pexpect.EOF])
if i == 0:
die(child, 'ERROR!\nSSH timed out. Here is what SSH said:')
elif i == 1:
die(child, 'ERROR!\nIncorrect password Here is what SSH said:')
elif i == 2:
result = child.before
if sys.version_info.major == 3:
result = result.decode('ascii')
pathlist = result.split('\r\n')
pathlist = [elem for elem in pathlist if not elem == '' and not elem == ' ']
return pathlist
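# Illustration (assumed values): for source='/srv/mqtt', filename='*%s*', date='2020-10-20'
# and dateformat='%Y-%m-%d', the remote command built above becomes
#
#   find /srv/mqtt -type f | grep "2020-10-20"
#
# executed via pexpect as  ssh user@host 'find ...'; the newline-separated output is split
# into the returned pathlist.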
def CheckConfiguration(config={},debug=False):
"""
DESCRIPTION
configuration data will be checked
"""
user = ''
password = ''
address = ''
destination = ''
source = ''
port = 21
success = True
if debug:
print (" Checking configuration data")
if config.get('rawpath') == '' and config.get('dbcredentials') == '':
print('Specify either a shortcut to the credential information of the database or a local path:')
print('-- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
if config.get('rawpath') == '':
destination = tempfile.gettempdir()
else:
if not os.path.isdir(config.get('rawpath')):
print ("Destination directory {} not existing. Creating it".format(config.get('rawpath')))
os.makedirs(config.get('rawpath'))
destination = config.get('rawpath')
config['destination'] = destination
credtransfer = config.get('sourcecredentials')
if not credtransfer == '':
if debug:
print (" - checking credentials for remote access")
user=mpcred.lc(credtransfer,'user')
password=mpcred.lc(credtransfer,'passwd')
address = mpcred.lc(credtransfer,'address')
try:
port = int(mpcred.lc(credtransfer,'port'))
except:
port = 21
if debug:
print (" -> done")
config['rmuser'] = user
config['rmpassword'] = password
config['rmaddress'] = address
config['rmport'] = port
source = ''
protocol = config.get('protocol')
if not protocol in ['','ftp','FTP']:
source += protocol + "://"
if not user == '' and not password=='':
source += user + ":" + password + "@"
if not address == '':
source += address
remotepath = config.get('sourcedatapath')
if not remotepath == '':
source += remotepath
config['source'] = source
if not protocol in ['','scp','ftp','SCP','FTP','html','rsync']:
print('Specify a valid protocol:')
print('-- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
walk = config.get('walksubdirs')
if debug:
print (" Walk through subdirs: {}".format(walk))
if GetBool(walk):
if not protocol in ['','scp','rsync']:
print(' -> Walk mode only works for local directories and scp access.')
print(' -> Switching walk mode off.')
config['walksubdirs'] = False
creddb = config.get('dbcredentials')
if not creddb == '':
print(" Accessing data bank ...")
# required for either writeing to DB or getting meta in case of writing archive
try:
db = mysql.connect(host=mpcred.lc(creddb,'host'),user=mpcred.lc(creddb,'user'),passwd=mpcred.lc(creddb,'passwd'),db=mpcred.lc(creddb,'db'))
print(" -> success")
except:
print(" -> failure - check your credentials")
db = None
success = False
#sys.exit()
config['db'] = db
# loaded all credentials (if started from root, root permissions are required for that)
# now switch user for scp
# TODO check whether this is working in a function
if config.get('defaultuser'):
try:
uid=pwd.getpwnam(config.get('defaultuser'))[2]
os.setuid(uid)
except:
print (" User {} not existing - moving on".format(config.get('defaultuser')))
dateformat = config.get('dateformat')
filename = config.get('filenamestructure')
if dateformat == "" and filename == "":
print(' Specify either a fileformat: -f myformat.dat or a dateformat -d "%Y",ctime !')
print(' -- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
if not dateformat in ['','ctime','mtime']:
current = datetime.utcnow()
try:
newdate = datetime.strftime(current,dateformat)
except:
print(' Specify a valid datetime dateformat like "%Y-%m-%d"')
print(' -- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
if "%s" in filename and dateformat in ['','ctime','mtime']:
print(' Specify a datetime dateformat for given placeholder in fileformat!')
print(' -- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
elif not "%s" in filename and "*" in filename and not dateformat in ['ctime','mtime']:
print(' Specify either ctime or mtime for dateformat to be used with your given fileformat!')
print(' -- check collectfile.py -h for more options and requirements')
success = False
#sys.exit()
elif not "%s" in filename and not "*" in filename and not dateformat in [""]:
print(' Given dateformat will be ignored!')
print(' -- check collectfile.py -h for more options and requirements')
print(' -- continuing ...')
if debug:
print(" => Configuration checked - success")
return config, success
def GetDatelist(config={},current=datetime.utcnow(),debug=False):
if debug:
print(" -> Obtaining timerange ...")
datelist = []
newcurrent = current
dateformat = config.get('dateformat')
depth = int(config.get('defaultdepth'))
if not dateformat in ['','ctime','mtime']:
for elem in range(depth):
if dateformat == '%b%d%y': #exception for MAGREC
newdate = datetime.strftime(newcurrent,dateformat)
datelist.append(newdate.upper())
else:
datelist.append(datetime.strftime(newcurrent,dateformat))
newcurrent = current-timedelta(days=elem+1)
elif dateformat in ['ctime','mtime']:
for elem in range(depth):
datelist.append(newcurrent)
newcurrent = current-timedelta(days=elem+1)
else:
datelist = ['dummy']
#if debug:
print(" -> Dealing with time range:\n {}".format(datelist))
return datelist
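# Illustration (assumed config): with dateformat '%Y-%m-%d', defaultdepth 3 and
# current = 2020-10-20, the returned list is ['2020-10-20', '2020-10-19', '2020-10-18'];
# for dateformat 'ctime'/'mtime' the list holds datetime objects instead, and an empty
# dateformat yields the single placeholder ['dummy'].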
def CreateTransferList(config={},datelist=[],debug=False):
"""
DESCRIPTION
Create a list of files to be transfered
Define source based on 'protocol', 'remotepath', 'walk'
protocols: ''(local disk), 'scp', 'ftp', 'html'
What about rsync? -> no need to use create list but requires passwd-less connection
RETURNS
filelist (a list of remote filepaths to be transferred)
"""
#filelist = getfilelist(protocol, source, sensorid, filename, datelist, walk=True, option=None)
if debug:
print(" Getting filelists")
print(" -------------------")
protocol = config.get('protocol','')
source = config.get('source','')
remotepath = config.get('sourcedatapath')
filename = config.get('filenamestructure')
user = config.get('rmuser')
password = config.get('rmpassword')
address = config.get('rmaddress')
port = config.get('rmport')
dateformat = config.get('dateformat')
filelist = []
if protocol in ['ftp','FTP']:
print (" Connecting to FTP")
if debug:
print (" - Getting filelist - by ftp ")
import ftplib
if debug:
print (" - connecting to {} on port {}".format(address,port))
if not port == 21:
ftp = ftplib.FTP()
ftp.connect(address,port)
else:
ftp = ftplib.FTP(address)
if debug:
print (" - user: {} ".format(user))
ftp.login(user,password)
ftp.cwd(source)
lines = []
ftp.dir("", lines.append)
ftp.close()
for date in datelist:
path = dir_extract(lines, filename, date, dateformat)
if len(path) > 0:
filelist.extend(path)
elif protocol in ['scp','SCP','rsync']:
if debug:
print (" Connecting for {}".format(protocol))
pwd_required=True
if protocol == 'rsync':
pwd_required=False
print (" Rsync requires passwordless ssh connection to remote system")
import pexpect
if not dateformat in ['','ctime','mtime']:
for date in datelist:
path = ssh_getlist(remotepath, filename, date, dateformat, datetime.utcnow(), cred=[user,password,address],pwd_required=pwd_required)
if len(path) > 0:
filelist.extend(path)
else:
filelist = ssh_getlist(remotepath, filename, min(datelist), dateformat, max(datelist), cred=[user,password,address],pwd_required=pwd_required)
elif protocol == '':
if debug:
print (" Local directory access ")
### Search local directory - Working
for date in datelist:
path = walk_dir(source, filename, date, dateformat)
if len(path) > 0:
filelist.extend(path)
elif protocol == 'html':
print (filelist)
print (" HTML access not supported - use MagPy directly to access webservices")
#if debug:
print ("Files to be transferred")
print ("-----------------------------")
print (filelist)
print ("-----------------------------")
return filelist
def ObtainDatafiles(config={},filelist=[],debug=False):
"""
DESCRIPTION
Download data files and write them either to the raw directory or to tmp (or to a specified folder)
### 2.3 Get selected files and copy them to destination
###
### only if not protocol == '' and localpath
### update filelist with new filenamens on local harddisk
What about rsync? -> no need to use create list but requires passwd-less connection
RETURNS
localfilelist (a list with full paths to all files copied to the localfilesystem)
"""
# Requires
stationid = config.get('stationid')
# if sensorid is not provided it will be extracted from the filelist
sensorid = config.get('sensorid')
localpath = config.get('rawpath')
protocol = config.get('protocol')
source = config.get('source')
destination = config.get('destination')
deleteremote = config.get('deleteremote',False)
user = config.get('rmuser')
password = config.get('rmpassword')
address = config.get('rmaddress')
port = config.get('rmport')
zipping = GetBool(config.get('zipdata'))
forcelocal = GetBool(config.get('forcedirectory',False))
deleteopt = " "
#filename = config.get('filenamestructure')
#dateformat = config.get('dateformat')
def createdestinationpath(localpath,stationid,sensorid, forcelocal=False):
subdir = 'raw'
if not stationid and not sensorid or forcelocal:
destpath = os.path.join(localpath)
elif not stationid:
destpath = os.path.join(localpath,sensorid,'raw')
elif not sensorid:
destpath = os.path.join(localpath,stationid.upper())
else:
destpath = os.path.join(localpath,stationid.upper(),sensorid,'raw')
return destpath
print(" Writing data to a local directory (or tmp)")
if deleteremote in [True,'True']:
print(" IMPORTANT: deleting remote data has been activated")
if debug:
print (" Please Note: files will be copied to local filesystem even when debug is selected")
localpathlist = []
if not protocol == '' or (protocol == '' and not destination == tempfile.gettempdir()):
### Create a directory by getting sensorid names (from source directory)
# Open the specific channel
if protocol in ['ftp','FTP']:
import ftplib  # re-import here: the import in CreateTransferList is local to that function
if not port == 21:
ftp = ftplib.FTP()
ftp.connect(address,port)
else:
ftp = ftplib.FTP(address)
ftp.login(user,password)
ftp.cwd(source)
for f in filelist:
if debug:
print (" Accessing file {}".format(f))
path = os.path.normpath(f)
li = path.split(os.sep)
if not sensorid and not protocol in ['ftp','FTP']:
if len(li) >= 2:
sensid = li[-2]
if sensid == 'raw' and len(li) >= 3: # in case an archive raw data structure is loaded
sensid = li[-3]
elif not sensorid and protocol in ['ftp','FTP']:
sensid = f.split('.')[0].rpartition('_')[0]
else:
sensid = sensorid
destpath = createdestinationpath(destination,stationid,sensid,forcelocal=forcelocal)
destname = os.path.join(destpath,li[-1])
if not os.path.isdir(destpath):
os.makedirs(destpath)
if debug:
print (" -> write destination (for raw files): {} , {}".format(destpath, li[-1]))
if protocol in ['ftp','FTP']:
fhandle = open(destname, 'wb')
ftp.retrbinary('RETR ' + f, fhandle.write)
fhandle.close()
if deleteremote in [True,'True']:
ftp.delete(f)
elif protocol in ['scp','SCP']:
scptransfer(user+'@'+address+':'+f,destpath,password,timeout=600)
elif protocol in ['rsync']:
# create a command line string with rsync ### please note: rsync requires passwordless communication
if deleteremote in [True,'True']:
deleteopt = " --remove-source-files "
else:
deleteopt = " "
rsyncstring = "rsync -avz -e ssh{}{} {}".format(deleteopt, user+'@'+address+':'+f,destpath)
print ("Executing:", rsyncstring)
subprocess.call(rsyncstring.split())
elif protocol in ['html','HTML']:
pass
elif protocol in ['']:
if not os.path.exists(destname):
copyfile(f, destname)
if deleteremote in [True,'True']:
os.remove(f)
else:
print (" -> raw file already existing - skipping write")
if zipping:
if debug:
print (" raw data wil be zipped")
dirname = os.path.dirname(destname)
oldname = os.path.basename(destname)
pname = os.path.splitext(oldname)
if not pname[1] in [".zip",".gz",".ZIP",".GZ"]:
zipname = pname[0]+'.zip'
with zipfile.ZipFile(os.path.join(dirname,zipname), 'w') as myzip:
myzip.write(destname,oldname, zipfile.ZIP_DEFLATED)
os.remove(destname)
destname = os.path.join(dirname,zipname)
else:
if debug:
print (" data is zipped already")
localpathlist.append(destname)
if protocol in ['ftp','FTP']:
ftp.close()
else:
localpathlist = [elem for elem in filelist]
if debug:
print (" => all files are now on local system: {}".format(localpathlist))
return localpathlist
def WriteData(config={},localpathlist=[],debug=False):
"""
DESCRIPTION
Read local data and write to database
RETURNS
"""
print(" Writing data to database and/or archive")
db = config.get('db')
stationid = config.get('stationid','')
sensorid = config.get('sensorid','')
force = config.get('forcerevision','')
writemode = config.get('writemode','replace')
if not writemode in ['replace','overwrite']:
# replace will replace existing data and leave the rest unchanged
# overwrite will delete the file and write a new one
writemode = 'replace'
for f in localpathlist:
data = DataStream()
try:
data = read(f)
except:
data = DataStream()
if data.length()[0] > 0:
if debug:
print (" Dealing with {}. Length = {}".format(f,data.length()[0]))
print (" -------------------------------")
#print ("SensorID in file: {}".format(data.header.get('SensorID')))
# Station ID provided?
statiddata = data.header.get('StationID','')
if not stationid == '':
if not statiddata == stationid and not statiddata == '':
print(" StationID's from file and provided one (or dir) are different!")
print (" Using provided value")
data.header['StationID'] = stationid.upper()
else:
if data.header.get('StationID','') == '':
print(" Could not find station ID in datafile")
print(" Please provide by using -t stationid")
#sys.exit()
# Abort try clause
x= 1/0
if debug:
print(" -> Using StationID", data.header.get('StationID'))
# Sensor ID extractable?
sensiddata = data.header.get('SensorID','')
if not sensorid == '':
if not sensiddata == sensorid and not sensiddata == '':
print(" SensorID's from file and provided one (or dir) are different!")
print (" Using provided value")
data.header['SensorID'] = sensorid
else:
if data.header.get('SensorID','') == '':
print(" Could not find sensor ID in datafile")
print(" Please provide by using -s sensorid")
# Abort try clause
x= 1/0
#sys.exit()
fixsensorid = data.header.get('SensorID')
if debug:
print(" -> Using SensorID", data.header.get('SensorID'))
# Conversions
if config.get(fixsensorid):
comments = []
offdict = {}
comment = data.header.get('DataComments')
if comment:
comments.append(comment)
print (" Found modification parameters - applying ...")
paradict = config.get(fixsensorid)
if paradict.get('defaulttimecolumn') == 'sectime':
print (" -> secondary time column of raw data will be used as primary")
data = data.use_sectime()
comm1 = "secondary time colum moved to primary"
comments.append(comm1)
keylist = data._get_key_headers()
keylist.extend(['time','sectime'])
for key in keylist:
offset = paradict.get(key)
#if debug:
# print (" offset for key {}: {}".format(key, offset))
if offset:
#try:
offset = float(offset)
if offset and key in ['time','sectime']:
offdict[key] = timedelta(seconds=offset)
elif offset:
offdict[key] = offset
comm = "applied an offset of {} to column {}".format(offset, key)
comments.append(comm)
print (" -> offset of {} applied to {}".format(offset, key))
#except:
# print (" -> failure applying offset")
if offdict:
if debug:
print (" applying offsets: {}".format(offdict))
#'time': timedelta(hours=1), 'x': 4.2, 'f': -1.34242
data = data.offset(offdict)
# extend comment
if len(comments) > 1:
newcomment = ", ".join(comments)
data.header['DataComments'] = newcomment
elif len(comments) > 0:
newcomment = comments[0]
data.header['DataComments'] = newcomment
print (" => modifications done")
#print ("DATAComments", comments, newcomment, data.header['DataComments'])
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
datainfoid = data.header.get('DataID')
if force:
if not datainfoid:
datainfoid = "{}_{}".format(fixsensorid,str(force).zfill(4))
if debug:
print ("Using DataID: {}".format(datainfoid))
# Get existing header information from database and combine with new info
if datainfoid:
existheader = dbfields2dict(db,datainfoid)
data.header = merge_two_dicts(existheader,data.header)
# Writing data
if not debug and GetBool(config.get('writedatabase')):
print(" {}: Adding {} data points to DB now".format(data.header.get('SensorID'), data.length()[0]))
if not len(data.ndarray[0]) > 0:
data = data.linestruct2ndarray() # Dealing with very old formats
if len(data.ndarray[0]) > 0:
if not force == '':
tabname = "{}_{}".format(fixsensorid,str(force).zfill(4))
print (" - Force option chosen: forcing data to table {}".format(tabname))
print (" IMPORTANT: general database meta information will not be updated")
writeDB(db,data, tablename=tabname)
else:
writeDB(db,data)
elif debug:
print (" DEBUG selected - no database written")
# Writing data
if not debug and GetBool(config.get('writearchive')):
if force:
archivepath = os.path.join(config.get('rawpath'),stationid.upper(),fixsensorid,datainfoid)
if config.get('archiveformat'):
print(" {}: Writing {} data points archive".format(data.header.get('SensorID'), data.length()[0]))
if fixsensorid.startswith("LEMI"):
# LEMI bin files contains str1 column which cannot be written to PYCDF (TODO) - column contains GPS state
data = data._drop_column('str1')
if archivepath:
data.write(archivepath,filenamebegins=datainfoid+'_',format_type=config.get('archiveformat'),mode=writemode)
else:
print (" Writing to archive requires forcerevision")
elif debug:
print (" DEBUG selected - no archive written")
def main(argv):
version = "1.0.0"
conf = ''
etime = ''
deptharg = ''
writedbarg = ''
writearchivearg = ''
hostname = socket.gethostname().upper()
statusmsg = {}
debug = False
filelist = []
localpathlist = []
try:
opts, args = getopt.getopt(argv,"hc:e:d:w:a:D",["configuration=","endtime=","depth=","writedb=","writearchive=","debug=",])
except getopt.GetoptError:
print('file_download.py -c <configuration> -e <endtime> -d <depth>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('-------------------------------------')
print('Description:')
print('file_download.py reads data from various sources ')
print('and uploads data to a data bank.')
print('Filtering and archiving is done using "cleanup".')
print('-------------------------------------')
print('Usage:')
print('file_download.py -c <configuration> -e <endtime> -d <depth>')
print('-------------------------------------')
print('Options:')
print('-c : configurationfile')
print('-e : endtime')
print('-d : depth: 1 means today, 2 today and yesterday, 3 last three days, etc')
print('-w : write to database')
print('-a : write to archive')
print('-------------------------------------')
print('Examples:')
print('---------')
print('---------')
sys.exit()
elif opt in ("-c", "--configuration"):
conf = arg
elif opt in ("-e", "--endtime"):
etime = arg
elif opt in ("-d", "--depth"):
try:
deptharg = int(arg)
if not deptharg >= 1:
print("provided depth needs to be positve")
sys.exit()
except:
print("depth needs to be an integer")
sys.exit()
elif opt in ("-w", "--writedb"):
writedbarg = arg
elif opt in ("-a", "--writearchive"):
writearchivearg = arg
elif opt in ("-D", "--debug"):
debug = True
# Read configuration file
# -----------------------
print ("Running collectfile.py - version {}".format(version))
print ("-------------------------------")
name = "{}-collectfile-{}".format(hostname, os.path.split(conf)[1].split('.')[0])
statusmsg[name] = 'collectfile successfully finished'
if conf == '':
print ('Specify a path to a configuration file using the -c option:')
print ('-- check archive.py -h for more options and requirements')
sys.exit()
else:
if os.path.isfile(conf):
print (" Read file with GetConf")
config = GetConf2(conf)
print (" -> configuration data extracted")
else:
print ('Specify a valid path to a configuration file using the -c option:')
print ('-- check archive.py -h for more options and requirements')
sys.exit()
if etime == '':
current = datetime.utcnow() # make that a variable
else:
current = DataStream()._testtime(etime)
# check configuration information
# -----------------------
config, success = CheckConfiguration(config=config, debug=debug)
if not success:
statusmsg[name] = 'invalid configuration data - aborting'
else:
# Override config data with given inputs
# -----------------------
if writearchivearg:
config['writearchive'] = GetBool(writearchivearg)
if writedbarg:
config['writedatabase'] = GetBool(writedbarg)
if deptharg:
config['defaultdepth'] = deptharg
# Create datelist
# -----------------------
datelist = GetDatelist(config=config,current=current,debug=debug)
# Obtain list of files to be transferred
# -----------------------
try:
filelist = CreateTransferList(config=config,datelist=datelist,debug=debug)
moveon = True
except:
statusmsg[name] = 'could not obtain remote file list - aborting'
moveon = False
if moveon:
# Obtain list of files to be transferred
# -----------------------
try:
localpathlist = ObtainDatafiles(config=config,filelist=filelist,debug=debug)
except:
statusmsg[name] = 'getting local file list failed - check permission'
localpathlist = []
# Write data to specified destinations
# -----------------------
#try:
if config.get('db') and len(localpathlist) > 0 and (GetBool(config.get('writedatabase')) or GetBool(config.get('writearchive'))):
succ = WriteData(config=config,localpathlist=localpathlist,debug=debug)
#except:
# statusmsg[name] = 'problem when writing data'
# Send Logging
# -----------------------
receiver = config.get('notification')
receiverconf = config.get('notificationconf')
logpath = config.get('logpath')
if debug: #No update of statusmessages if only a selected sensor list is analyzed
print (statusmsg)
else:
martaslog = ml(logfile=logpath,receiver=receiver)
martaslog.telegram['config'] = receiverconf
martaslog.msg(statusmsg)
print ("----------------------------------------------------------------")
print ("collector app finished")
print ("----------------------------------------------------------------")
if statusmsg[name] == 'collectfile successfully finished':
print ("SUCCESS")
else:
print ("FAILURE")
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | 1,479,611,255,749,253,000 | 37.862292 | 242 | 0.55509 | false |
rtibbles/kolibri | kolibri/utils/tests/test_cli.py | 1 | 3590 |
"""
Tests for `kolibri` module.
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
import pytest
from kolibri.utils import cli
from .base import KolibriTestBase
logger = logging.getLogger(__name__)
@pytest.fixture
def conf():
from kolibri.utils import conf
old_config = copy.deepcopy(conf.config)
yield conf
conf.update(old_config)
conf.save()
def test_bogus_plugin_disable(conf):
installed_apps_before = conf.config["INSTALLED_APPS"][:]
cli.plugin("i_do_not_exist", disable=True)
assert installed_apps_before == conf.config["INSTALLED_APPS"]
def test_plugin_cannot_be_imported_disable(conf):
"""
A plugin may be in conf.config['INSTALLED_APPS'] but broken or uninstalled
"""
plugin_name = "giraffe.horse"
conf.config["INSTALLED_APPS"].append(plugin_name)
conf.save()
cli.plugin(plugin_name, disable=True)
assert plugin_name not in conf.config["INSTALLED_APPS"]
def test_real_plugin_disable(conf):
installed_apps_before = conf.config["INSTALLED_APPS"][:]
test_plugin = "kolibri.plugins.audio_mp3_render"
assert test_plugin in installed_apps_before
# Because RIP example plugin
cli.plugin(test_plugin, disable=True)
assert test_plugin not in conf.config["INSTALLED_APPS"]
def test_real_plugin_disable_twice(conf):
installed_apps_before = conf.config["INSTALLED_APPS"][:]
test_plugin = "kolibri.plugins.audio_mp3_render"
assert test_plugin in installed_apps_before
# Because RIP example plugin
cli.plugin(test_plugin, disable=True)
assert test_plugin not in conf.config["INSTALLED_APPS"]
installed_apps_before = conf.config["INSTALLED_APPS"][:]
cli.plugin(test_plugin, disable=True)
assert test_plugin not in conf.config["INSTALLED_APPS"]
def test_plugin_with_no_plugin_class(conf):
"""
Expected behavior is that nothing blows up with exceptions, user just gets
a warning and nothing is enabled or changed in the configuration.
"""
# For fun, we pass in a system library
installed_apps_before = conf.config["INSTALLED_APPS"][:]
cli.plugin("os.path")
assert installed_apps_before == conf.config["INSTALLED_APPS"]
class TestKolibriCLI(KolibriTestBase):
def test_cli(self):
logger.debug("This is a unit test in the main Kolibri app space")
# Test the -h
with self.assertRaises(SystemExit):
cli.main("-h")
with self.assertRaises(SystemExit):
cli.main("--version")
def test_parsing(self):
test_patterns = (
(['start'], {'start': True}, []),
(['stop'], {'stop': True}, []),
(['shell'], {'shell': True}, []),
(['manage', 'shell'], {'manage': True, 'COMMAND': 'shell'}, []),
(['manage', 'help'], {'manage': True, 'COMMAND': 'help'}, []),
(['manage', 'blah'], {'manage': True, 'COMMAND': 'blah'}, []),
(
['manage', 'blah', '--debug', '--', '--django-arg'],
{'manage': True, 'COMMAND': 'blah', '--debug': True},
['--django-arg']
),
(
['manage', 'blah', '--django-arg'],
{'manage': True, 'COMMAND': 'blah'},
['--django-arg']
),
)
for p, docopt_expected, django_expected in test_patterns:
docopt, django = cli.parse_args(p)
for k, v in docopt_expected.items():
assert docopt[k] == v
assert django == django_expected
| mit | 682,227,345,977,819,300 | 31.342342 | 78 | 0.609749 | false |
hmendozap/master-arbeit-projects | autosk_dev_test/component/implementation/FeedForwardNet.py | 1 | 15383 |
"""
Created on Jul 22, 2015
Modified on Apr 21, 2016
@author: Aaron Klein
@modified: Hector Mendoza
"""
import numpy as np
from sklearn.utils.validation import check_random_state
import theano
import theano.tensor as T
import theano.sparse as S
import lasagne
DEBUG = True
def sharedX(X, dtype=theano.config.floatX, name=None):
return theano.shared(np.asarray(X, dtype=dtype), name=name)
def smorm3s(cost, params, learning_rate=1e-3, eps=1e-16, gather=False):
updates = []
optim_params = []
grads = T.grad(cost, params)
for p, grad in zip(params, grads):
mem = sharedX(p.get_value() * 0. + 1.)
g = sharedX(p.get_value() * 0.)
g2 = sharedX(p.get_value() * 0.)
if gather:
optim_params.append(mem)
optim_params.append(g)
optim_params.append(g2)
r_t = 1. / (mem + 1)
g_t = (1 - r_t) * g + r_t * grad
g2_t = (1 - r_t) * g2 + r_t * grad**2
p_t = p - grad * T.minimum(learning_rate, g_t * g_t / (g2_t + eps)) / \
(T.sqrt(g2_t + eps) + eps)
mem_t = 1 + mem * (1 - g_t * g_t / (g2_t + eps))
updates.append((g, g_t))
updates.append((g2, g2_t))
updates.append((p, p_t))
updates.append((mem, mem_t))
return updates
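# Usage sketch (hypothetical, mirroring how the solvers are wired up further below):
# smorm3s returns a list of (shared_variable, update_expression) pairs that can be
# passed straight to theano.function:
#
#   loss = lasagne.objectives.categorical_crossentropy(prediction, targets).mean()
#   params = lasagne.layers.get_all_params(network, trainable=True)
#   train_fn = theano.function([inputs, targets], loss,
#                              updates=smorm3s(loss, params, learning_rate=1e-3))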
def iterate_minibatches(inputs, targets, batchsize, shuffle=False, random_state=None):
assert inputs.shape[0] == targets.shape[0],\
"The number of training points is not the same"
if shuffle:
seed = check_random_state(random_state)
indices = np.arange(inputs.shape[0])
seed.shuffle(indices)
# np.random.shuffle(indices)
for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
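# Usage sketch (hypothetical): iterate over a dataset in shuffled batches of 32,
# assuming X has shape (n_samples, n_features) and y has shape (n_samples,):
#
#   for xb, yb in iterate_minibatches(X, y, 32, shuffle=True, random_state=42):
#       batch_loss = train_fn(xb, yb, learning_rate)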
class FeedForwardNet(object):
def __init__(self, input_shape=(100, 28*28), random_state=None,
batch_size=100, num_layers=4, num_units_per_layer=(10, 10, 10),
dropout_per_layer=(0.5, 0.5, 0.5), std_per_layer=(0.005, 0.005, 0.005),
num_output_units=2, dropout_output=0.5, learning_rate=0.01,
lambda2=1e-4, momentum=0.9, beta1=0.9, beta2=0.9,
rho=0.95, solver='adam', num_epochs=2,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=1,
activation_per_layer=('relu',)*3, weight_init_per_layer=('henormal',)*3,
leakiness_per_layer=(1./3.,)*3, tanh_alpha_per_layer=(2./3.,)*3,
tanh_beta_per_layer=(1.7159,)*3,
is_sparse=False, is_binary=False, is_regression=False, is_multilabel=False):
self.random_state = random_state
self.batch_size = batch_size
self.input_shape = input_shape
self.num_layers = num_layers
self.num_units_per_layer = num_units_per_layer
self.dropout_per_layer = np.asarray(dropout_per_layer, dtype=theano.config.floatX)
self.num_output_units = num_output_units
self.dropout_output = T.cast(dropout_output, dtype=theano.config.floatX)
self.activation_per_layer = activation_per_layer
self.weight_init_per_layer = weight_init_per_layer
self.std_per_layer = np.asarray(std_per_layer, dtype=theano.config.floatX)
self.leakiness_per_layer = np.asarray(leakiness_per_layer, dtype=theano.config.floatX)
self.tanh_alpha_per_layer = np.asarray(tanh_alpha_per_layer, dtype=theano.config.floatX)
self.tanh_beta_per_layer = np.asarray(tanh_beta_per_layer, dtype=theano.config.floatX)
self.momentum = T.cast(momentum, dtype=theano.config.floatX)
self.learning_rate = np.asarray(learning_rate, dtype=theano.config.floatX)
self.lambda2 = T.cast(lambda2, dtype=theano.config.floatX)
self.beta1 = T.cast(beta1, dtype=theano.config.floatX)
self.beta2 = T.cast(beta2, dtype=theano.config.floatX)
self.rho = T.cast(rho, dtype=theano.config.floatX)
self.num_epochs = num_epochs
self.lr_policy = lr_policy
self.gamma = np.asarray(gamma, dtype=theano.config.floatX)
self.power = np.asarray(power, dtype=theano.config.floatX)
self.epoch_step = np.asarray(epoch_step, dtype=theano.config.floatX)
self.is_binary = is_binary
self.is_regression = is_regression
self.is_multilabel = is_multilabel
self.is_sparse = is_sparse
self.solver = solver
if is_sparse:
input_var = S.csr_matrix('inputs', dtype=theano.config.floatX)
else:
input_var = T.matrix('inputs')
if self.is_binary or self.is_multilabel or self.is_regression:
target_var = T.matrix('targets')
else:
target_var = T.ivector('targets')
if DEBUG:
if self.is_binary:
print("... using binary loss")
if self.is_multilabel:
print("... using multilabel prediction")
if self.is_regression:
print("... using regression loss")
print("... building network")
print(input_shape)
print("... with number of epochs")
print(num_epochs)
# Added for reproducibility
seed = check_random_state(self.random_state)
lasagne.random.set_rng(seed)
self.network = lasagne.layers.InputLayer(shape=input_shape,
input_var=input_var)
# Define each layer
for i in range(num_layers - 1):
init_weight = self._choose_weight_init(i)
activation_function = self._choose_activation(i)
self.network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(self.network,
p=self.dropout_per_layer[i]),
num_units=self.num_units_per_layer[i],
W=init_weight,
b=lasagne.init.Constant(val=0.0),
nonlinearity=activation_function)
# Define output layer and nonlinearity of last layer
if self.is_regression:
output_activation = lasagne.nonlinearities.linear
elif self.is_binary or self.is_multilabel:
output_activation = lasagne.nonlinearities.sigmoid
else:
output_activation = lasagne.nonlinearities.softmax
self.network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(self.network,
p=self.dropout_output),
num_units=self.num_output_units,
W=lasagne.init.GlorotNormal(),
b=lasagne.init.Constant(),
nonlinearity=output_activation)
prediction = lasagne.layers.get_output(self.network)
if self.is_regression:
loss_function = lasagne.objectives.squared_error
elif self.is_binary or self.is_multilabel:
loss_function = lasagne.objectives.binary_crossentropy
else:
loss_function = lasagne.objectives.categorical_crossentropy
loss = loss_function(prediction, target_var)
# Aggregate loss mean function with l2
# Regularization on all layers' params
if self.is_binary or self.is_multilabel:
loss = T.sum(loss, dtype=theano.config.floatX)
else:
loss = T.mean(loss, dtype=theano.config.floatX)
l2_penalty = self.lambda2 * lasagne.regularization.regularize_network_params(
self.network, lasagne.regularization.l2)
loss += l2_penalty
params = lasagne.layers.get_all_params(self.network, trainable=True)
# Create the symbolic scalar lr for loss & updates function
lr_scalar = T.scalar('lr', dtype=theano.config.floatX)
if solver == "nesterov":
updates = lasagne.updates.nesterov_momentum(loss, params,
learning_rate=lr_scalar,
momentum=self.momentum)
elif solver == "adam":
updates = lasagne.updates.adam(loss, params,
learning_rate=lr_scalar,
beta1=self.beta1, beta2=self.beta2)
elif solver == "adadelta":
updates = lasagne.updates.adadelta(loss, params,
learning_rate=lr_scalar,
rho=self.rho)
elif solver == "adagrad":
updates = lasagne.updates.adagrad(loss, params,
learning_rate=lr_scalar)
elif solver == "sgd":
updates = lasagne.updates.sgd(loss, params,
learning_rate=lr_scalar)
elif solver == "momentum":
updates = lasagne.updates.momentum(loss, params,
learning_rate=lr_scalar,
momentum=self.momentum)
elif solver == "smorm3s":
updates = smorm3s(loss, params,
learning_rate=lr_scalar)
else:
updates = lasagne.updates.sgd(loss, params,
learning_rate=lr_scalar)
# Validation was removed, as auto-sklearn handles that. If this net
# is to be used independently, validation accuracy has to be included
if DEBUG:
print("... compiling theano functions")
self.train_fn = theano.function([input_var, target_var, lr_scalar],
loss,
updates=updates,
allow_input_downcast=True,
profile=False,
on_unused_input='warn',
name='train_fn')
if DEBUG:
print('... compiling update function')
self.update_function = self._policy_function()
def _policy_function(self):
epoch, gm, powr, step = T.scalars('epoch', 'gm', 'powr', 'step')
if self.lr_policy == 'inv':
decay = T.power(1.0+gm*epoch, -powr)
elif self.lr_policy == 'exp':
decay = gm ** epoch
elif self.lr_policy == 'step':
decay = T.switch(T.eq(T.mod_check(epoch, step), 0.0),
T.power(gm, T.floor_div(epoch, step)),
1.0)
else:
decay = T.constant(1.0, name='fixed', dtype=theano.config.floatX)
return theano.function([gm, epoch, powr, step],
decay,
allow_input_downcast=True,
on_unused_input='ignore',
name='update_fn')
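# Worked example (values assumed): the compiled function returns the per-epoch decay
# factor that fit() multiplies into self.learning_rate, i.e.
#   'inv'  : decay = (1 + gamma*epoch)**(-power)   e.g. gamma=0.01, power=1, epoch=10 -> ~0.909
#   'exp'  : decay = gamma**epoch
#   'step' : decay = gamma**(epoch//step) when epoch % step == 0, otherwise 1.0
#   'fixed': decay = 1.0 (learning rate unchanged)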
def fit(self, X, y):
if self.batch_size > X.shape[0]:
self.batch_size = X.shape[0]
print('One update per epoch batch size')
if self.is_sparse:
X = X.astype(np.float32)
else:
try:
X = np.asarray(X, dtype=theano.config.floatX)
y = np.asarray(y, dtype=theano.config.floatX)
except Exception as E:
print('Fit casting error: %s' % E)
for epoch in range(self.num_epochs):
train_err = 0
train_batches = 0
for inputs, targets in iterate_minibatches(X, y, self.batch_size, shuffle=True,
random_state=self.random_state):
train_err += self.train_fn(inputs, targets, self.learning_rate)
train_batches += 1
decay = self.update_function(self.gamma, epoch+1.0,
self.power, self.epoch_step)
self.learning_rate *= decay
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
return self
def predict(self, X, is_sparse=False):
predictions = self.predict_proba(X, is_sparse)
if self.is_multilabel:
return np.round(predictions)
elif self.is_regression:
return predictions
else:
return np.argmax(predictions, axis=1)
def predict_proba(self, X, is_sparse=False):
if is_sparse:
X = X.astype(np.float32)
X = S.as_sparse_or_tensor_variable(X)
else:
try:
X = np.asarray(X, dtype=theano.config.floatX)
except Exception as E:
print('Prediction casting error: %s' % E)
predictions = lasagne.layers.get_output(self.network,
X, deterministic=True).eval()
if self.is_binary:
return np.append(1.0 - predictions, predictions, axis=1)
else:
return predictions
def _choose_activation(self, index=0, output=False):
if output:
nl = getattr(self, 'output_activations', None)
else:
nl = getattr(self, 'activation_functions', None)
activation = self.activation_per_layer[index]
layer_activation = nl.get(activation)
if activation == 'scaledTanh':
layer_activation = layer_activation(scale_in=self.tanh_alpha_per_layer[index],
scale_out=self.tanh_beta_per_layer[index])
elif activation == 'leaky':
layer_activation = layer_activation(leakiness=self.leakiness_per_layer[index])
return layer_activation
def _choose_weight_init(self, index=0, output=False):
wi = getattr(self, 'weight_initializations', None)
initialization = self.weight_init_per_layer[index]
weight_init = wi.get(initialization)
if initialization == 'normal':
weight_init = weight_init(std=self.std_per_layer[index])
else:
weight_init = weight_init()
return weight_init
activation_functions = {
'relu': lasagne.nonlinearities.rectify,
'leaky': lasagne.nonlinearities.LeakyRectify,
'very_leaky': lasagne.nonlinearities.very_leaky_rectify,
'elu': lasagne.nonlinearities.elu,
'linear': lasagne.nonlinearities.linear,
'scaledTanh': lasagne.nonlinearities.ScaledTanH,
'sigmoid': lasagne.nonlinearities.sigmoid,
'tahn': lasagne.nonlinearities.tanh,
}
output_activations = {
'softmax': lasagne.nonlinearities.softmax,
'softplus': lasagne.nonlinearities.softplus,
'sigmoid': lasagne.nonlinearities.sigmoid,
'tahn': lasagne.nonlinearities.tanh,
}
weight_initializations = {
'constant': lasagne.init.Constant,
'normal': lasagne.init.Normal,
'uniform': lasagne.init.Uniform,
'glorot_normal': lasagne.init.GlorotNormal,
'glorot_uniform': lasagne.init.GlorotUniform,
'he_normal': lasagne.init.HeNormal,
'he_uniform': lasagne.init.HeUniform,
'ortogonal': lasagne.init.Orthogonal,
'sparse': lasagne.init.Sparse
}
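# Usage sketch (hypothetical parameters, not part of the original file): train a small
# two-class classifier on dense float32 data X (n x d) with integer class labels y,
# then predict on X_test; note the initializer names must match the keys above:
#
#   net = FeedForwardNet(input_shape=(100, X.shape[1]), batch_size=100, num_layers=3,
#                        num_units_per_layer=(64, 64), dropout_per_layer=(0.5, 0.5),
#                        std_per_layer=(0.005, 0.005),
#                        weight_init_per_layer=('he_normal', 'he_normal'),
#                        num_output_units=2, solver='smorm3s', num_epochs=10,
#                        random_state=1)
#   net.fit(X, y)
#   y_pred = net.predict(X_test)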
| mit | -2,196,560,965,525,019,400 | 41.37741 | 96 | 0.556263 | false |
miguelinux/vbox | src/VBox/ValidationKit/common/utils.py | 1 | 58323 |
# -*- coding: utf-8 -*-
# $Id: utils.py $
# pylint: disable=C0302
"""
Common Utility Functions.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 108487 $"
# Standard Python imports.
import datetime;
import os;
import platform;
import re;
import stat;
import subprocess;
import sys;
import time;
import traceback;
import unittest;
if sys.platform == 'win32':
import ctypes;
import win32api; # pylint: disable=F0401
import win32con; # pylint: disable=F0401
import win32console; # pylint: disable=F0401
import win32process; # pylint: disable=F0401
else:
import signal;
# Python 3 hacks:
if sys.version_info[0] >= 3:
unicode = str; # pylint: disable=redefined-builtin,invalid-name
xrange = range; # pylint: disable=redefined-builtin,invalid-name
long = int; # pylint: disable=redefined-builtin,invalid-name
#
# Host OS and CPU.
#
def getHostOs():
"""
Gets the host OS name (short).
See the KBUILD_OSES variable in kBuild/header.kmk for possible return values.
"""
sPlatform = platform.system();
if sPlatform in ('Linux', 'Darwin', 'Solaris', 'FreeBSD', 'NetBSD', 'OpenBSD'):
sPlatform = sPlatform.lower();
elif sPlatform == 'Windows':
sPlatform = 'win';
elif sPlatform == 'SunOS':
sPlatform = 'solaris';
else:
raise Exception('Unsupported platform "%s"' % (sPlatform,));
return sPlatform;
g_sHostArch = None;
def getHostArch():
"""
Gets the host CPU architecture.
See the KBUILD_ARCHES variable in kBuild/header.kmk for possible return values.
"""
global g_sHostArch;
if g_sHostArch is None:
sArch = platform.machine();
if sArch in ('i386', 'i486', 'i586', 'i686', 'i786', 'i886', 'x86'):
sArch = 'x86';
elif sArch in ('AMD64', 'amd64', 'x86_64'):
sArch = 'amd64';
elif sArch == 'i86pc': # SunOS
if platform.architecture()[0] == '64bit':
sArch = 'amd64';
else:
try:
sArch = processOutputChecked(['/usr/bin/isainfo', '-n',]);
except:
pass;
sArch = sArch.strip();
if sArch != 'amd64':
sArch = 'x86';
else:
raise Exception('Unsupported architecture/machine "%s"' % (sArch,));
g_sHostArch = sArch;
return g_sHostArch;
def getHostOsDotArch():
"""
Gets the 'os.arch' for the host.
"""
return '%s.%s' % (getHostOs(), getHostArch());
def isValidOs(sOs):
"""
Validates the OS name.
"""
if sOs in ('darwin', 'dos', 'dragonfly', 'freebsd', 'haiku', 'l4', 'linux', 'netbsd', 'nt', 'openbsd', \
'os2', 'solaris', 'win', 'os-agnostic'):
return True;
return False;
def isValidArch(sArch):
"""
Validates the CPU architecture name.
"""
if sArch in ('x86', 'amd64', 'sparc32', 'sparc64', 's390', 's390x', 'ppc32', 'ppc64', \
'mips32', 'mips64', 'ia64', 'hppa32', 'hppa64', 'arm', 'alpha'):
return True;
return False;
def isValidOsDotArch(sOsDotArch):
"""
Validates the 'os.arch' string.
"""
asParts = sOsDotArch.split('.');
if len(asParts) != 2:
return False;
return isValidOs(asParts[0]) \
and isValidArch(asParts[1]);
def getHostOsVersion():
"""
Returns the host OS version. This is platform.release with additional
distro indicator on linux.
"""
sVersion = platform.release();
sOs = getHostOs();
if sOs == 'linux':
sDist = '';
try:
# try /etc/lsb-release first to distinguish between Debian and Ubuntu
oFile = open('/etc/lsb-release');
for sLine in oFile:
oMatch = re.search(r'(?:DISTRIB_DESCRIPTION\s*=)\s*"*(.*)"', sLine);
if oMatch is not None:
sDist = oMatch.group(1).strip();
except:
pass;
if sDist:
sVersion += ' / ' + sDist;
else:
asFiles = \
[
[ '/etc/debian_version', 'Debian v'],
[ '/etc/gentoo-release', '' ],
[ '/etc/oracle-release', '' ],
[ '/etc/redhat-release', '' ],
[ '/etc/SuSE-release', '' ],
];
for sFile, sPrefix in asFiles:
if os.path.isfile(sFile):
try:
oFile = open(sFile);
sLine = oFile.readline();
oFile.close();
except:
continue;
sLine = sLine.strip()
if len(sLine) > 0:
sVersion += ' / ' + sPrefix + sLine;
break;
elif sOs == 'solaris':
sVersion = platform.version();
if os.path.isfile('/etc/release'):
try:
oFile = open('/etc/release');
sLast = oFile.readlines()[-1];
oFile.close();
sLast = sLast.strip();
if len(sLast) > 0:
sVersion += ' (' + sLast + ')';
except:
pass;
elif sOs == 'darwin':
sOsxVersion = platform.mac_ver()[0];
codenames = {"4": "Tiger",
"5": "Leopard",
"6": "Snow Leopard",
"7": "Lion",
"8": "Mountain Lion",
"9": "Mavericks",
"10": "Yosemite",
"11": "El Capitan",
"12": "Sierra",
"13": "Unknown 13",
"14": "Unknown 14", }
sVersion += ' / OS X ' + sOsxVersion + ' (' + codenames[sOsxVersion.split('.')[1]] + ')'
return sVersion;
#
# File system.
#
def openNoInherit(sFile, sMode = 'r'):
"""
Wrapper around open() that tries its best to make sure the file isn't
inherited by child processes.
This is a best effort thing at the moment as it doesn't synchronize with
child process spawning in any way. Thus it can be subject to races in
multithreaded programs.
"""
try:
from fcntl import FD_CLOEXEC, F_GETFD, F_SETFD, fcntl; # pylint: disable=F0401
except:
return open(sFile, sMode);
oFile = open(sFile, sMode)
#try:
fcntl(oFile, F_SETFD, fcntl(oFile, F_GETFD) | FD_CLOEXEC);
#except:
# pass;
return oFile;
def noxcptReadLink(sPath, sXcptRet):
"""
No exceptions os.readlink wrapper.
"""
try:
sRet = os.readlink(sPath); # pylint: disable=E1101
except:
sRet = sXcptRet;
return sRet;
def readFile(sFile, sMode = 'rb'):
"""
Reads the entire file.
"""
oFile = open(sFile, sMode);
sRet = oFile.read();
oFile.close();
return sRet;
def noxcptReadFile(sFile, sXcptRet, sMode = 'rb'):
"""
No exceptions common.readFile wrapper.
"""
try:
sRet = readFile(sFile, sMode);
except:
sRet = sXcptRet;
return sRet;
def noxcptRmDir(sDir, oXcptRet = False):
"""
No exceptions os.rmdir wrapper.
"""
oRet = True;
try:
os.rmdir(sDir);
except:
oRet = oXcptRet;
return oRet;
def noxcptDeleteFile(sFile, oXcptRet = False):
"""
No exceptions os.remove wrapper.
"""
oRet = True;
try:
os.remove(sFile);
except:
oRet = oXcptRet;
return oRet;
def dirEnumerateTree(sDir, fnCallback, fIgnoreExceptions = True):
# type: (string, (string, stat) -> bool) -> bool
"""
Recursively walks a directory tree, calling fnCallback for each.
fnCallback takes a full path and stat object (can be None). It
returns a boolean value, False stops walking and returns immediately.
Returns True or False depending on fnCallback.
    Returns None if fIgnoreExceptions is True and an exception was raised by listdir.
"""
def __worker(sCurDir):
""" Worker for """
try:
asNames = os.listdir(sCurDir);
except:
if not fIgnoreExceptions:
raise;
return None;
rc = True;
for sName in asNames:
if sName not in [ '.', '..' ]:
sFullName = os.path.join(sCurDir, sName);
try: oStat = os.lstat(sFullName);
except: oStat = None;
if fnCallback(sFullName, oStat) is False:
return False;
if oStat is not None and stat.S_ISDIR(oStat.st_mode):
rc = __worker(sFullName);
if rc is False:
break;
return rc;
# Ensure unicode path here so listdir also returns unicode on windows.
## @todo figure out unicode stuff on non-windows.
if sys.platform == 'win32':
sDir = unicode(sDir);
return __worker(sDir);
def formatFileMode(uMode):
# type: (int) -> string
"""
    Format a st_mode value 'ls -la' fashion.
Returns string.
"""
if stat.S_ISDIR(uMode): sMode = 'd';
elif stat.S_ISREG(uMode): sMode = '-';
elif stat.S_ISLNK(uMode): sMode = 'l';
elif stat.S_ISFIFO(uMode): sMode = 'p';
elif stat.S_ISCHR(uMode): sMode = 'c';
elif stat.S_ISBLK(uMode): sMode = 'b';
elif stat.S_ISSOCK(uMode): sMode = 's';
else: sMode = '?';
## @todo sticky bits.
sMode += 'r' if uMode & stat.S_IRUSR else '-';
sMode += 'w' if uMode & stat.S_IWUSR else '-';
sMode += 'x' if uMode & stat.S_IXUSR else '-';
sMode += 'r' if uMode & stat.S_IRGRP else '-';
sMode += 'w' if uMode & stat.S_IWGRP else '-';
sMode += 'x' if uMode & stat.S_IXGRP else '-';
sMode += 'r' if uMode & stat.S_IROTH else '-';
sMode += 'w' if uMode & stat.S_IWOTH else '-';
sMode += 'x' if uMode & stat.S_IXOTH else '-';
sMode += ' ';
return sMode;
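# Illustrative results for formatFileMode (added example, not part of the
# original module); the mode bits come from the stat module:
#   formatFileMode(stat.S_IFDIR | 0o755)  ->  'drwxr-xr-x '
#   formatFileMode(stat.S_IFREG | 0o644)  ->  '-rw-r--r-- '
# Note the trailing space the function appends, used by formatFileStat below.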
def formatFileStat(oStat):
# type: (stat) -> string
"""
    Format a stat result 'ls -la' fashion (numeric IDs).
Returns string.
"""
return '%s %3s %4s %4s %10s %s' \
% (formatFileMode(oStat.st_mode), oStat.st_nlink, oStat.st_uid, oStat.st_gid, oStat.st_size,
time.strftime('%Y-%m-%d %H:%M', time.localtime(oStat.st_mtime)), );
## Good buffer for file operations.
g_cbGoodBufferSize = 256*1024;
## The original shutil.copyfileobj.
g_fnOriginalShCopyFileObj = None;
def __myshutilcopyfileobj(fsrc, fdst, length = g_cbGoodBufferSize):
""" shutil.copyfileobj with different length default value (16384 is slow with python 2.7 on windows). """
return g_fnOriginalShCopyFileObj(fsrc, fdst, length);
def __installShUtilHacks(shutil):
""" Installs the shutil buffer size hacks. """
global g_fnOriginalShCopyFileObj;
if g_fnOriginalShCopyFileObj is None:
g_fnOriginalShCopyFileObj = shutil.copyfileobj;
shutil.copyfileobj = __myshutilcopyfileobj;
return True;
def copyFileSimple(sFileSrc, sFileDst):
"""
Wrapper around shutil.copyfile that simply copies the data of a regular file.
Raises exception on failure.
Return True for show.
"""
import shutil;
__installShUtilHacks(shutil);
return shutil.copyfile(sFileSrc, sFileDst);
#
# SubProcess.
#
def _processFixPythonInterpreter(aPositionalArgs, dKeywordArgs):
"""
If the "executable" is a python script, insert the python interpreter at
the head of the argument list so that it will work on systems which doesn't
support hash-bang scripts.
"""
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
if asArgs[0].endswith('.py'):
if sys.executable is not None and len(sys.executable) > 0:
asArgs.insert(0, sys.executable);
else:
asArgs.insert(0, 'python');
# paranoia...
if dKeywordArgs.get('args') is not None:
dKeywordArgs['args'] = asArgs;
else:
aPositionalArgs = (asArgs,) + aPositionalArgs[1:];
return None;
def processCall(*aPositionalArgs, **dKeywordArgs):
"""
    Wrapper around subprocess.call to deal with its absence in older
python versions.
Returns process exit code (see subprocess.poll).
"""
assert dKeywordArgs.get('stdout') is None;
assert dKeywordArgs.get('stderr') is None;
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
oProcess = subprocess.Popen(*aPositionalArgs, **dKeywordArgs);
return oProcess.wait();
def processOutputChecked(*aPositionalArgs, **dKeywordArgs):
"""
    Wrapper around subprocess.check_output to deal with its absence in older
python versions.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
oProcess = subprocess.Popen(stdout=subprocess.PIPE, *aPositionalArgs, **dKeywordArgs);
sOutput, _ = oProcess.communicate();
iExitCode = oProcess.poll();
    if iExitCode != 0:
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
print(sOutput);
raise subprocess.CalledProcessError(iExitCode, asArgs);
return str(sOutput); # str() make pylint happy.
g_fOldSudo = None;
def _sudoFixArguments(aPositionalArgs, dKeywordArgs, fInitialEnv = True):
"""
    Adds 'sudo' (or similar) to the args parameter, wherever it is.
"""
# Are we root?
fIsRoot = True;
try:
fIsRoot = os.getuid() == 0; # pylint: disable=E1101
except:
pass;
# If not, prepend sudo (non-interactive, simulate initial login).
if fIsRoot is not True:
asArgs = dKeywordArgs.get('args');
if asArgs is None:
asArgs = aPositionalArgs[0];
# Detect old sudo.
global g_fOldSudo;
if g_fOldSudo is None:
try:
sVersion = processOutputChecked(['sudo', '-V']);
except:
sVersion = '1.7.0';
sVersion = sVersion.strip().split('\n')[0];
sVersion = sVersion.replace('Sudo version', '').strip();
g_fOldSudo = len(sVersion) >= 4 \
and sVersion[0] == '1' \
and sVersion[1] == '.' \
and sVersion[2] <= '6' \
and sVersion[3] == '.';
asArgs.insert(0, 'sudo');
if not g_fOldSudo:
asArgs.insert(1, '-n');
if fInitialEnv and not g_fOldSudo:
asArgs.insert(1, '-i');
# paranoia...
if dKeywordArgs.get('args') is not None:
dKeywordArgs['args'] = asArgs;
else:
aPositionalArgs = (asArgs,) + aPositionalArgs[1:];
return None;
def sudoProcessCall(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.call
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return processCall(*aPositionalArgs, **dKeywordArgs);
def sudoProcessOutputChecked(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.check_output.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return processOutputChecked(*aPositionalArgs, **dKeywordArgs);
def sudoProcessOutputCheckedNoI(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.check_output, except '-i' isn't used.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs, False);
return processOutputChecked(*aPositionalArgs, **dKeywordArgs);
def sudoProcessPopen(*aPositionalArgs, **dKeywordArgs):
"""
sudo (or similar) + subprocess.Popen.
"""
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
_sudoFixArguments(aPositionalArgs, dKeywordArgs);
return subprocess.Popen(*aPositionalArgs, **dKeywordArgs);
#
# Generic process stuff.
#
def processInterrupt(uPid):
"""
Sends a SIGINT or equivalent to interrupt the specified process.
Returns True on success, False on failure.
On Windows hosts this may not work unless the process happens to be a
process group leader.
"""
if sys.platform == 'win32':
try:
win32console.GenerateConsoleCtrlEvent(win32con.CTRL_BREAK_EVENT, uPid); # pylint: disable=no-member
fRc = True;
except:
fRc = False;
else:
try:
os.kill(uPid, signal.SIGINT);
fRc = True;
except:
fRc = False;
return fRc;
def sendUserSignal1(uPid):
"""
Sends a SIGUSR1 or equivalent to nudge the process into shutting down
(VBoxSVC) or something.
Returns True on success, False on failure or if not supported (win).
On Windows hosts this may not work unless the process happens to be a
process group leader.
"""
if sys.platform == 'win32':
fRc = False;
else:
try:
os.kill(uPid, signal.SIGUSR1); # pylint: disable=E1101
fRc = True;
except:
fRc = False;
return fRc;
def processTerminate(uPid):
"""
Terminates the process in a nice manner (SIGTERM or equivalent).
Returns True on success, False on failure.
"""
fRc = False;
if sys.platform == 'win32':
try:
hProcess = win32api.OpenProcess(win32con.PROCESS_TERMINATE, False, uPid); # pylint: disable=no-member
except:
pass;
else:
try:
win32process.TerminateProcess(hProcess, 0x40010004); # DBG_TERMINATE_PROCESS # pylint: disable=no-member
fRc = True;
except:
pass;
win32api.CloseHandle(hProcess) # pylint: disable=no-member
else:
try:
os.kill(uPid, signal.SIGTERM);
fRc = True;
except:
pass;
return fRc;
def processKill(uPid):
"""
Terminates the process with extreme prejudice (SIGKILL).
Returns True on success, False on failure.
"""
if sys.platform == 'win32':
fRc = processTerminate(uPid);
else:
try:
os.kill(uPid, signal.SIGKILL); # pylint: disable=E1101
fRc = True;
except:
fRc = False;
return fRc;
def processKillWithNameCheck(uPid, sName):
"""
Like processKill(), but checks if the process name matches before killing
it. This is intended for killing using potentially stale pid values.
Returns True on success, False on failure.
"""
if processCheckPidAndName(uPid, sName) is not True:
return False;
return processKill(uPid);
def processExists(uPid):
"""
    Checks if the specified process exists.
This will only work if we can signal/open the process.
Returns True if it positively exists, False otherwise.
"""
if sys.platform == 'win32':
fRc = False;
try:
hProcess = win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION, False, uPid); # pylint: disable=no-member
except:
pass;
else:
win32api.CloseHandle(hProcess); # pylint: disable=no-member
fRc = True;
else:
try:
os.kill(uPid, 0);
fRc = True;
except:
fRc = False;
return fRc;
def processCheckPidAndName(uPid, sName):
"""
Checks if a process PID and NAME matches.
"""
fRc = processExists(uPid);
if fRc is not True:
return False;
if sys.platform == 'win32':
try:
from win32com.client import GetObject; # pylint: disable=F0401
oWmi = GetObject('winmgmts:');
aoProcesses = oWmi.InstancesOf('Win32_Process');
for oProcess in aoProcesses:
if long(oProcess.Properties_("ProcessId").Value) == uPid:
sCurName = oProcess.Properties_("Name").Value;
#reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));
sName = sName.lower();
sCurName = sCurName.lower();
if os.path.basename(sName) == sName:
sCurName = os.path.basename(sCurName);
if sCurName == sName \
or sCurName + '.exe' == sName \
or sCurName == sName + '.exe':
fRc = True;
break;
except:
#reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));
pass;
else:
if sys.platform in ('linux2', ):
asPsCmd = ['/bin/ps', '-p', '%u' % (uPid,), '-o', 'fname='];
elif sys.platform in ('sunos5',):
asPsCmd = ['/usr/bin/ps', '-p', '%u' % (uPid,), '-o', 'fname='];
elif sys.platform in ('darwin',):
asPsCmd = ['/bin/ps', '-p', '%u' % (uPid,), '-o', 'ucomm='];
else:
asPsCmd = None;
if asPsCmd is not None:
try:
oPs = subprocess.Popen(asPsCmd, stdout=subprocess.PIPE);
sCurName = oPs.communicate()[0];
iExitCode = oPs.wait();
except:
#reporter.logXcpt();
return False;
# ps fails with non-zero exit code if the pid wasn't found.
            if iExitCode != 0:
return False;
if sCurName is None:
return False;
sCurName = sCurName.strip();
            if sCurName == '':
return False;
if os.path.basename(sName) == sName:
sCurName = os.path.basename(sCurName);
elif os.path.basename(sCurName) == sCurName:
sName = os.path.basename(sName);
if sCurName != sName:
return False;
fRc = True;
return fRc;
class ProcessInfo(object):
"""Process info."""
def __init__(self, iPid):
self.iPid = iPid;
self.iParentPid = None;
self.sImage = None;
self.sName = None;
self.asArgs = None;
self.sCwd = None;
self.iGid = None;
self.iUid = None;
self.iProcGroup = None;
self.iSessionId = None;
def loadAll(self):
"""Load all the info."""
sOs = getHostOs();
if sOs == 'linux':
sProc = '/proc/%s/' % (self.iPid,);
if self.sImage is None: self.sImage = noxcptReadLink(sProc + 'exe', None);
if self.sCwd is None: self.sCwd = noxcptReadLink(sProc + 'cwd', None);
if self.asArgs is None: self.asArgs = noxcptReadFile(sProc + 'cmdline', '').split('\x00');
elif sOs == 'solaris':
sProc = '/proc/%s/' % (self.iPid,);
if self.sImage is None: self.sImage = noxcptReadLink(sProc + 'path/a.out', None);
if self.sCwd is None: self.sCwd = noxcptReadLink(sProc + 'path/cwd', None);
else:
pass;
if self.sName is None and self.sImage is not None:
self.sName = self.sImage;
def windowsGrabProcessInfo(self, oProcess):
"""Windows specific loadAll."""
try: self.sName = oProcess.Properties_("Name").Value;
except: pass;
try: self.sImage = oProcess.Properties_("ExecutablePath").Value;
except: pass;
try: self.asArgs = oProcess.Properties_("CommandLine").Value; ## @todo split it.
except: pass;
try: self.iParentPid = oProcess.Properties_("ParentProcessId").Value;
except: pass;
try: self.iSessionId = oProcess.Properties_("SessionId").Value;
except: pass;
if self.sName is None and self.sImage is not None:
self.sName = self.sImage;
def getBaseImageName(self):
"""
        Gets the base image name if available, using the process name if not available.
Returns image/process base name or None.
"""
sRet = self.sImage if self.sName is None else self.sName;
if sRet is None:
self.loadAll();
sRet = self.sImage if self.sName is None else self.sName;
if sRet is None:
if self.asArgs is None or len(self.asArgs) == 0:
return None;
sRet = self.asArgs[0];
if len(sRet) == 0:
return None;
return os.path.basename(sRet);
def getBaseImageNameNoExeSuff(self):
"""
Same as getBaseImageName, except any '.exe' or similar suffix is stripped.
"""
sRet = self.getBaseImageName();
if sRet is not None and len(sRet) > 4 and sRet[-4] == '.':
if (sRet[-4:]).lower() in [ '.exe', '.com', '.msc', '.vbs', '.cmd', '.bat' ]:
sRet = sRet[:-4];
return sRet;
def processListAll(): # pylint: disable=R0914
"""
Return a list of ProcessInfo objects for all the processes in the system
that the current user can see.
"""
asProcesses = [];
sOs = getHostOs();
if sOs == 'win':
from win32com.client import GetObject; # pylint: disable=F0401
oWmi = GetObject('winmgmts:');
aoProcesses = oWmi.InstancesOf('Win32_Process');
for oProcess in aoProcesses:
try:
iPid = int(oProcess.Properties_("ProcessId").Value);
except:
continue;
oMyInfo = ProcessInfo(iPid);
oMyInfo.windowsGrabProcessInfo(oProcess);
asProcesses.append(oMyInfo);
elif sOs in [ 'linux', 'solaris' ]:
try:
asDirs = os.listdir('/proc');
except:
asDirs = [];
for sDir in asDirs:
if sDir.isdigit():
asProcesses.append(ProcessInfo(int(sDir),));
elif sOs == 'darwin':
# Try our best to parse ps output. (Not perfect but does the job most of the time.)
try:
sRaw = processOutputChecked([ '/bin/ps', '-A',
'-o', 'pid=',
'-o', 'ppid=',
'-o', 'pgid=',
'-o', 'sess=',
'-o', 'uid=',
'-o', 'gid=',
'-o', 'comm=' ]);
except:
return asProcesses;
for sLine in sRaw.split('\n'):
sLine = sLine.lstrip();
if len(sLine) < 7 or not sLine[0].isdigit():
continue;
iField = 0;
off = 0;
aoFields = [None, None, None, None, None, None, None];
while iField < 7:
# Eat whitespace.
while off < len(sLine) and (sLine[off] == ' ' or sLine[off] == '\t'):
off += 1;
# Final field / EOL.
if iField == 6:
aoFields[6] = sLine[off:];
break;
if off >= len(sLine):
break;
# Generic field parsing.
offStart = off;
off += 1;
while off < len(sLine) and sLine[off] != ' ' and sLine[off] != '\t':
off += 1;
try:
if iField != 3:
aoFields[iField] = int(sLine[offStart:off]);
else:
aoFields[iField] = long(sLine[offStart:off], 16); # sess is a hex address.
except:
pass;
iField += 1;
if aoFields[0] is not None:
oMyInfo = ProcessInfo(aoFields[0]);
oMyInfo.iParentPid = aoFields[1];
oMyInfo.iProcGroup = aoFields[2];
oMyInfo.iSessionId = aoFields[3];
oMyInfo.iUid = aoFields[4];
oMyInfo.iGid = aoFields[5];
oMyInfo.sName = aoFields[6];
asProcesses.append(oMyInfo);
return asProcesses;
def processCollectCrashInfo(uPid, fnLog, fnCrashFile):
"""
Looks for information regarding the demise of the given process.
"""
sOs = getHostOs();
if sOs == 'darwin':
#
# On darwin we look for crash and diagnostic reports.
#
asLogDirs = [
u'/Library/Logs/DiagnosticReports/',
u'/Library/Logs/CrashReporter/',
u'~/Library/Logs/DiagnosticReports/',
u'~/Library/Logs/CrashReporter/',
];
for sDir in asLogDirs:
sDir = os.path.expanduser(sDir);
if not os.path.isdir(sDir):
continue;
try:
asDirEntries = os.listdir(sDir);
except:
continue;
for sEntry in asDirEntries:
# Only interested in .crash files.
_, sSuff = os.path.splitext(sEntry);
if sSuff != '.crash':
continue;
# The pid can be found at the end of the first line.
sFull = os.path.join(sDir, sEntry);
try:
oFile = open(sFull, 'r');
sFirstLine = oFile.readline();
oFile.close();
except:
continue;
if len(sFirstLine) <= 4 or sFirstLine[-2] != ']':
continue;
offPid = len(sFirstLine) - 3;
while offPid > 1 and sFirstLine[offPid - 1].isdigit():
offPid -= 1;
try: uReportPid = int(sFirstLine[offPid:-2]);
except: continue;
# Does the pid we found match?
if uReportPid == uPid:
fnLog('Found crash report for %u: %s' % (uPid, sFull,));
fnCrashFile(sFull, False);
elif sOs == 'win':
#
        # Getting WER reports would be great, however we have trouble matching the
        # PID to those as they seem not to mention it in the brief reports.
# Instead we'll just look for crash dumps in C:\CrashDumps (our custom
# location - see the windows readme for the testbox script) and what
# the MSDN article lists for now.
#
        # It's been observed on Windows server 2012 that the dump files take
        # the form: <processimage>.<decimal-pid>.dmp
#
asDmpDirs = [
u'%SystemDrive%/CrashDumps/', # Testboxes.
u'%LOCALAPPDATA%/CrashDumps/', # MSDN example.
            u'%WINDIR%/ServiceProfiles/LocalService/',  # Local and network service.
            u'%WINDIR%/ServiceProfiles/NetworkService/',
u'%WINDIR%/ServiceProfiles/',
u'%WINDIR%/System32/Config/SystemProfile/', # System services.
];
sMatchSuffix = '.%u.dmp' % (uPid,);
for sDir in asDmpDirs:
sDir = os.path.expandvars(sDir);
if not os.path.isdir(sDir):
continue;
try:
asDirEntries = os.listdir(sDir);
except:
continue;
for sEntry in asDirEntries:
if sEntry.endswith(sMatchSuffix):
sFull = os.path.join(sDir, sEntry);
fnLog('Found crash dump for %u: %s' % (uPid, sFull,));
fnCrashFile(sFull, True);
else:
pass; ## TODO
return None;
#
# Time.
#
#
# The following test case shows how time.time() only have ~ms resolution
# on Windows (tested W10) and why it therefore makes sense to try use
# performance counters.
#
# Note! We cannot use time.clock() as the timestamp must be portable across
# processes. See timeout testcase problem on win hosts (no logs).
#
#import sys;
#import time;
#from common import utils;
#
#atSeries = [];
#for i in xrange(1,160):
# if i == 159: time.sleep(10);
# atSeries.append((utils.timestampNano(), long(time.clock() * 1000000000), long(time.time() * 1000000000)));
#
#tPrev = atSeries[0]
#for tCur in atSeries:
# print 't1=%+22u, %u' % (tCur[0], tCur[0] - tPrev[0]);
# print 't2=%+22u, %u' % (tCur[1], tCur[1] - tPrev[1]);
# print 't3=%+22u, %u' % (tCur[2], tCur[2] - tPrev[2]);
# print '';
# tPrev = tCur
#
#print 't1=%u' % (atSeries[-1][0] - atSeries[0][0]);
#print 't2=%u' % (atSeries[-1][1] - atSeries[0][1]);
#print 't3=%u' % (atSeries[-1][2] - atSeries[0][2]);
g_fWinUseWinPerfCounter = sys.platform == 'win32';
g_fpWinPerfCounterFreq = None;
g_oFuncwinQueryPerformanceCounter = None;
def _winInitPerfCounter():
""" Initializes the use of performance counters. """
global g_fWinUseWinPerfCounter, g_fpWinPerfCounterFreq, g_oFuncwinQueryPerformanceCounter
uFrequency = ctypes.c_ulonglong(0);
if ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(uFrequency)):
if uFrequency.value >= 1000:
#print 'uFrequency = %s' % (uFrequency,);
#print 'type(uFrequency) = %s' % (type(uFrequency),);
g_fpWinPerfCounterFreq = float(uFrequency.value);
# Check that querying the counter works too.
global g_oFuncwinQueryPerformanceCounter
g_oFuncwinQueryPerformanceCounter = ctypes.windll.kernel32.QueryPerformanceCounter;
uCurValue = ctypes.c_ulonglong(0);
if g_oFuncwinQueryPerformanceCounter(ctypes.byref(uCurValue)):
if uCurValue.value > 0:
return True;
g_fWinUseWinPerfCounter = False;
return False;
def _winFloatTime():
""" Gets floating point time on windows. """
if g_fpWinPerfCounterFreq is not None or _winInitPerfCounter():
uCurValue = ctypes.c_ulonglong(0);
if g_oFuncwinQueryPerformanceCounter(ctypes.byref(uCurValue)):
return float(uCurValue.value) / g_fpWinPerfCounterFreq;
return time.time();
def timestampNano():
"""
Gets a nanosecond timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime() * 1000000000);
return long(time.time() * 1000000000);
def timestampMilli():
"""
Gets a millisecond timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime() * 1000);
return long(time.time() * 1000);
def timestampSecond():
"""
Gets a second timestamp.
"""
if g_fWinUseWinPerfCounter is True:
return long(_winFloatTime());
return long(time.time());
def getTimePrefix():
"""
Returns a timestamp prefix, typically used for logging. UTC.
"""
try:
oNow = datetime.datetime.utcnow();
sTs = '%02u:%02u:%02u.%06u' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
except:
sTs = 'getTimePrefix-exception';
return sTs;
def getTimePrefixAndIsoTimestamp():
"""
Returns current UTC as log prefix and iso timestamp.
"""
try:
oNow = datetime.datetime.utcnow();
sTsPrf = '%02u:%02u:%02u.%06u' % (oNow.hour, oNow.minute, oNow.second, oNow.microsecond);
sTsIso = formatIsoTimestamp(oNow);
except:
sTsPrf = sTsIso = 'getTimePrefix-exception';
return (sTsPrf, sTsIso);
def formatIsoTimestamp(oNow):
"""Formats the datetime object as an ISO timestamp."""
assert oNow.tzinfo is None;
sTs = '%s.%09uZ' % (oNow.strftime('%Y-%m-%dT%H:%M:%S'), oNow.microsecond * 1000);
return sTs;
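# Illustrative example (added, not part of the original module); assumes a
# naive UTC datetime as asserted above:
#   formatIsoTimestamp(datetime.datetime(2016, 1, 2, 3, 4, 5, 123456))
#       -> '2016-01-02T03:04:05.123456000Z'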
def getIsoTimestamp():
"""Returns the current UTC timestamp as a string."""
return formatIsoTimestamp(datetime.datetime.utcnow());
def getLocalHourOfWeek():
""" Local hour of week (0 based). """
oNow = datetime.datetime.now();
return (oNow.isoweekday() - 1) * 24 + oNow.hour;
def formatIntervalSeconds(cSeconds):
""" Format a seconds interval into a nice 01h 00m 22s string """
# Two simple special cases.
if cSeconds < 60:
return '%ss' % (cSeconds,);
if cSeconds < 3600:
cMins = cSeconds / 60;
cSecs = cSeconds % 60;
if cSecs == 0:
return '%sm' % (cMins,);
return '%sm %ss' % (cMins, cSecs,);
# Generic and a bit slower.
cDays = cSeconds / 86400;
cSeconds %= 86400;
cHours = cSeconds / 3600;
cSeconds %= 3600;
cMins = cSeconds / 60;
cSecs = cSeconds % 60;
sRet = '';
if cDays > 0:
sRet = '%sd ' % (cDays,);
if cHours > 0:
sRet += '%sh ' % (cHours,);
if cMins > 0:
sRet += '%sm ' % (cMins,);
if cSecs > 0:
sRet += '%ss ' % (cSecs,);
assert len(sRet) > 0; assert sRet[-1] == ' ';
return sRet[:-1];
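# Illustrative examples (added, not from the original module); they assume the
# Python 2 integer division used throughout this file:
#   formatIntervalSeconds(45)    -> '45s'
#   formatIntervalSeconds(90)    -> '1m 30s'
#   formatIntervalSeconds(3725)  -> '1h 2m 5s'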
def formatIntervalSeconds2(oSeconds):
"""
Flexible input version of formatIntervalSeconds for use in WUI forms where
data is usually already string form.
"""
if isinstance(oSeconds, int) or isinstance(oSeconds, long):
return formatIntervalSeconds(oSeconds);
if not isString(oSeconds):
try:
lSeconds = long(oSeconds);
except:
pass;
else:
if lSeconds >= 0:
return formatIntervalSeconds2(lSeconds);
return oSeconds;
def parseIntervalSeconds(sString):
"""
Reverse of formatIntervalSeconds.
Returns (cSeconds, sError), where sError is None on success.
"""
# We might given non-strings, just return them without any fuss.
if not isString(sString):
if isinstance(sString, int) or isinstance(sString, long) or sString is None:
return (sString, None);
## @todo time/date objects?
return (int(sString), None);
# Strip it and make sure it's not empty.
sString = sString.strip();
if len(sString) == 0:
return (0, 'Empty interval string.');
#
# Split up the input into a list of 'valueN, unitN, ...'.
#
# Don't want to spend too much time trying to make re.split do exactly what
# I need here, so please forgive the extra pass I'm making here.
#
asRawParts = re.split(r'\s*([0-9]+)\s*([^0-9,;]*)[\s,;]*', sString);
asParts = [];
for sPart in asRawParts:
sPart = sPart.strip();
if len(sPart) > 0:
asParts.append(sPart);
if len(asParts) == 0:
return (0, 'Empty interval string or something?');
#
# Process them one or two at the time.
#
cSeconds = 0;
asErrors = [];
i = 0;
while i < len(asParts):
sNumber = asParts[i];
i += 1;
if sNumber.isdigit():
iNumber = int(sNumber);
sUnit = 's';
if i < len(asParts) and not asParts[i].isdigit():
sUnit = asParts[i];
i += 1;
sUnitLower = sUnit.lower();
if sUnitLower in [ 's', 'se', 'sec', 'second', 'seconds' ]:
pass;
elif sUnitLower in [ 'm', 'mi', 'min', 'minute', 'minutes' ]:
iNumber *= 60;
elif sUnitLower in [ 'h', 'ho', 'hou', 'hour', 'hours' ]:
iNumber *= 3600;
elif sUnitLower in [ 'd', 'da', 'day', 'days' ]:
iNumber *= 86400;
elif sUnitLower in [ 'w', 'week', 'weeks' ]:
iNumber *= 7 * 86400;
else:
asErrors.append('Unknown unit "%s".' % (sUnit,));
cSeconds += iNumber;
else:
asErrors.append('Bad number "%s".' % (sNumber,));
return (cSeconds, None if len(asErrors) == 0 else ' '.join(asErrors));
def formatIntervalHours(cHours):
""" Format a hours interval into a nice 1w 2d 1h string. """
# Simple special cases.
if cHours < 24:
return '%sh' % (cHours,);
# Generic and a bit slower.
cWeeks = cHours / (7 * 24);
cHours %= 7 * 24;
cDays = cHours / 24;
cHours %= 24;
sRet = '';
if cWeeks > 0:
sRet = '%sw ' % (cWeeks,);
if cDays > 0:
sRet = '%sd ' % (cDays,);
if cHours > 0:
sRet += '%sh ' % (cHours,);
assert len(sRet) > 0; assert sRet[-1] == ' ';
return sRet[:-1];
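# Illustrative examples (added, not from the original module), again assuming
# Python 2 integer division:
#   formatIntervalHours(23)   -> '23h'
#   formatIntervalHours(200)  -> '1w 1d 8h'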
def parseIntervalHours(sString):
"""
Reverse of formatIntervalHours.
Returns (cHours, sError), where sError is None on success.
"""
    # We might be given non-strings, just return them without any fuss.
if not isString(sString):
if isinstance(sString, int) or isinstance(sString, long) or sString is None:
return (sString, None);
## @todo time/date objects?
return (int(sString), None);
# Strip it and make sure it's not empty.
sString = sString.strip();
if len(sString) == 0:
return (0, 'Empty interval string.');
#
# Split up the input into a list of 'valueN, unitN, ...'.
#
# Don't want to spend too much time trying to make re.split do exactly what
# I need here, so please forgive the extra pass I'm making here.
#
asRawParts = re.split(r'\s*([0-9]+)\s*([^0-9,;]*)[\s,;]*', sString);
asParts = [];
for sPart in asRawParts:
sPart = sPart.strip();
if len(sPart) > 0:
asParts.append(sPart);
if len(asParts) == 0:
return (0, 'Empty interval string or something?');
#
# Process them one or two at the time.
#
cHours = 0;
asErrors = [];
i = 0;
while i < len(asParts):
sNumber = asParts[i];
i += 1;
if sNumber.isdigit():
iNumber = int(sNumber);
sUnit = 'h';
if i < len(asParts) and not asParts[i].isdigit():
sUnit = asParts[i];
i += 1;
sUnitLower = sUnit.lower();
if sUnitLower in [ 'h', 'ho', 'hou', 'hour', 'hours' ]:
pass;
elif sUnitLower in [ 'd', 'da', 'day', 'days' ]:
iNumber *= 24;
elif sUnitLower in [ 'w', 'week', 'weeks' ]:
iNumber *= 7 * 24;
else:
asErrors.append('Unknown unit "%s".' % (sUnit,));
cHours += iNumber;
else:
asErrors.append('Bad number "%s".' % (sNumber,));
return (cHours, None if len(asErrors) == 0 else ' '.join(asErrors));
#
# Introspection.
#
def getCallerName(oFrame=None, iFrame=2):
"""
Returns the name of the caller's caller.
"""
if oFrame is None:
try:
raise Exception();
except:
oFrame = sys.exc_info()[2].tb_frame.f_back;
while iFrame > 1:
if oFrame is not None:
oFrame = oFrame.f_back;
iFrame = iFrame - 1;
if oFrame is not None:
sName = '%s:%u' % (oFrame.f_code.co_name, oFrame.f_lineno);
return sName;
return "unknown";
def getXcptInfo(cFrames = 1):
"""
Gets text detailing the exception. (Good for logging.)
Returns list of info strings.
"""
#
# Try get exception info.
#
try:
oType, oValue, oTraceback = sys.exc_info();
except:
oType = oValue = oTraceback = None;
if oType is not None:
#
# Try format the info
#
asRet = [];
try:
try:
asRet = asRet + traceback.format_exception_only(oType, oValue);
asTraceBack = traceback.format_tb(oTraceback);
if cFrames is not None and cFrames <= 1:
asRet.append(asTraceBack[-1]);
else:
asRet.append('Traceback:')
for iFrame in range(min(cFrames, len(asTraceBack))):
asRet.append(asTraceBack[-iFrame - 1]);
asRet.append('Stack:')
asRet = asRet + traceback.format_stack(oTraceback.tb_frame.f_back, cFrames);
except:
asRet.append('internal-error: Hit exception #2! %s' % (traceback.format_exc(),));
if len(asRet) == 0:
asRet.append('No exception info...');
except:
asRet.append('internal-error: Hit exception! %s' % (traceback.format_exc(),));
else:
asRet = ['Couldn\'t find exception traceback.'];
return asRet;
#
# TestSuite stuff.
#
def isRunningFromCheckout(cScriptDepth = 1):
"""
Checks if we're running from the SVN checkout or not.
"""
try:
sFile = __file__;
cScriptDepth = 1;
except:
sFile = sys.argv[0];
sDir = os.path.abspath(sFile);
while cScriptDepth >= 0:
sDir = os.path.dirname(sDir);
if os.path.exists(os.path.join(sDir, 'Makefile.kmk')) \
or os.path.exists(os.path.join(sDir, 'Makefile.kup')):
return True;
cScriptDepth -= 1;
return False;
#
# Bourne shell argument fun.
#
def argsSplit(sCmdLine):
"""
Given a bourne shell command line invocation, split it up into arguments
assuming IFS is space.
Returns None on syntax error.
"""
## @todo bourne shell argument parsing!
return sCmdLine.split(' ');
def argsGetFirst(sCmdLine):
"""
    Given a bourne shell command line invocation, return the first argument
assuming IFS is space.
Returns None on invalid syntax, otherwise the parsed and unescaped argv[0] string.
"""
asArgs = argsSplit(sCmdLine);
if asArgs is None or len(asArgs) == 0:
return None;
return asArgs[0];
#
# String helpers.
#
def stricmp(sFirst, sSecond):
"""
    Compares two strings in a case-insensitive fashion.
    Python doesn't seem to have any way of doing this correctly, so this is just
    an approximation using lower.
"""
if sFirst == sSecond:
return 0;
sLower1 = sFirst.lower();
sLower2 = sSecond.lower();
if sLower1 == sLower2:
return 0;
if sLower1 < sLower2:
return -1;
return 1;
#
# Misc.
#
def versionCompare(sVer1, sVer2):
"""
    Compares two version strings in a fashion similar to RTStrVersionCompare.
"""
## @todo implement me!!
if sVer1 == sVer2:
return 0;
if sVer1 < sVer2:
return -1;
return 1;
def formatNumber(lNum, sThousandSep = ' '):
"""
Formats a decimal number with pretty separators.
"""
sNum = str(lNum);
sRet = sNum[-3:];
off = len(sNum) - 3;
while off > 0:
off -= 3;
sRet = sNum[(off if off >= 0 else 0):(off + 3)] + sThousandSep + sRet;
return sRet;
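# Illustrative examples (added, not from the original module):
#   formatNumber(1234567)       -> '1 234 567'
#   formatNumber(1234567, ',')  -> '1,234,567'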
def formatNumberNbsp(lNum):
"""
Formats a decimal number with pretty separators.
"""
sRet = formatNumber(lNum);
return unicode(sRet).replace(' ', u'\u00a0');
def isString(oString):
"""
    Checks if the object is a string object, hiding the difference between python 2 and 3.
Returns True if it's a string of some kind.
Returns False if not.
"""
if sys.version_info[0] >= 3:
return isinstance(oString, str);
return isinstance(oString, basestring);
def hasNonAsciiCharacters(sText):
"""
    Returns True if the specified string has non-ASCII characters.
"""
sTmp = unicode(sText, errors='ignore') if isinstance(sText, str) else sText
return not all(ord(cChar) < 128 for cChar in sTmp)
def chmodPlusX(sFile):
"""
Makes the specified file or directory executable.
Returns success indicator, no exceptions.
Note! Symbolic links are followed and the target will be changed.
"""
try:
oStat = os.stat(sFile);
except:
return False;
try:
os.chmod(sFile, oStat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH);
except:
return False;
return True;
def unpackZipFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
Worker for unpackFile that deals with ZIP files, same function signature.
"""
import zipfile
if fnError is None:
fnError = fnLog;
fnLog('Unzipping "%s" to "%s"...' % (sArchive, sDstDir));
# Open it.
try: oZipFile = zipfile.ZipFile(sArchive, 'r')
except Exception as oXcpt:
fnError('Error opening "%s" for unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt,));
return None;
# Extract all members.
asMembers = [];
try:
for sMember in oZipFile.namelist():
if fnFilter is None or fnFilter(sMember) is not False:
if sMember.endswith('/'):
os.makedirs(os.path.join(sDstDir, sMember.replace('/', os.path.sep)), 0x1fd); # octal: 0775 (python 3/2)
else:
oZipFile.extract(sMember, sDstDir);
asMembers.append(os.path.join(sDstDir, sMember.replace('/', os.path.sep)));
except Exception as oXcpt:
fnError('Error unpacking "%s" into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
# close it.
try: oZipFile.close();
except Exception as oXcpt:
fnError('Error closing "%s" after unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
return asMembers;
## Set if we've replaced tarfile.copyfileobj with __mytarfilecopyfileobj already.
g_fTarCopyFileObjOverriddend = False;
def __mytarfilecopyfileobj(src, dst, length = None, exception = OSError):
""" tarfile.copyfileobj with different buffer size (16384 is slow on windows). """
if length is None:
__myshutilcopyfileobj(src, dst, g_cbGoodBufferSize);
elif length > 0:
cFull, cbRemainder = divmod(length, g_cbGoodBufferSize);
for _ in xrange(cFull):
abBuffer = src.read(g_cbGoodBufferSize);
dst.write(abBuffer);
if len(abBuffer) != g_cbGoodBufferSize:
raise exception('unexpected end of source file');
if cbRemainder > 0:
abBuffer = src.read(cbRemainder);
dst.write(abBuffer);
if len(abBuffer) != cbRemainder:
raise exception('unexpected end of source file');
def unpackTarFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
Worker for unpackFile that deals with tarballs, same function signature.
"""
import shutil;
import tarfile;
if fnError is None:
fnError = fnLog;
fnLog('Untarring "%s" to "%s"...' % (sArchive, sDstDir));
#
# Default buffer sizes of 16384 bytes is causing too many syscalls on Windows.
# 60%+ speedup for python 2.7 and 50%+ speedup for python 3.5, both on windows with PDBs.
# 20%+ speedup for python 2.7 and 15%+ speedup for python 3.5, both on windows skipping PDBs.
#
if True is True:
__installShUtilHacks(shutil);
global g_fTarCopyFileObjOverriddend;
if g_fTarCopyFileObjOverriddend is False:
g_fTarCopyFileObjOverriddend = True;
tarfile.copyfileobj = __mytarfilecopyfileobj;
#
# Open it.
#
    # Note! We're not using 'r:*' because we cannot allow seeking compressed files!
# That's how we got a 13 min unpack time for VBoxAll on windows (hardlinked pdb).
#
try: oTarFile = tarfile.open(sArchive, 'r|*', bufsize = g_cbGoodBufferSize);
except Exception as oXcpt:
fnError('Error opening "%s" for unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt,));
return None;
# Extract all members.
asMembers = [];
try:
for oTarInfo in oTarFile:
try:
if fnFilter is None or fnFilter(oTarInfo.name) is not False:
if oTarInfo.islnk():
                        # Links are trouble, especially on Windows. We must avoid the handling that will end up seeking
                        # in the compressed tar stream. So, fall back on shutil.copy2 instead.
sLinkFile = os.path.join(sDstDir, oTarInfo.name.rstrip('/').replace('/', os.path.sep));
sLinkTarget = os.path.join(sDstDir, oTarInfo.linkname.rstrip('/').replace('/', os.path.sep));
sParentDir = os.path.dirname(sLinkFile);
try: os.unlink(sLinkFile);
except: pass;
                        if sParentDir != '' and not os.path.exists(sParentDir):
os.makedirs(sParentDir);
try: os.link(sLinkTarget, sLinkFile);
except: shutil.copy2(sLinkTarget, sLinkFile);
else:
if oTarInfo.isdir():
# Just make sure the user (we) got full access to dirs. Don't bother getting it 100% right.
oTarInfo.mode |= 0x1c0; # (octal: 0700)
oTarFile.extract(oTarInfo, sDstDir);
asMembers.append(os.path.join(sDstDir, oTarInfo.name.replace('/', os.path.sep)));
except Exception as oXcpt:
fnError('Error unpacking "%s" member "%s" into "%s": %s' % (sArchive, oTarInfo.name, sDstDir, oXcpt));
for sAttr in [ 'name', 'linkname', 'type', 'mode', 'size', 'mtime', 'uid', 'uname', 'gid', 'gname' ]:
fnError('Info: %8s=%s' % (sAttr, getattr(oTarInfo, sAttr),));
for sFn in [ 'isdir', 'isfile', 'islnk', 'issym' ]:
fnError('Info: %8s=%s' % (sFn, getattr(oTarInfo, sFn)(),));
asMembers = None;
break;
except Exception as oXcpt:
fnError('Error unpacking "%s" into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
#
# Finally, close it.
#
try: oTarFile.close();
except Exception as oXcpt:
fnError('Error closing "%s" after unpacking into "%s": %s' % (sArchive, sDstDir, oXcpt));
asMembers = None;
return asMembers;
def unpackFile(sArchive, sDstDir, fnLog, fnError = None, fnFilter = None):
# type: (string, string, (string) -> None, (string) -> None, (string) -> bool) -> list[string]
"""
    Unpacks the given file if it has a known archive extension, otherwise do
nothing.
fnLog & fnError both take a string parameter.
fnFilter takes a member name (string) and returns True if it's included
and False if excluded.
Returns list of the extracted files (full path) on success.
Returns empty list if not a supported archive format.
Returns None on failure. Raises no exceptions.
"""
sBaseNameLower = os.path.basename(sArchive).lower();
#
# Zip file?
#
if sBaseNameLower.endswith('.zip'):
return unpackZipFile(sArchive, sDstDir, fnLog, fnError, fnFilter);
#
# Tarball?
#
if sBaseNameLower.endswith('.tar') \
or sBaseNameLower.endswith('.tar.gz') \
or sBaseNameLower.endswith('.tgz') \
or sBaseNameLower.endswith('.tar.bz2'):
return unpackTarFile(sArchive, sDstDir, fnLog, fnError, fnFilter);
#
# Cannot classify it from the name, so just return that to the caller.
#
fnLog('Not unpacking "%s".' % (sArchive,));
return [];
def getDiskUsage(sPath):
"""
Get free space of a partition that corresponds to specified sPath in MB.
Returns partition free space value in MB.
"""
if platform.system() == 'Windows':
oCTypeFreeSpace = ctypes.c_ulonglong(0);
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(sPath), None, None,
ctypes.pointer(oCTypeFreeSpace));
cbFreeSpace = oCTypeFreeSpace.value;
else:
oStats = os.statvfs(sPath); # pylint: disable=E1101
cbFreeSpace = long(oStats.f_frsize) * oStats.f_bfree;
# Convert to MB
cMbFreeSpace = long(cbFreeSpace) / (1024 * 1024);
return cMbFreeSpace;
#
# Unit testing.
#
# pylint: disable=C0111
class BuildCategoryDataTestCase(unittest.TestCase):
def testIntervalSeconds(self):
self.assertEqual(parseIntervalSeconds(formatIntervalSeconds(3600)), (3600, None));
self.assertEqual(parseIntervalSeconds(formatIntervalSeconds(1209438593)), (1209438593, None));
self.assertEqual(parseIntervalSeconds('123'), (123, None));
self.assertEqual(parseIntervalSeconds(123), (123, None));
self.assertEqual(parseIntervalSeconds(99999999999), (99999999999, None));
self.assertEqual(parseIntervalSeconds(''), (0, 'Empty interval string.'));
self.assertEqual(parseIntervalSeconds('1X2'), (3, 'Unknown unit "X".'));
self.assertEqual(parseIntervalSeconds('1 Y3'), (4, 'Unknown unit "Y".'));
self.assertEqual(parseIntervalSeconds('1 Z 4'), (5, 'Unknown unit "Z".'));
self.assertEqual(parseIntervalSeconds('1 hour 2m 5second'), (3725, None));
self.assertEqual(parseIntervalSeconds('1 hour,2m ; 5second'), (3725, None));
if __name__ == '__main__':
unittest.main();
# not reached.
| gpl-2.0 | -3,673,735,913,334,923,000 | 31.87655 | 124 | 0.55815 | false |
bright-sparks/wpull | wpull/url.py | 1 | 20604 | '''URL parsing based on WHATWG URL living standard.'''
import collections
import fnmatch
import functools
import gettext
import logging
import re
import string
import urllib.parse
import posixpath
from wpull.backport.logging import BraceMessage as __
import wpull.string
_logger = logging.getLogger(__name__)
_ = gettext.gettext
RELATIVE_SCHEME_DEFAULT_PORTS = {
'ftp': 21,
'gopher': 70,
'http': 80,
'https': 443,
'ws': 80,
'wss': 443,
}
DEFAULT_ENCODE_SET = frozenset(b' "#<>?`')
'''Percent encoding set as defined by WHATWG URL living standard.
Does not include U+0000 to U+001F nor U+007F and above.
'''
PASSWORD_ENCODE_SET = DEFAULT_ENCODE_SET | frozenset(b'/@\\')
'''Encoding set for passwords.'''
USERNAME_ENCODE_SET = PASSWORD_ENCODE_SET | frozenset(b':')
'''Encoding set for usernames.'''
QUERY_ENCODE_SET = frozenset(b'"#<>`')
'''Encoding set for query strings.
This set does not include U+0020 (space) so it can be replaced with
U+002B (plus sign) later.
'''
FRAGMENT_ENCODE_SET = frozenset(b' "<>`')
'''Encoding set for fragment.'''
QUERY_VALUE_ENCODE_SET = QUERY_ENCODE_SET | frozenset(b'&+%')
'''Encoding set for a query value.'''
FORBIDDEN_HOSTNAME_CHARS = frozenset('#%/:?@[\\] ')
'''Forbidden hostname characters.
Does not include non-printing characters. Meant for ASCII.
'''
VALID_IPv6_ADDRESS_CHARS = frozenset(string.hexdigits + '.:')
'''Valid IPv6 address characters.'''
class URLInfo(object):
'''Represent parts of a URL.
Attributes:
raw (str): Original string.
scheme (str): Protocol (for example, HTTP, FTP).
authority (str): Raw userinfo and host.
path (str): Location of resource. This value always
begins with a slash (``/``).
query (str): Additional request parameters.
fragment (str): Named anchor of a document.
userinfo (str): Raw username and password.
username (str): Username.
password (str): Password.
host (str): Raw hostname and port.
hostname (str): Hostname or IP address.
port (int): IP address port number.
        resource (str): Raw path, query, and fragment. This value always
begins with a slash (``/``).
query_map (dict): Mapping of the query. Values are lists.
url (str): A normalized URL without userinfo and fragment.
encoding (str): Codec name for IRI support.
If scheme is not something like HTTP or FTP, the remaining attributes
are None.
All attributes are read only.
For more information about how the URL parts are derived, see
https://medialize.github.io/URI.js/about-uris.html
'''
__slots__ = ('raw', 'scheme', 'authority', 'path', 'query', 'fragment',
'userinfo', 'username', 'password',
'host', 'hostname', 'port',
'resource',
'_query_map', '_url', 'encoding',
)
def __init__(self):
self.raw = None
self.scheme = None
self.authority = None
self.path = None
self.query = None
self.fragment = None
self.userinfo = None
self.username = None
self.password = None
self.host = None
self.hostname = None
self.port = None
self.resource = None
self._query_map = None
self._url = None
self.encoding = None
@classmethod
@functools.lru_cache()
def parse(cls, url, default_scheme='http', encoding='utf-8'):
'''Parse a URL and return a URLInfo.'''
url = url.strip()
if not url.isprintable():
raise ValueError('URL is not printable: {}'.format(ascii(url)))
scheme, sep, remaining = url.partition(':')
if not scheme:
raise ValueError('URL missing scheme: {}'.format(ascii(url)))
scheme = scheme.lower()
if not sep and default_scheme:
# Likely something like example.com/mystuff
remaining = url
scheme = default_scheme
elif not sep:
raise ValueError('URI missing colon: {}'.format(ascii(url)))
if default_scheme and '.' in scheme or scheme == 'localhost':
# Maybe something like example.com:8080/mystuff or
# maybe localhost:8080/mystuff
remaining = '{}:{}'.format(scheme, remaining)
scheme = default_scheme
info = URLInfo()
info.encoding = encoding
if scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
info.raw = url
info.scheme = scheme
info.path = remaining
return info
if remaining.startswith('//'):
remaining = remaining[2:]
path_index = remaining.find('/')
query_index = remaining.find('?')
fragment_index = remaining.find('#')
try:
index_tuple = (path_index, query_index, fragment_index)
authority_index = min(num for num in index_tuple if num >= 0)
except ValueError:
authority_index = len(remaining)
authority = remaining[:authority_index]
resource = remaining[authority_index:]
try:
index_tuple = (query_index, fragment_index)
path_index = min(num for num in index_tuple if num >= 0)
except ValueError:
path_index = len(remaining)
path = remaining[authority_index + 1:path_index] or '/'
if fragment_index >= 0:
query_index = fragment_index
else:
query_index = len(remaining)
query = remaining[path_index + 1:query_index]
fragment = remaining[query_index + 1:]
userinfo, host = cls.parse_authority(authority)
hostname, port = cls.parse_host(host)
username, password = cls.parse_userinfo(userinfo)
if not hostname:
raise ValueError('Hostname is empty: {}'.format(ascii(url)))
info.raw = url
info.scheme = scheme
info.authority = authority
info.path = normalize_path(path, encoding=encoding)
info.query = normalize_query(query, encoding=encoding)
info.fragment = normalize_fragment(fragment, encoding=encoding)
info.userinfo = userinfo
info.username = percent_decode(username, encoding=encoding)
info.password = percent_decode(password, encoding=encoding)
info.host = host
info.hostname = hostname
info.port = port or RELATIVE_SCHEME_DEFAULT_PORTS[scheme]
info.resource = resource
return info
@classmethod
def parse_authority(cls, authority):
'''Parse the authority part and return userinfo and host.'''
userinfo, sep, host = authority.partition('@')
if not sep:
return '', userinfo
else:
return userinfo, host
@classmethod
def parse_userinfo(cls, userinfo):
'''Parse the userinfo and return username and password.'''
username, sep, password = userinfo.partition(':')
return username, password
@classmethod
def parse_host(cls, host):
'''Parse the host and return hostname and port.'''
if host.endswith(']'):
return cls.parse_hostname(host), None
else:
hostname, sep, port = host.rpartition(':')
if sep:
port = int(port)
else:
hostname = port
port = None
return cls.parse_hostname(hostname), port
@classmethod
def parse_hostname(cls, hostname):
'''Parse the hostname and normalize.'''
if hostname.startswith('['):
return cls.parse_ipv6_hostname(hostname)
else:
new_hostname = normalize_hostname(hostname)
if any(char in new_hostname for char in FORBIDDEN_HOSTNAME_CHARS):
raise ValueError('Invalid hostname: {}'
.format(ascii(hostname)))
return new_hostname
@classmethod
def parse_ipv6_hostname(cls, hostname):
'''Parse and normalize a IPv6 address.'''
if not hostname.startswith('[') or not hostname.endswith(']'):
raise ValueError('Invalid IPv6 address: {}'
.format(ascii(hostname)))
hostname = hostname[1:-1]
if any(char not in VALID_IPv6_ADDRESS_CHARS for char in hostname):
raise ValueError('Invalid IPv6 address: {}'
.format(ascii(hostname)))
hostname = normalize_hostname(hostname)
return hostname
@property
def query_map(self):
if self._query_map is None:
self._query_map = query_to_map(self.query)
return self._query_map
@property
def url(self):
if self._url is None:
if self.scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
self._url = self.raw
return self._url
parts = [self.scheme, '://']
if self.username:
parts.append(normalize_username(self.username))
if self.password:
parts.append(':')
parts.append(normalize_password(self.password))
if self.username or self.password:
parts.append('@')
if self.is_ipv6():
parts.append('[{}]'.format(self.hostname))
else:
parts.append(self.hostname)
if RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] != self.port:
parts.append(':{}'.format(self.port))
parts.append(self.path)
if self.query:
parts.append('?')
parts.append(self.query)
self._url = ''.join(parts)
return self._url
def to_dict(self):
'''Return a dict of the attributes.'''
return dict(
raw=self.raw,
scheme=self.scheme,
authority=self.authority,
netloc=self.authority,
path=self.path,
query=self.query,
fragment=self.fragment,
userinfo=self.userinfo,
username=self.username,
password=self.password,
host=self.host,
hostname=self.hostname,
port=self.port,
resource=self.resource,
url=self.url,
encoding=self.encoding,
)
def is_port_default(self):
'''Return whether the URL is using the default port.'''
if self.scheme in RELATIVE_SCHEME_DEFAULT_PORTS:
return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port
def is_ipv6(self):
'''Return whether the URL is IPv6.'''
if self.host:
return self.host.startswith('[')
@property
def hostname_with_port(self):
'''Return the host portion but omit default port if needed.'''
default_port = RELATIVE_SCHEME_DEFAULT_PORTS.get(self.scheme)
if not default_port:
return ''
assert '[' not in self.hostname
assert ']' not in self.hostname
if self.is_ipv6():
hostname = '[{}]'.format(self.hostname)
else:
hostname = self.hostname
if default_port != self.port:
return '{}:{}'.format(hostname, self.port)
else:
return hostname
def split_path(self):
'''Return the directory and filename from the path.
The results are not percent-decoded.
'''
return posixpath.split(self.path)
def __repr__(self):
return '<URLInfo at 0x{:x} url={} raw={}>'.format(
id(self), self.url, self.raw)
def __hash__(self):
return hash(self.raw)
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return self.raw != other.raw
def parse_url_or_log(url, encoding='utf-8'):
'''Parse and return a URLInfo.
This function logs a warning if the URL cannot be parsed and returns
None.
'''
try:
url_info = URLInfo.parse(url, encoding=encoding)
except ValueError as error:
_logger.warning(__(
_('Unable to parse URL ‘{url}’: {error}.'),
url=wpull.string.printable_str(url), error=error))
else:
return url_info
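# Illustrative parse results (added example; derived from the rules above, not
# from a recorded run):
#   info = URLInfo.parse('HTTP://Example.COM:80/a/../b?q=1#frag')
#   info.url       -> 'http://example.com/b?q=1'
#   info.hostname  -> 'example.com'
#   info.port      -> 80
#   info.resource  -> '/a/../b?q=1#frag'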
def normalize(url, **kwargs):
'''Normalize a URL.
This function is a convenience function that is equivalent to::
>>> URLInfo.parse('http://example.com').url
'http://example.com'
:seealso: :func:`URLInfo.parse`.
'''
return URLInfo.parse(url, **kwargs).url
@functools.lru_cache()
def normalize_hostname(hostname):
'''Normalizes a hostname so that it is ASCII and valid domain name.'''
new_hostname = hostname.encode('idna').decode('ascii').lower()
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname
def normalize_path(path, encoding='utf-8'):
'''Normalize a path string.
Flattens a path by removing dot parts,
percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
if not path.startswith('/'):
path = '/' + path
path = percent_encode(flatten_path(path, flatten_slashes=True), encoding=encoding)
return uppercase_percent_encoding(path)
def normalize_query(text, encoding='utf-8'):
'''Normalize a query string.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode_plus(text, encoding=encoding)
return uppercase_percent_encoding(path)
def normalize_fragment(text, encoding='utf-8'):
'''Normalize a fragment.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=FRAGMENT_ENCODE_SET)
return uppercase_percent_encoding(path)
def normalize_username(text, encoding='utf-8'):
'''Normalize a username
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=USERNAME_ENCODE_SET)
return uppercase_percent_encoding(path)
def normalize_password(text, encoding='utf-8'):
'''Normalize a password
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
'''
path = percent_encode(text, encoding=encoding, encode_set=PASSWORD_ENCODE_SET)
return uppercase_percent_encoding(path)
class PercentEncoderMap(collections.defaultdict):
'''Helper map for percent encoding.'''
# This class is based on urllib.parse.Quoter
def __init__(self, encode_set):
super().__init__()
self.encode_set = encode_set
def __missing__(self, char):
if char < 0x20 or char > 0x7E or char in self.encode_set:
result = '%{:02X}'.format(char)
else:
result = chr(char)
self[char] = result
return result
_percent_encoder_map_cache = {}
'''Cache of :class:`PercentEncoderMap`.'''
def percent_encode(text, encode_set=DEFAULT_ENCODE_SET, encoding='utf-8'):
'''Percent encode text.
Unlike Python's ``quote``, this function accepts a blacklist instead of
a whitelist of safe characters.
'''
byte_string = text.encode(encoding)
try:
mapping = _percent_encoder_map_cache[encode_set]
except KeyError:
mapping = _percent_encoder_map_cache[encode_set] = PercentEncoderMap(
encode_set).__getitem__
return ''.join([mapping(char) for char in byte_string])
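# Illustrative examples (added; derived from DEFAULT_ENCODE_SET and the
# encoder map above, not from a recorded run):
#   percent_encode('a b<c>')  -> 'a%20b%3Cc%3E'
#   percent_encode('ä')       -> '%C3%A4'   (UTF-8 bytes, uppercase hex)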
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET,
encoding='utf-8'):
'''Percent encode text for query strings.
Unlike Python's ``quote_plus``, this function accepts a blacklist instead
of a whitelist of safe characters.
'''
if ' ' not in text:
return percent_encode(text, encode_set, encoding)
else:
result = percent_encode(text, encode_set, encoding)
return result.replace(' ', '+')
def percent_encode_query_value(text, encoding='utf-8'):
'''Percent encode a query value.'''
result = percent_encode_plus(text, QUERY_VALUE_ENCODE_SET, encoding)
return result
percent_decode = urllib.parse.unquote
percent_decode_plus = urllib.parse.unquote_plus
def schemes_similar(scheme1, scheme2):
'''Return whether URL schemes are similar.
This function considers the following schemes to be similar:
* HTTP and HTTPS
'''
if scheme1 == scheme2:
return True
if scheme1 in ('http', 'https') and scheme2 in ('http', 'https'):
return True
return False
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    '''Return whether a path is a subpath of another.
Args:
base_path: The base path
test_path: The path which we are testing
trailing_slash: If True, the trailing slash is treated with importance.
For example, ``/images/`` is a directory while ``/images`` is a
file.
wildcards: If True, globbing wildcards are matched against paths
'''
if trailing_slash:
base_path = base_path.rsplit('/', 1)[0] + '/'
test_path = test_path.rsplit('/', 1)[0] + '/'
else:
if not base_path.endswith('/'):
base_path += '/'
if not test_path.endswith('/'):
test_path += '/'
if wildcards:
return fnmatch.fnmatchcase(test_path, base_path)
else:
return test_path.startswith(base_path)
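# Illustrative checks (added; derived from the rules above):
#   is_subdir('/images', '/images/icons/logo.png')  -> True
#   is_subdir('/images', '/static/logo.png')        -> False
# With wildcards=True the base path acts as a glob pattern, e.g.
#   is_subdir('/images/*', '/images/icons/logo.png', wildcards=True) -> True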
def uppercase_percent_encoding(text):
'''Uppercases percent-encoded sequences.'''
if '%' not in text:
return text
return re.sub(
r'%[a-f0-9][a-f0-9]',
lambda match: match.group(0).upper(),
text)
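# Illustrative example (added): uppercase_percent_encoding('r%c3%a9sum%c3%a9')
# would return 'r%C3%A9sum%C3%A9'; text without '%' is returned unchanged.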
def split_query(qs, keep_blank_values=False):
'''Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
'''
items = []
for pair in qs.split('&'):
name, delim, value = pair.partition('=')
if not delim and keep_blank_values:
value = None
if keep_blank_values or value:
items.append((name, value))
return items
def query_to_map(text):
'''Return a key-values mapping from a query string.
Plus symbols are replaced with spaces.
'''
dict_obj = {}
for key, value in split_query(text, True):
if key not in dict_obj:
dict_obj[key] = []
if value:
dict_obj[key].append(value.replace('+', ' '))
else:
dict_obj[key].append('')
    return dict_obj
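# Illustrative mapping (added; derived from split_query above):
#   query_to_map('a=1&a=2&b')  -> {'a': ['1', '2'], 'b': ['']}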
@functools.lru_cache()
def urljoin(base_url, url, allow_fragments=True):
'''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
if url.startswith('//') and len(url) > 2:
scheme = base_url.partition(':')[0]
if scheme:
return urllib.parse.urljoin(
base_url,
'{0}:{1}'.format(scheme, url),
allow_fragments=allow_fragments
)
return urllib.parse.urljoin(
base_url, url, allow_fragments=allow_fragments)
def flatten_path(path, flatten_slashes=False):
'''Flatten an absolute URL path by removing the dot segments.
:func:`urllib.parse.urljoin` has some support for removing dot segments,
but it is conservative and only removes them as needed.
Arguments:
path (str): The URL path.
flatten_slashes (bool): If True, consecutive slashes are removed.
The path returned will always have a leading slash.
'''
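    # Illustrative behaviour (added; derived from the rules above):
    #   flatten_path('/a/./b/../c')                  -> '/a/c'
    #   flatten_path('/a//b/', flatten_slashes=True) -> '/a/b/'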
# Based on posixpath.normpath
# Fast path
if not path or path == '/':
return '/'
# Take off leading slash
if path[0] == '/':
path = path[1:]
parts = path.split('/')
new_parts = collections.deque()
for part in parts:
if part == '.' or (flatten_slashes and not part):
continue
elif part != '..':
new_parts.append(part)
elif new_parts:
new_parts.pop()
# If the filename is empty string
if flatten_slashes and path.endswith('/') or not len(new_parts):
new_parts.append('')
# Put back leading slash
new_parts.appendleft('')
return '/'.join(new_parts)
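# Worked examples for flatten_path (annotation, not present in the original
# module):
#
#   flatten_path('/a/b/../c/./d')                -> '/a/c/d'
#   flatten_path('/a//b/', flatten_slashes=True) -> '/a/b/'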
| gpl-3.0 | 7,461,028,754,398,663,000 | 28.219858 | 86 | 0.597282 | false |
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/roles.py | 1 | 1150 | import logging
from awxkit.api.resources import resources
from . import base
from . import page
log = logging.getLogger(__name__)
class Role(base.Base):
NATURAL_KEY = ('name',)
def get_natural_key(self, cache=None):
if cache is None:
cache = page.PageCache()
natural_key = super(Role, self).get_natural_key(cache=cache)
related_objs = [
related for name, related in self.related.items()
if name not in ('users', 'teams')
]
if related_objs:
related_endpoint = cache.get_page(related_objs[0])
if related_endpoint is None:
log.error("Unable to obtain content_object %s for role %s",
related_objs[0], self.endpoint)
return None
natural_key['content_object'] = related_endpoint.get_natural_key(cache=cache)
return natural_key
page.register_page(resources.role, Role)
class Roles(page.PageList, Role):
pass
page.register_page([resources.roles,
resources.related_roles,
resources.related_object_roles], Roles)
| apache-2.0 | 3,697,434,702,690,880,500 | 24 | 89 | 0.595652 | false |
nyergler/pythonslides | readthedocs/builds/models.py | 1 | 11261 | import re
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
from guardian.shortcuts import assign, get_objects_for_user
from taggit.managers import TaggableManager
from projects.models import Project
from projects import constants
from .constants import BUILD_STATE, BUILD_TYPES, VERSION_TYPES
class VersionManager(models.Manager):
def _filter_queryset(self, user, project, privacy_level, only_active):
if isinstance(privacy_level, basestring):
privacy_level = (privacy_level,)
queryset = Version.objects.filter(privacy_level__in=privacy_level)
# Remove this so we can use public() for all active public projects
#if not user and not project:
#return queryset
if user and user.is_authenticated():
# Add in possible user-specific views
user_queryset = get_objects_for_user(user, 'builds.view_version')
queryset = user_queryset | queryset
elif user:
# Hack around get_objects_for_user not supporting global perms
global_access = user.has_perm('builds.view_version')
if global_access:
queryset = Version.objects.all()
if project:
# Filter by project if requested
queryset = queryset.filter(project=project)
if only_active:
queryset = queryset.filter(active=True)
return queryset
def active(self, user=None, project=None, *args, **kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED,
constants.PRIVATE),
only_active=True,
)
return queryset.filter(*args, **kwargs)
def public(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def protected(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PUBLIC, constants.PROTECTED),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
def private(self, user=None, project=None, only_active=True, *args,
**kwargs):
queryset = self._filter_queryset(
user,
project,
privacy_level=(constants.PRIVATE),
only_active=only_active
)
return queryset.filter(*args, **kwargs)
class Version(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='versions')
type = models.CharField(
_('Type'), max_length=20,
choices=VERSION_TYPES, default='unknown',
)
# used by the vcs backend
identifier = models.CharField(_('Identifier'), max_length=255)
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
slug = models.CharField(_('Slug'), max_length=255)
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'), max_length=20, choices=constants.PRIVACY_CHOICES,
default='public', help_text=_("Level of privacy for this Version.")
)
tags = TaggableManager(blank=True)
objects = VersionManager()
class Meta:
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
permissions = (
# Translators: Permission around whether a user can view the
# version
('view_version', _('View Version')),
)
def __unicode__(self):
return ugettext(u"Version %(version)s of %(project)s (%(pk)s)" % {
'version': self.verbose_name,
'project': self.project,
'pk': self.pk
})
def get_absolute_url(self):
if not self.built and not self.uploaded:
return ''
return self.project.get_docs_url(version_slug=self.slug)
def save(self, *args, **kwargs):
"""
Add permissions to the Version for all owners on save.
"""
obj = super(Version, self).save(*args, **kwargs)
for owner in self.project.users.all():
assign('view_version', owner, self)
self.project.sync_supported_versions()
return obj
@property
def remote_slug(self):
if self.slug == 'latest':
if self.project.default_branch:
return self.project.default_branch
else:
return self.project.vcs_repo().fallback_branch
else:
return self.slug
def get_subdomain_url(self):
use_subdomain = getattr(settings, 'USE_SUBDOMAIN', False)
if use_subdomain:
return "/%s/%s/" % (
self.project.language,
self.slug,
)
else:
return reverse('docs_detail', kwargs={
'project_slug': self.project.slug,
'lang_slug': self.project.language,
'version_slug': self.slug,
'filename': ''
})
def get_subproject_url(self):
return "/projects/%s/%s/%s/" % (
self.project.slug,
self.project.language,
self.slug,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
if pretty:
if project.has_pdf(self.slug):
data['PDF'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['HTML'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['Epub'] = project.get_epub_url(self.slug)
else:
if project.has_pdf(self.slug):
data['pdf_url'] = project.get_pdf_url(self.slug)
if project.has_htmlzip(self.slug):
data['htmlzip_url'] = project.get_htmlzip_url(self.slug)
if project.has_epub(self.slug):
data['epub_url'] = project.get_epub_url(self.slug)
if project.has_manpage(self.slug):
data['manpage_url'] = project.get_manpage_url(self.slug)
if project.has_dash(self.slug):
data['dash_url'] = project.get_dash_url(self.slug)
data['dash_feed_url'] = project.get_dash_feed_url(self.slug)
return data
def get_conf_py_path(self):
# Hack this for now.
return "/docs/"
conf_py_path = self.project.conf_file(self.slug)
conf_py_path = conf_py_path.replace(
self.project.checkout_path(self.slug), '')
return conf_py_path.replace('conf.py', '')
def get_github_url(self, docroot, filename):
GITHUB_REGEXS = [
re.compile('github.com/(.+)/(.+)(?:\.git){1}'),
re.compile('github.com/(.+)/(.+)'),
re.compile('github.com:(.+)/(.+).git'),
]
GITHUB_URL = 'https://github.com/{user}/{repo}/blob/{version}{docroot}{path}.rst'
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
for regex in GITHUB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
)
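    # Illustrative result for a hypothetical project whose repo is
    # https://github.com/someuser/somerepo.git, with remote_slug 'master',
    # docroot '/docs/' and filename 'index':
    #
    #   https://github.com/someuser/somerepo/blob/master/docs/index.rst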
def get_bitbucket_url(self, docroot, filename):
BB_REGEXS = [
re.compile('bitbucket.org/(.+)/(.+).git'),
re.compile('bitbucket.org/(.+)/(.+)/'),
re.compile('bitbucket.org/(.+)/(.+)'),
]
BB_URL = 'https://bitbucket.org/{user}/{repo}/src/{version}{docroot}{path}.rst'
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
for regex in BB_REGEXS:
match = regex.search(repo_url)
if match:
user, repo = match.groups()
break
else:
return ''
repo = repo.rstrip('/')
return BB_URL.format(
user=user,
repo=repo,
version=self.remote_slug,
docroot=docroot,
path=filename,
)
class VersionAlias(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='aliases')
from_slug = models.CharField(_('From slug'), max_length=255, default='')
to_slug = models.CharField(_('To slug'), max_length=255, default='',
blank=True)
largest = models.BooleanField(_('Largest'), default=False)
def __unicode__(self):
return ugettext(u"Alias for %(project)s: %(from)s -> %(to)s" % {
'project': self.project,
            'from': self.from_slug,
'to': self.to_slug,
})
class Build(models.Model):
project = models.ForeignKey(Project, verbose_name=_('Project'),
related_name='builds')
version = models.ForeignKey(Version, verbose_name=_('Version'), null=True,
related_name='builds')
type = models.CharField(_('Type'), max_length=55, choices=BUILD_TYPES,
default='html')
state = models.CharField(_('State'), max_length=55, choices=BUILD_STATE,
default='finished')
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'))
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), max_length=3, null=True,
blank=True)
class Meta:
ordering = ['-date']
get_latest_by = 'date'
def __unicode__(self):
return ugettext(u"Build %(project)s for %(usernames)s (%(pk)s)" % {
'project': self.project,
'usernames': ' '.join(self.project.users.all()
.values_list('username', flat=True)),
'pk': self.pk,
})
@models.permalink
def get_absolute_url(self):
return ('builds_detail', [self.project.slug, self.pk])
| mit | 4,639,291,233,820,865,000 | 34.749206 | 89 | 0.552704 | false |
mghpcc-projects/user_level_slurm_reservations | commands/hil_slurmctld_prolog.py | 1 | 14582 | """
MassOpenCloud / Hardware Isolation Layer (HIL)
Slurm Control Daemon - HIL Reservation Prolog
May 2017, Tim Donahue [email protected]
"""
import argparse
import hostlist
import inspect
import logging
import os
import sys
from datetime import datetime, timedelta
from time import strftime
libdir = os.path.realpath(os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
from hil_slurm_helpers import (get_partition_data, get_job_data, get_object_data,
exec_scontrol_cmd, exec_scontrol_show_cmd,
get_hil_reservation_name, is_hil_reservation,
create_slurm_reservation, delete_slurm_reservation,
log_hil_reservation)
from hil_slurm_constants import (SHOW_OBJ_TIME_FMT, RES_CREATE_TIME_FMT,
SHOW_PARTITION_MAXTIME_HMS_FMT,
RES_CREATE_HIL_FEATURES,
HIL_RESERVE, HIL_RELEASE,
HIL_RESERVATION_COMMANDS,
RES_CREATE_FLAGS)
from hil_slurm_logging import log_init, log_info, log_debug, log_error
from hil_slurm_settings import (HIL_PARTITION_PREFIX,
RES_CHECK_DEFAULT_PARTITION,
RES_CHECK_EXCLUSIVE_PARTITION,
RES_CHECK_SHARED_PARTITION,
RES_CHECK_PARTITION_STATE,
HIL_RESERVATION_DEFAULT_DURATION,
HIL_RESERVATION_GRACE_PERIOD,
HIL_SLURMCTLD_PROLOG_LOGFILE,
HIL_ENDPOINT,
HIL_SLURM_PROJECT)
def _get_prolog_environment():
'''
Returns a job's prolog environment in dictionary form
'''
env_map = {'jobname': 'SLURM_JOB_NAME',
'partition': 'SLURM_JOB_PARTITION',
'username': 'SLURM_JOB_USER',
'job_id': 'SLURM_JOB_ID',
'job_uid': 'SLURM_JOB_UID',
'job_account': 'SLURM_JOB_ACCOUNT',
'nodelist': 'SLURM_JOB_NODELIST'
}
return {env_var: os.environ.get(slurm_env_var) for env_var, slurm_env_var in env_map.iteritems()}
def _check_hil_partition(env_dict, pdata_dict):
'''
Check if the partition exists and, if so, is properly named
Retrieve partition data via 'scontrol show'
'''
status = True
pname = pdata_dict['PartitionName']
if not pname.startswith(HIL_PARTITION_PREFIX):
log_info('Partition name `%s` does not match `%s*`' %
(pname, HIL_PARTITION_PREFIX))
status = False
# Verify the partition state is UP
if RES_CHECK_PARTITION_STATE:
if (pdata_dict['State'] != 'UP'):
log_info('Partition `%s` state (`%s`) is not UP' %
(pname, pdata_dict['State']))
status = False
# Verify the partition is not the default partition
if RES_CHECK_DEFAULT_PARTITION:
if (pdata_dict['Default'] == 'YES'):
log_info('Partition `%s` is the default partition, cannot be used for HIL' % pname)
status = False
# Verify the partition is not shared by checking 'Shared' and
# 'ExclusiveUser' attributes
if RES_CHECK_SHARED_PARTITION:
if (pdata_dict['Shared'] != 'NO'):
log_info('Partition `%s` is shared, cannot be used for HIL' % pname)
status = False
if RES_CHECK_EXCLUSIVE_PARTITION:
if (pdata_dict['ExclusiveUser'] != 'YES'):
log_info('Partition `%s` not exclusive to `%s`, cannot be used for HIL' % (pname, env_dict['username']))
status = False
return status
def _check_hil_command(env_dict):
'''
Get and validate the HIL command specified with srun / sbatch
'''
jobname = env_dict['jobname']
if jobname in HIL_RESERVATION_COMMANDS:
return jobname
else:
log_debug('Jobname `%s` is not a HIL reservation command, nothing to do.' % jobname)
return None
def _get_hil_reservation_times(env_dict, pdata_dict, jobdata_dict):
'''
Calculate the start time and end time of the reservation
Start time:
If the user specified a start time for the job, use that
Otherwise, use the current time
End time:
if the job has an end time, use that and extend it by the HIL
grace period.
If the job does not have an end time (e.g., TimeLimit UNLIMITED),
set the reservation end time to either the partition MaxTime,
if defined, or the HIL default maximum time.
'''
t_job_start_s = jobdata_dict['StartTime']
t_job_end_s = jobdata_dict['EndTime']
# log_debug('Job start %s Job end %s' % (t_job_start_s, t_job_end_s))
t_start_dt = datetime.strptime(t_job_start_s, SHOW_OBJ_TIME_FMT)
if 'Unknown' not in t_job_end_s:
log_debug('Using job end time for reservation')
# Job has a defined end time. Use it.
t_end_dt = datetime.strptime(t_job_end_s, SHOW_OBJ_TIME_FMT)
t_end_dt += timedelta(seconds=HIL_RESERVATION_GRACE_PERIOD)
else:
# Job does not have a defined end time. See if there's a time limit.
if 'UNLIMITED' in jobdata_dict['TimeLimit']:
# Job does not have a time limit. See if the partition has a
# max time. If so, use that. If not, use the HIL default duration.
p_max_time_s = pdata_dict['MaxTime']
log_debug('Partition MaxTime is %s' % p_max_time_s)
if 'UNLIMITED' in p_max_time_s:
# Partition does not have a max time, use HIL default.
log_debug('No job or partition time limit, using HIL default reservation duration')
t_end_dt = (t_start_dt +
timedelta(seconds=HIL_RESERVATION_DEFAULT_DURATION))
else:
# Partition has a max time, parse it. Output format is [days-]H:M:S.
log_debug('Using partition time limit to calculate reservation end time')
d_hms = p_max_time_s.split('-')
if (len(d_hms) == 1):
p_max_hms_dt = datetime.strptime(d_hms[0],
SHOW_PARTITION_MAXTIME_HMS_FMT)
p_max_timedelta = timedelta(hours=p_max_hms_dt.hour,
minutes=p_max_hms_dt.minute,
seconds=p_max_hms_dt.second)
elif (len(d_hms) == 2):
# Days field is present
                    p_max_days_timedelta = timedelta(days=int(d_hms[0]))
p_max_hms_dt = datetime.strptime(d_hms[1],
SHOW_PARTITION_MAXTIME_HMS_FMT)
p_max_hms_timedelta = timedelta(hours=p_max_hms_dt.hour,
minutes=p_max_hms_dt.minute,
seconds=p_max_hms_dt.second)
p_max_timedelta = p_max_days_timedelta + p_max_hms_timedelta
log_debug(p_max_timedelta)
t_end_dt = t_start_dt + p_max_timedelta
else:
log_error('Cannot parse partition MaxTime (`%s`)' % p_max_time_s)
else:
# Job has a time limit. Use it.
# $$$ FIX
log_debug('Job has a time limit! Unsupported!')
pass
# We now have a defined reservation t_start and t_end in datetime format.
# Convert to strings and return.
t_start_s = t_start_dt.strftime(RES_CREATE_TIME_FMT)
t_end_s = t_end_dt.strftime(RES_CREATE_TIME_FMT)
# log_debug('Start time %s' % t_start_s)
# log_debug('End time %s' % t_end_s)
return t_start_s, t_end_s
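# Worked example of the MaxTime parsing above (annotation; the value is
# hypothetical and SHOW_PARTITION_MAXTIME_HMS_FMT is assumed to be an
# %H:%M:%S-style format): a partition MaxTime of '1-02:30:00' splits on '-'
# into ['1', '02:30:00'] and yields timedelta(days=1) + timedelta(hours=2,
# minutes=30), so the reservation end is set 1 day 2.5 hours after its start.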
def _create_hil_reservation(restype_s, t_start_s, t_end_s, env_dict, pdata_dict, jobdata_dict):
'''
Create a HIL reservation
'''
# Generate a HIL reservation name
resname = get_hil_reservation_name(env_dict, restype_s, t_start_s)
# Check if reservation exists. If so, do nothing
resdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd('reservation', resname)
if (stderr_data) and ('not found' not in stderr_data):
log_info('HIL reservation `%s` already exists' % resname)
return resname, stderr_data
log_info('Creating HIL reservation `%s`, ending %s' % (resname, t_end_s))
stdout_data, stderr_data = create_slurm_reservation(resname, env_dict['username'],
t_start_s, t_end_s,
nodes=None, flags=RES_CREATE_FLAGS,
features=RES_CREATE_HIL_FEATURES,
debug=False)
return resname, stderr_data
def _delete_hil_reservation(env_dict, pdata_dict, jobdata_dict, resname):
'''
Delete a HIL reservation after validating HIL name prefix and owner name
The latter restricts 'hil_release' of a reservation to the owner
It is always possible to delete the reservation with 'scontrol delete'.
'''
# Minimally validate the specified reservation
if is_hil_reservation(resname, None):
log_info('Deleting HIL reservation `%s`' % resname)
return delete_slurm_reservation(resname, debug=False)
else:
log_info('Cannot delete HIL reservation, error in name (`%s`)' %
resname)
return None, 'hil_release: error: Invalid reservation name'
def _hil_reserve_cmd(env_dict, pdata_dict, jobdata_dict):
'''
Runs in Slurm control daemon prolog context
Create HIL reserve reservation if it does not already exist.
The HIL monitor will reserve the nodes and create the corresponding Slurm HIL release
reservation.
Reservation start and end times may overlap so long as the MAINT flag is set
'''
t_start_s, t_end_s = _get_hil_reservation_times(env_dict, pdata_dict, jobdata_dict)
resname, stderr_data = _create_hil_reservation(HIL_RESERVE, t_start_s, t_end_s,
env_dict, pdata_dict, jobdata_dict)
log_hil_reservation(resname, stderr_data, t_start_s, t_end_s)
def _hil_release_cmd(env_dict, pdata_dict, jobdata_dict):
'''
Runs in Slurm control daemon epilog context
Delete the reserve reservation in which the release job was run.
- Verify the reservation is a HIL reserve reservation
- Verify the reservation is owned by the user
- Get reserve reservation data via 'scontrol'
- Delete the reserve reservation in which the hil_release command was run
Release reservation will be deleted later by the HIL reservation monitor
'''
reserve_resname = jobdata_dict['Reservation']
if reserve_resname:
if not is_hil_reservation(reserve_resname, HIL_RESERVE):
log_error('Reservation `%s` is not a HIL reserve reservation' %
reserve_resname)
elif env_dict['username'] not in reserve_resname:
log_error('Reservation `%s` not owned by user `%s`' %
(reserve_resname, env_dict['username']))
else:
# Basic validation done
# Get reserve reservation data
reserve_rdata = get_object_data('reservation', reserve_resname)[0]
# Delete the reserve reservation
stdout_data, stderr_data = _delete_hil_reservation(env_dict, pdata_dict,
jobdata_dict, reserve_resname)
if (len(stderr_data) == 0):
log_info('Deleted HIL reserve reservation `%s`' % reserve_resname)
else:
log_error('Error deleting HIL reserve reservation `%s`' % reserve_resname)
log_error(stderr_data)
else:
log_error('No reservation name specified to `%s` command' %
jobdata_dict['JobName'])
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('--hil_prolog', action='store_true', default=False,
help='Function as the HIL prolog')
parser.add_argument('--hil_epilog', action='store_true', default=False,
help='Function as the HIL epilog')
return parser.parse_args()
def main(argv=[]):
args = process_args()
log_init('hil_slurmctld.prolog', HIL_SLURMCTLD_PROLOG_LOGFILE,
logging.DEBUG)
if args.hil_prolog:
pass
elif args.hil_epilog:
pass
else:
log_debug('Must specify one of --hil_prolog or --hil_epilog',
separator=True)
return False
# Collect prolog/epilog environment, job data, and partition data into
# dictionaries, perform basic sanity checks.
# Since data for one partition and one job is expected, select the
# first dict in the list
env_dict = _get_prolog_environment()
if not env_dict['partition']:
log_debug('Missing Slurm control daemon prolog / epilog environment.')
return False
pdata_dict = get_partition_data(env_dict['partition'])[0]
jobdata_dict = get_job_data(env_dict['job_id'])[0]
if not pdata_dict or not jobdata_dict:
log_debug('One of pdata_dict, jobdata_dict, or env_dict is empty')
log_debug('Job data', jobdata_dict)
log_debug('P data', pdata_dict)
return False
if not _check_hil_partition(env_dict, pdata_dict):
return False
# Verify the command is a HIL command. If so, process it.
hil_cmd = _check_hil_command(env_dict)
if not hil_cmd:
return True
status = True
if args.hil_prolog:
if (hil_cmd == 'hil_reserve'):
log_info('HIL Slurmctld Prolog', separator=True)
log_debug('Processing reserve request')
status = _hil_reserve_cmd(env_dict, pdata_dict, jobdata_dict)
elif args.hil_epilog:
if (hil_cmd == 'hil_release'):
log_info('HIL Slurmctld Epilog', separator=True)
log_debug('Processing release request')
status = _hil_release_cmd(env_dict, pdata_dict, jobdata_dict)
return status
if __name__ == '__main__':
main(sys.argv[1:])
exit(0)
# EOF
| mit | -5,637,913,810,003,291,000 | 37.885333 | 116 | 0.577904 | false |
AlJohri/nucraigslist | listings/management/commands/download.py | 1 | 2466 | from django.core.management.base import BaseCommand, CommandError
from listings.models import Listing, User, Comment, Group
import os, sys
from django.utils import timezone
from optparse import make_option
from listings.lib import save_obj, get_fb_graph_api, get_word_bank, filter_listing
from dateutil.parser import parse
from socialscraper.facebook.graphapi import get_feed
class Command(BaseCommand):
# args = '<poll_id poll_id ...>'
# help = 'Closes the specified poll for voting'
option_list = BaseCommand.option_list + (
make_option('--recent',
action='store_true',
dest='recent',
default=False,
help='Download most recent posts'
),
make_option('--backfill',
action='store_true',
dest='backfill',
default=False,
help='Backfill database'
),
)
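    # Typical invocations (illustrative; assumes the app is wired into a
    # normal Django project so manage.py discovers this command):
    #
    #   python manage.py download --recent
    #   python manage.py download --backfill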
def handle(self, *args, **options):
# mutually exclusive
if options['backfill'] and options['recent']: sys.exit()
if not options['backfill'] and not options['recent']: sys.exit()
api = get_fb_graph_api()
word_bank = get_word_bank(dl=False)
for group in Group.objects.all():
print "Downloading posts from %s" % group
if options['recent']:
print "Downloading most recent posts (no pagination)"
feed = api.get_object("%s/feed" % group.id)
for i,obj in enumerate(feed['data']):
listing, listing_created = save_obj(obj)
if listing_created:
filter_listing(listing, word_bank, i)
print ""
elif options['backfill']:
start = parse("01-1-2012")
if Listing.objects.filter(group_id=group.id).count() >= 1:
end = Listing.objects.filter(group_id=group.id).earliest('updated_time').updated_time.replace(tzinfo=None)
else:
end = timezone.now().replace(tzinfo=None)
print "Downloading from ", start, "to", end, "in reverse chronological order (latest first)."
for i,obj in enumerate(get_feed(api, str(group.id), start=start, end=end)):
listing, listing_created = save_obj(obj)
if listing_created:
filter_listing(listing, word_bank, i)
print ""
| gpl-3.0 | -2,108,560,672,248,644,000 | 35.264706 | 126 | 0.564477 | false |
xcme/briseis | devices/SW-Common-28.py | 1 | 5978 | # coding=UTF8
# The line above is needed in case Non-ASCII characters are used, for example Cyrillic.
ms_RxTx = {
# RX .1.3.6.1.2.1.31.1.1.1.6 ifHCInOctets
'~RX.1' : '.1.3.6.1.2.1.31.1.1.1.6.1',
'~RX.2' : '.1.3.6.1.2.1.31.1.1.1.6.2',
'~RX.3' : '.1.3.6.1.2.1.31.1.1.1.6.3',
'~RX.4' : '.1.3.6.1.2.1.31.1.1.1.6.4',
'~RX.5' : '.1.3.6.1.2.1.31.1.1.1.6.5',
'~RX.6' : '.1.3.6.1.2.1.31.1.1.1.6.6',
'~RX.7' : '.1.3.6.1.2.1.31.1.1.1.6.7',
'~RX.8' : '.1.3.6.1.2.1.31.1.1.1.6.8',
'~RX.9' : '.1.3.6.1.2.1.31.1.1.1.6.9',
'~RX.10' : '.1.3.6.1.2.1.31.1.1.1.6.10',
'~RX.11' : '.1.3.6.1.2.1.31.1.1.1.6.11',
'~RX.12' : '.1.3.6.1.2.1.31.1.1.1.6.12',
'~RX.13' : '.1.3.6.1.2.1.31.1.1.1.6.13',
'~RX.14' : '.1.3.6.1.2.1.31.1.1.1.6.14',
'~RX.15' : '.1.3.6.1.2.1.31.1.1.1.6.15',
'~RX.16' : '.1.3.6.1.2.1.31.1.1.1.6.16',
'~RX.17' : '.1.3.6.1.2.1.31.1.1.1.6.17',
'~RX.18' : '.1.3.6.1.2.1.31.1.1.1.6.18',
'~RX.19' : '.1.3.6.1.2.1.31.1.1.1.6.19',
'~RX.20' : '.1.3.6.1.2.1.31.1.1.1.6.20',
'~RX.21' : '.1.3.6.1.2.1.31.1.1.1.6.21',
'~RX.22' : '.1.3.6.1.2.1.31.1.1.1.6.22',
'~RX.23' : '.1.3.6.1.2.1.31.1.1.1.6.23',
'~RX.24' : '.1.3.6.1.2.1.31.1.1.1.6.24',
'~RX.25' : '.1.3.6.1.2.1.31.1.1.1.6.25',
'~RX.26' : '.1.3.6.1.2.1.31.1.1.1.6.26',
'~RX.27' : '.1.3.6.1.2.1.31.1.1.1.6.27',
'~RX.28' : '.1.3.6.1.2.1.31.1.1.1.6.28',
# TX .1.3.6.1.2.1.31.1.1.1.10 ifHCOutOctets
'~TX.1' : '.1.3.6.1.2.1.31.1.1.1.10.1',
'~TX.2' : '.1.3.6.1.2.1.31.1.1.1.10.2',
'~TX.3' : '.1.3.6.1.2.1.31.1.1.1.10.3',
'~TX.4' : '.1.3.6.1.2.1.31.1.1.1.10.4',
'~TX.5' : '.1.3.6.1.2.1.31.1.1.1.10.5',
'~TX.6' : '.1.3.6.1.2.1.31.1.1.1.10.6',
'~TX.7' : '.1.3.6.1.2.1.31.1.1.1.10.7',
'~TX.8' : '.1.3.6.1.2.1.31.1.1.1.10.8',
'~TX.9' : '.1.3.6.1.2.1.31.1.1.1.10.9',
'~TX.10' : '.1.3.6.1.2.1.31.1.1.1.10.10',
'~TX.11' : '.1.3.6.1.2.1.31.1.1.1.10.11',
'~TX.12' : '.1.3.6.1.2.1.31.1.1.1.10.12',
'~TX.13' : '.1.3.6.1.2.1.31.1.1.1.10.13',
'~TX.14' : '.1.3.6.1.2.1.31.1.1.1.10.14',
'~TX.15' : '.1.3.6.1.2.1.31.1.1.1.10.15',
'~TX.16' : '.1.3.6.1.2.1.31.1.1.1.10.16',
'~TX.17' : '.1.3.6.1.2.1.31.1.1.1.10.17',
'~TX.18' : '.1.3.6.1.2.1.31.1.1.1.10.18',
'~TX.19' : '.1.3.6.1.2.1.31.1.1.1.10.19',
'~TX.20' : '.1.3.6.1.2.1.31.1.1.1.10.20',
'~TX.21' : '.1.3.6.1.2.1.31.1.1.1.10.21',
'~TX.22' : '.1.3.6.1.2.1.31.1.1.1.10.22',
'~TX.23' : '.1.3.6.1.2.1.31.1.1.1.10.23',
'~TX.24' : '.1.3.6.1.2.1.31.1.1.1.10.24',
'~TX.25' : '.1.3.6.1.2.1.31.1.1.1.10.25',
'~TX.26' : '.1.3.6.1.2.1.31.1.1.1.10.26',
'~TX.27' : '.1.3.6.1.2.1.31.1.1.1.10.27',
'~TX.28' : '.1.3.6.1.2.1.31.1.1.1.10.28',
}
ms_RX_CRC = {
# RX_CRC .1.3.6.1.2.1.16.1.1.1.8 etherStatsCRCAlignErrors
'RX_CRC.1' : '.1.3.6.1.2.1.16.1.1.1.8.1',
'RX_CRC.2' : '.1.3.6.1.2.1.16.1.1.1.8.2',
'RX_CRC.3' : '.1.3.6.1.2.1.16.1.1.1.8.3',
'RX_CRC.4' : '.1.3.6.1.2.1.16.1.1.1.8.4',
'RX_CRC.5' : '.1.3.6.1.2.1.16.1.1.1.8.5',
'RX_CRC.6' : '.1.3.6.1.2.1.16.1.1.1.8.6',
'RX_CRC.7' : '.1.3.6.1.2.1.16.1.1.1.8.7',
'RX_CRC.8' : '.1.3.6.1.2.1.16.1.1.1.8.8',
'RX_CRC.9' : '.1.3.6.1.2.1.16.1.1.1.8.9',
'RX_CRC.10' : '.1.3.6.1.2.1.16.1.1.1.8.10',
'RX_CRC.11' : '.1.3.6.1.2.1.16.1.1.1.8.11',
'RX_CRC.12' : '.1.3.6.1.2.1.16.1.1.1.8.12',
'RX_CRC.13' : '.1.3.6.1.2.1.16.1.1.1.8.13',
'RX_CRC.14' : '.1.3.6.1.2.1.16.1.1.1.8.14',
'RX_CRC.15' : '.1.3.6.1.2.1.16.1.1.1.8.15',
'RX_CRC.16' : '.1.3.6.1.2.1.16.1.1.1.8.16',
'RX_CRC.17' : '.1.3.6.1.2.1.16.1.1.1.8.17',
'RX_CRC.18' : '.1.3.6.1.2.1.16.1.1.1.8.18',
'RX_CRC.19' : '.1.3.6.1.2.1.16.1.1.1.8.19',
'RX_CRC.20' : '.1.3.6.1.2.1.16.1.1.1.8.20',
'RX_CRC.21' : '.1.3.6.1.2.1.16.1.1.1.8.21',
'RX_CRC.22' : '.1.3.6.1.2.1.16.1.1.1.8.22',
'RX_CRC.23' : '.1.3.6.1.2.1.16.1.1.1.8.23',
'RX_CRC.24' : '.1.3.6.1.2.1.16.1.1.1.8.24',
'RX_CRC.25' : '.1.3.6.1.2.1.16.1.1.1.8.25',
'RX_CRC.26' : '.1.3.6.1.2.1.16.1.1.1.8.26',
'RX_CRC.27' : '.1.3.6.1.2.1.16.1.1.1.8.27',
'RX_CRC.28' : '.1.3.6.1.2.1.16.1.1.1.8.28',
}
ms_DS = {
# DS .1.3.6.1.2.1.10.7.2.1.19 dot3StatsDuplexStatus
'DS.1' : '.1.3.6.1.2.1.10.7.2.1.19.1',
'DS.2' : '.1.3.6.1.2.1.10.7.2.1.19.2',
'DS.3' : '.1.3.6.1.2.1.10.7.2.1.19.3',
'DS.4' : '.1.3.6.1.2.1.10.7.2.1.19.4',
'DS.5' : '.1.3.6.1.2.1.10.7.2.1.19.5',
'DS.6' : '.1.3.6.1.2.1.10.7.2.1.19.6',
'DS.7' : '.1.3.6.1.2.1.10.7.2.1.19.7',
'DS.8' : '.1.3.6.1.2.1.10.7.2.1.19.8',
'DS.9' : '.1.3.6.1.2.1.10.7.2.1.19.9',
'DS.10' : '.1.3.6.1.2.1.10.7.2.1.19.10',
'DS.11' : '.1.3.6.1.2.1.10.7.2.1.19.11',
'DS.12' : '.1.3.6.1.2.1.10.7.2.1.19.12',
'DS.13' : '.1.3.6.1.2.1.10.7.2.1.19.13',
'DS.14' : '.1.3.6.1.2.1.10.7.2.1.19.14',
'DS.15' : '.1.3.6.1.2.1.10.7.2.1.19.15',
'DS.16' : '.1.3.6.1.2.1.10.7.2.1.19.16',
'DS.17' : '.1.3.6.1.2.1.10.7.2.1.19.17',
'DS.18' : '.1.3.6.1.2.1.10.7.2.1.19.18',
'DS.19' : '.1.3.6.1.2.1.10.7.2.1.19.19',
'DS.20' : '.1.3.6.1.2.1.10.7.2.1.19.20',
'DS.21' : '.1.3.6.1.2.1.10.7.2.1.19.21',
'DS.22' : '.1.3.6.1.2.1.10.7.2.1.19.22',
'DS.23' : '.1.3.6.1.2.1.10.7.2.1.19.23',
'DS.24' : '.1.3.6.1.2.1.10.7.2.1.19.24',
'DS.25' : '.1.3.6.1.2.1.10.7.2.1.19.25',
'DS.26' : '.1.3.6.1.2.1.10.7.2.1.19.26',
'DS.27' : '.1.3.6.1.2.1.10.7.2.1.19.27',
'DS.28' : '.1.3.6.1.2.1.10.7.2.1.19.28',
}
ms_UpTime = {
# UP .1.3.6.1.2.1.1.3.0 sysUpTimeInstance
'UP.' : '.1.3.6.1.2.1.1.3.0'
}
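# Usage note (added annotation, not part of the original device profile):
# each entry maps a metric key such as '~RX.1' to the corresponding OID for
# that switch port, so a poller can query it directly, e.g. with net-snmp
# (community string is illustrative):
#
#   snmpget -v2c -c public <switch-ip> .1.3.6.1.2.1.31.1.1.1.6.1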
| gpl-2.0 | -7,170,010,892,784,283,000 | 43.818182 | 84 | 0.398749 | false |
Alymantara/maelstorm | tests/test_main.py | 1 | 1272 | from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
from maelstorm import metadata
from maelstorm.main import main
class TestMain(object):
@parametrize('helparg', ['-h', '--help'])
def test_help(self, helparg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', helparg])
out, err = capsys.readouterr()
# Should have printed some sort of usage message. We don't
# need to explicitly test the content of the message.
assert 'usage' in out
# Should have used the program name from the argument
# vector.
assert 'progname' in out
# Should exit with zero return code.
assert exc_info.value.code == 0
@parametrize('versionarg', ['-v', '--version'])
def test_version(self, versionarg, capsys):
with raises(SystemExit) as exc_info:
main(['progname', versionarg])
out, err = capsys.readouterr()
# Should print out version.
assert err == '{0} {1}\n'.format(metadata.project, metadata.version)
# Should exit with zero return code.
assert exc_info.value.code == 0
| mit | 4,045,476,757,722,540,500 | 34.333333 | 76 | 0.643082 | false |
jbenden/ansible | lib/ansible/modules/cloud/amazon/efs_facts.py | 1 | 11177 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from collections import defaultdict
from time import sleep
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec,
camel_dict_to_snake_dict, HAS_BOTO3)
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
        except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # Re-raise anything that is not throttling (or that has exhausted
            # the backoff budget) so errors are not silently retried forever.
            raise
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'MountTargetId',
'subnet-': 'SubnetId',
'eni-': 'NetworkInterfaceId',
'sg-': 'SecurityGroups'
}
prefix = first_or_default(filter(
lambda pref: str(attr_id).startswith(pref),
attr_by_prefix.keys()
))
if prefix:
return attr_by_prefix[prefix]
return 'IpAddress'
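# Examples for prefix_to_attr (illustrative annotation with made-up IDs):
#
#   prefix_to_attr('subnet-1a2b3c4d')  -> 'SubnetId'
#   prefix_to_attr('sg-4d3c2b1a')      -> 'SecurityGroups'
#   prefix_to_attr('10.0.0.7')         -> 'IpAddress'  (no known prefix)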
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
Helper method to determine if tag requested already exists
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
    Helper method to determine if the requested mount target already exists
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
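# Example of group_list_of_dict (added annotation):
#
#   group_list_of_dict([{'a': 1, 'b': [2, 3]}, {'a': 4}])
#   -> {'a': [1, 4], 'b': [2, 3]}  (as a defaultdict)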
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(),
name=dict(),
tags=dict(type="dict", default={}),
targets=dict(type="list", default=[])
))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
if tags:
file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = filter(lambda item:
has_targets(item['MountTargets'], targets), file_systems_info)
file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
if __name__ == '__main__':
main()
| gpl-3.0 | -2,514,477,651,479,542,000 | 28.568783 | 156 | 0.585756 | false |
yelizariev/addons-yelizariev | ir_attachment_s3/tests/test_resized_attachments.py | 1 | 5679 | # Copyright 2019 Rafis Bikbov <https://it-projects.info/team/RafiZz>
# Copyright 2019 Alexandr Kolushov <https://it-projects.info/team/KolushovAlexandr>
# Copyright 2019 Eugene Molotov <https://it-projects.info/team/em230418>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
import logging
from odoo import api, exceptions
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestResizedAttachments(HttpCase):
def setUp(self):
super(TestResizedAttachments, self).setUp()
self.original_image_url = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg"
def _get_odoo_image_url(self, model, record_id, field):
return "/web/image?model={}&id={}&field={}".format(model, record_id, field)
def test_getting_cached_images_url_instead_computing(self):
env = api.Environment(self.registry.test_cr, self.uid, {})
env["ir.config_parameter"].set_param("ir_attachment_url.storage", "s3")
if not env["ir.attachment"]._get_s3_resource():
self.skipTest("Bad S3 credidentials given")
return
product_tmpl = env["product.template"].create(
{
"name": "Test template",
# set the image so that it is not installed from the product (is the white pixel)
"image": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=",
}
)
product_product = env["product.product"].create(
{
"name": "Test product",
"image_variant": self.original_image_url,
"product_tmpl_id": product_tmpl.id,
}
)
odoo_image_url = self._get_odoo_image_url(
"product.product", product_product.id, "image"
)
odoo_image_medium_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_medium"
)
odoo_image_small_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_small"
)
self.authenticate("demo", "demo")
redirected_image = self.url_open(odoo_image_url, timeout=30)
redirected_image_medium = self.url_open(odoo_image_medium_url, timeout=30)
redirected_image_small = self.url_open(odoo_image_small_url, timeout=30)
self.assertEqual(redirected_image.status_code, 200)
self.assertEqual(redirected_image_medium.status_code, 200)
self.assertEqual(redirected_image_small.status_code, 200)
redirected_image_url = redirected_image.url
redirected_image_medium_url = redirected_image_medium.url
redirected_image_small_url = redirected_image_small.url
# Attachments must be created during the execution of requests that are written above.
product_product_image_variant_attachment = env[
"ir.http"
]._find_field_attachment(
env, "product.product", "image_variant", product_product.id
)
product_product_image_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image", product_product.id
)
product_product_image_medium_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image_medium", product_product.id
)
product_product_image_small_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image_small", product_product.id
)
a = set(
product_product_image_variant_attachment.resized_ids.mapped(
"resized_attachment_id"
)
)
b = {
product_product_image_attachment,
product_product_image_medium_attachment,
product_product_image_small_attachment,
}
self.assertFalse(a.difference(b))
self.assertTrue(product_product_image_attachment)
self.assertTrue(product_product_image_medium_attachment)
self.assertTrue(product_product_image_small_attachment)
self.assertEqual(redirected_image_url, product_product_image_attachment.url)
self.assertEqual(
redirected_image_medium_url, product_product_image_medium_attachment.url
)
self.assertEqual(
redirected_image_small_url, product_product_image_small_attachment.url
)
urls = [
self.original_image_url,
redirected_image_url,
redirected_image_medium_url,
redirected_image_small_url,
]
self.assertEqual(len(urls), len(set(urls)), "Duplicates in URLs: %s" % urls)
def test_unlink_resized_attachments_when_parent_unlink(self):
env = api.Environment(self.registry.test_cr, self.uid, {})
ir_att_model = env["ir.attachment"]
ir_att_resized_model = env["ir.attachment.resized"]
original_att = ir_att_model.create({"name": "test att"})
resized_att = ir_att_model.create({"name": "resized test att"})
ir_att_resized = ir_att_resized_model.create(
{"attachment_id": original_att.id, "resized_attachment_id": resized_att.id}
)
self.assertTrue(original_att.unlink())
with self.assertRaises(exceptions.MissingError):
original_att.write({"name": "foo"})
with self.assertRaises(exceptions.MissingError):
ir_att_resized.write({"width": 1})
with self.assertRaises(exceptions.MissingError):
resized_att.write({"name": "bar"})
| lgpl-3.0 | 5,909,971,476,124,248,000 | 39.276596 | 134 | 0.631625 | false |
philipgian/pre-commit | tests/commands/install_uninstall_test.py | 1 | 20438 | from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
import re
import shutil
import subprocess
import sys
import mock
import pre_commit.constants as C
from pre_commit.commands.install_uninstall import CURRENT_HASH
from pre_commit.commands.install_uninstall import install
from pre_commit.commands.install_uninstall import install_hooks
from pre_commit.commands.install_uninstall import is_our_script
from pre_commit.commands.install_uninstall import PRIOR_HASHES
from pre_commit.commands.install_uninstall import uninstall
from pre_commit.runner import Runner
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
from testing.fixtures import git_dir
from testing.fixtures import make_consuming_repo
from testing.fixtures import remove_config_from_repo
from testing.util import cmd_output_mocked_pre_commit_home
from testing.util import xfailif_no_symlink
def test_is_not_script():
assert is_our_script('setup.py') is False
def test_is_script():
assert is_our_script(resource_filename('hook-tmpl'))
def test_is_previous_pre_commit(tmpdir):
f = tmpdir.join('foo')
f.write(PRIOR_HASHES[0] + '\n')
assert is_our_script(f.strpath)
def test_install_pre_commit(tempdir_factory):
path = git_dir(tempdir_factory)
runner = Runner(path, C.CONFIG_FILE)
ret = install(runner)
assert ret == 0
assert os.path.exists(runner.pre_commit_path)
pre_commit_contents = io.open(runner.pre_commit_path).read()
pre_commit_script = resource_filename('hook-tmpl')
expected_contents = io.open(pre_commit_script).read().format(
sys_executable=sys.executable,
hook_type='pre-commit',
pre_push='',
skip_on_missing_conf='false',
)
assert pre_commit_contents == expected_contents
assert os.access(runner.pre_commit_path, os.X_OK)
ret = install(runner, hook_type='pre-push')
assert ret == 0
assert os.path.exists(runner.pre_push_path)
pre_push_contents = io.open(runner.pre_push_path).read()
pre_push_tmpl = resource_filename('pre-push-tmpl')
pre_push_template_contents = io.open(pre_push_tmpl).read()
expected_contents = io.open(pre_commit_script).read().format(
sys_executable=sys.executable,
hook_type='pre-push',
pre_push=pre_push_template_contents,
skip_on_missing_conf='false',
)
assert pre_push_contents == expected_contents
def test_install_hooks_directory_not_present(tempdir_factory):
path = git_dir(tempdir_factory)
# Simulate some git clients which don't make .git/hooks #234
hooks = os.path.join(path, '.git', 'hooks')
if os.path.exists(hooks): # pragma: no cover (latest git)
shutil.rmtree(hooks)
runner = Runner(path, C.CONFIG_FILE)
install(runner)
assert os.path.exists(runner.pre_commit_path)
@xfailif_no_symlink
def test_install_hooks_dead_symlink(
tempdir_factory,
): # pragma: no cover (non-windows)
path = git_dir(tempdir_factory)
runner = Runner(path, C.CONFIG_FILE)
mkdirp(os.path.dirname(runner.pre_commit_path))
os.symlink('/fake/baz', os.path.join(path, '.git', 'hooks', 'pre-commit'))
install(runner)
assert os.path.exists(runner.pre_commit_path)
def test_uninstall_does_not_blow_up_when_not_there(tempdir_factory):
path = git_dir(tempdir_factory)
runner = Runner(path, C.CONFIG_FILE)
ret = uninstall(runner)
assert ret == 0
def test_uninstall(tempdir_factory):
path = git_dir(tempdir_factory)
runner = Runner(path, C.CONFIG_FILE)
assert not os.path.exists(runner.pre_commit_path)
install(runner)
assert os.path.exists(runner.pre_commit_path)
uninstall(runner)
assert not os.path.exists(runner.pre_commit_path)
def _get_commit_output(tempdir_factory, touch_file='foo', **kwargs):
cmd_output('touch', touch_file)
cmd_output('git', 'add', touch_file)
return cmd_output_mocked_pre_commit_home(
'git', 'commit', '-am', 'Commit!', '--allow-empty',
# git commit puts pre-commit to stderr
stderr=subprocess.STDOUT,
retcode=None,
tempdir_factory=tempdir_factory,
**kwargs
)[:2]
# osx does this different :(
FILES_CHANGED = (
r'('
r' 1 file changed, 0 insertions\(\+\), 0 deletions\(-\)\r?\n'
r'|'
r' 0 files changed\r?\n'
r')'
)
NORMAL_PRE_COMMIT_RUN = re.compile(
r'^\[INFO\] Initializing environment for .+\.\r?\n'
r'Bash hook\.+Passed\r?\n'
r'\[master [a-f0-9]{7}\] Commit!\r?\n' +
FILES_CHANGED +
r' create mode 100644 foo\r?\n$'
)
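# Illustrative commit output that NORMAL_PRE_COMMIT_RUN is meant to match
# (annotation; the hash and repository name are made up):
#
#   [INFO] Initializing environment for some-repo.
#   Bash hook................Passed
#   [master abc1234] Commit!
#    1 file changed, 0 insertions(+), 0 deletions(-)
#    create mode 100644 foo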
def test_install_pre_commit_and_run(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
assert install(Runner(path, C.CONFIG_FILE)) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def test_install_in_submodule_and_run(tempdir_factory):
src_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
parent_path = git_dir(tempdir_factory)
with cwd(parent_path):
cmd_output('git', 'submodule', 'add', src_path, 'sub')
cmd_output('git', 'commit', '-m', 'foo')
sub_pth = os.path.join(parent_path, 'sub')
with cwd(sub_pth):
assert install(Runner(sub_pth, C.CONFIG_FILE)) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def test_commit_am(tempdir_factory):
"""Regression test for #322."""
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
# Make an unstaged change
open('unstaged', 'w').close()
cmd_output('git', 'add', '.')
cmd_output('git', 'commit', '-m', 'foo')
with io.open('unstaged', 'w') as foo_file:
foo_file.write('Oh hai')
assert install(Runner(path, C.CONFIG_FILE)) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
def test_install_idempotent(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
assert install(Runner(path, C.CONFIG_FILE)) == 0
assert install(Runner(path, C.CONFIG_FILE)) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def _path_without_us():
# Choose a path which *probably* doesn't include us
return os.pathsep.join([
x for x in os.environ['PATH'].split(os.pathsep)
if x.lower() != os.path.dirname(sys.executable).lower()
])
def test_environment_not_sourced(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
# Patch the executable to simulate rming virtualenv
with mock.patch.object(sys, 'executable', '/bin/false'):
assert install(Runner(path, C.CONFIG_FILE)) == 0
# Use a specific homedir to ignore --user installs
homedir = tempdir_factory.get()
ret, stdout, stderr = cmd_output(
'git', 'commit', '--allow-empty', '-m', 'foo',
env={
'HOME': homedir,
'PATH': _path_without_us(),
# Git needs this to make a commit
'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
},
retcode=None,
)
assert ret == 1
assert stdout == ''
assert stderr == (
'`pre-commit` not found. '
'Did you forget to activate your virtualenv?\n'
)
FAILING_PRE_COMMIT_RUN = re.compile(
r'^\[INFO\] Initializing environment for .+\.\r?\n'
r'Failing hook\.+Failed\r?\n'
r'hookid: failing_hook\r?\n'
r'\r?\n'
r'Fail\r?\n'
r'foo\r?\n'
r'\r?\n$'
)
def test_failing_hooks_returns_nonzero(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
with cwd(path):
assert install(Runner(path, C.CONFIG_FILE)) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
assert FAILING_PRE_COMMIT_RUN.match(output)
EXISTING_COMMIT_RUN = re.compile(
r'^legacy hook\r?\n'
r'\[master [a-f0-9]{7}\] Commit!\r?\n' +
FILES_CHANGED +
r' create mode 100644 baz\r?\n$'
)
def test_install_existing_hooks_no_overwrite(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Write out an "old" hook
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as hook_file:
hook_file.write('#!/usr/bin/env bash\necho "legacy hook"\n')
make_executable(runner.pre_commit_path)
# Make sure we installed the "old" hook correctly
ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
assert ret == 0
assert EXISTING_COMMIT_RUN.match(output)
# Now install pre-commit (no-overwrite)
assert install(runner) == 0
# We should run both the legacy and pre-commit hooks
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert output.startswith('legacy hook\n')
assert NORMAL_PRE_COMMIT_RUN.match(output[len('legacy hook\n'):])
def test_install_existing_hook_no_overwrite_idempotent(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Write out an "old" hook
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as hook_file:
hook_file.write('#!/usr/bin/env bash\necho "legacy hook"\n')
make_executable(runner.pre_commit_path)
# Install twice
assert install(runner) == 0
assert install(runner) == 0
# We should run both the legacy and pre-commit hooks
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert output.startswith('legacy hook\n')
assert NORMAL_PRE_COMMIT_RUN.match(output[len('legacy hook\n'):])
FAIL_OLD_HOOK = re.compile(
r'fail!\r?\n'
r'\[INFO\] Initializing environment for .+\.\r?\n'
r'Bash hook\.+Passed\r?\n'
)
def test_failing_existing_hook_returns_1(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Write out a failing "old" hook
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as hook_file:
hook_file.write('#!/usr/bin/env bash\necho "fail!"\nexit 1\n')
make_executable(runner.pre_commit_path)
assert install(runner) == 0
# We should get a failure from the legacy hook
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
assert FAIL_OLD_HOOK.match(output)
def test_install_overwrite_no_existing_hooks(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
assert install(Runner(path, C.CONFIG_FILE), overwrite=True) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def test_install_overwrite(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Write out the "old" hook
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as hook_file:
hook_file.write('#!/usr/bin/env bash\necho "legacy hook"\n')
make_executable(runner.pre_commit_path)
assert install(runner, overwrite=True) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def test_uninstall_restores_legacy_hooks(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Write out an "old" hook
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as hook_file:
hook_file.write('#!/usr/bin/env bash\necho "legacy hook"\n')
make_executable(runner.pre_commit_path)
# Now install and uninstall pre-commit
assert install(runner) == 0
assert uninstall(runner) == 0
# Make sure we installed the "old" hook correctly
ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
assert ret == 0
assert EXISTING_COMMIT_RUN.match(output)
def test_replace_old_commit_script(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
# Install a script that looks like our old script
pre_commit_contents = io.open(
resource_filename('hook-tmpl'),
).read()
new_contents = pre_commit_contents.replace(
CURRENT_HASH, PRIOR_HASHES[-1],
)
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as pre_commit_file:
pre_commit_file.write(new_contents)
make_executable(runner.pre_commit_path)
# Install normally
assert install(runner) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def test_uninstall_doesnt_remove_not_our_hooks(tempdir_factory):
path = git_dir(tempdir_factory)
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
mkdirp(os.path.dirname(runner.pre_commit_path))
with io.open(runner.pre_commit_path, 'w') as pre_commit_file:
pre_commit_file.write('#!/usr/bin/env bash\necho 1\n')
make_executable(runner.pre_commit_path)
assert uninstall(runner) == 0
assert os.path.exists(runner.pre_commit_path)
PRE_INSTALLED = re.compile(
r'Bash hook\.+Passed\r?\n'
r'\[master [a-f0-9]{7}\] Commit!\r?\n' +
FILES_CHANGED +
r' create mode 100644 foo\r?\n$'
)
def test_installs_hooks_with_hooks_True(
tempdir_factory,
mock_out_store_directory,
):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
install(Runner(path, C.CONFIG_FILE), hooks=True)
ret, output = _get_commit_output(
tempdir_factory, pre_commit_home=mock_out_store_directory,
)
assert ret == 0
assert PRE_INSTALLED.match(output)
def test_install_hooks_command(tempdir_factory, mock_out_store_directory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
install(runner)
install_hooks(runner)
ret, output = _get_commit_output(
tempdir_factory, pre_commit_home=mock_out_store_directory,
)
assert ret == 0
assert PRE_INSTALLED.match(output)
def test_installed_from_venv(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
install(Runner(path, C.CONFIG_FILE))
# No environment so pre-commit is not on the path when running!
# Should still pick up the python from when we installed
ret, output = _get_commit_output(
tempdir_factory,
env={
'HOME': os.path.expanduser('~'),
'PATH': _path_without_us(),
'TERM': os.environ.get('TERM', ''),
# Windows needs this to import `random`
'SYSTEMROOT': os.environ.get('SYSTEMROOT', ''),
# Windows needs this to resolve executables
'PATHEXT': os.environ.get('PATHEXT', ''),
# Git needs this to make a commit
'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
},
)
assert ret == 0
assert NORMAL_PRE_COMMIT_RUN.match(output)
def _get_push_output(tempdir_factory):
return cmd_output_mocked_pre_commit_home(
'git', 'push', 'origin', 'HEAD:new_branch',
# git push puts pre-commit to stderr
stderr=subprocess.STDOUT,
tempdir_factory=tempdir_factory,
retcode=None,
)[:2]
def test_pre_push_integration_failing(tempdir_factory):
upstream = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(Runner(path, C.CONFIG_FILE), hook_type='pre-push')
# commit succeeds because pre-commit is only installed for pre-push
assert _get_commit_output(tempdir_factory)[0] == 0
retc, output = _get_push_output(tempdir_factory)
assert retc == 1
assert 'Failing hook' in output
assert 'Failed' in output
assert 'hookid: failing_hook' in output
def test_pre_push_integration_accepted(tempdir_factory):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(Runner(path, C.CONFIG_FILE), hook_type='pre-push')
assert _get_commit_output(tempdir_factory)[0] == 0
retc, output = _get_push_output(tempdir_factory)
assert retc == 0
assert 'Bash hook' in output
assert 'Passed' in output
def test_pre_push_integration_empty_push(tempdir_factory):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(Runner(path, C.CONFIG_FILE), hook_type='pre-push')
_get_push_output(tempdir_factory)
retc, output = _get_push_output(tempdir_factory)
assert output == 'Everything up-to-date\n'
assert retc == 0
def test_install_disallow_missing_config(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
remove_config_from_repo(path)
assert install(runner, overwrite=True, skip_on_missing_conf=False) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
def test_install_allow_missing_config(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
remove_config_from_repo(path)
assert install(runner, overwrite=True, skip_on_missing_conf=True) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
expected = (
'`.pre-commit-config.yaml` config file not found. '
'Skipping `pre-commit`.'
)
assert expected in output
def test_install_temporarily_allow_missing_config(tempdir_factory):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
runner = Runner(path, C.CONFIG_FILE)
remove_config_from_repo(path)
assert install(runner, overwrite=True, skip_on_missing_conf=False) == 0
env = dict(os.environ, PRE_COMMIT_ALLOW_NO_CONFIG='1')
ret, output = _get_commit_output(tempdir_factory, env=env)
assert ret == 0
expected = (
'`.pre-commit-config.yaml` config file not found. '
'Skipping `pre-commit`.'
)
assert expected in output
| mit | -5,424,046,404,525,893,000 | 33.407407 | 79 | 0.635728 | false |
pavolloffay/jaeger | plugin/storage/es/esCleaner.py | 1 | 4828 | #!/usr/bin/env python3
import curator
import elasticsearch
import os
import ssl
import sys
TIMEOUT=120
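# A minimal sketch of a typical invocation (the prefix, retention and host
# below are illustrative assumptions, not values mandated by this script):
#
#   INDEX_PREFIX=jaeger ROLLOVER=true ./esCleaner.py 14 http://localhost:9200
#
# which would delete rollover indices older than 14 days on the given host.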
def main():
if len(sys.argv) != 3:
print('USAGE: [INDEX_PREFIX=(default "")] [ARCHIVE=(default false)] ... {} NUM_OF_DAYS http://HOSTNAME[:PORT]'.format(sys.argv[0]))
print('NUM_OF_DAYS ... delete indices that are older than the given number of days.')
print('HOSTNAME ... specifies which Elasticsearch hosts URL to search and delete indices from.')
        print('TIMEOUT ... number of seconds to wait for master node response (default {}).'.format(TIMEOUT))
print('INDEX_PREFIX ... specifies index prefix.')
print('ARCHIVE ... specifies whether to remove archive indices (only works for rollover) (default false).')
print('ROLLOVER ... specifies whether to remove indices created by rollover (default false).')
print('ES_USERNAME ... The username required by Elasticsearch.')
print('ES_PASSWORD ... The password required by Elasticsearch.')
print('ES_TLS ... enable TLS (default false).')
print('ES_TLS_CA ... Path to TLS CA file.')
print('ES_TLS_CERT ... Path to TLS certificate file.')
print('ES_TLS_KEY ... Path to TLS key file.')
print('ES_TLS_SKIP_HOST_VERIFY ... (insecure) Skip server\'s certificate chain and host name verification.')
sys.exit(1)
client = create_client(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"), str2bool(os.getenv("ES_TLS", 'false')), os.getenv("ES_TLS_CA"), os.getenv("ES_TLS_CERT"), os.getenv("ES_TLS_KEY"), str2bool(os.getenv("ES_TLS_SKIP_HOST_VERIFY", 'false')))
ilo = curator.IndexList(client)
empty_list(ilo, 'Elasticsearch has no indices')
prefix = os.getenv("INDEX_PREFIX", '')
if prefix != '':
prefix += '-'
if str2bool(os.getenv("ARCHIVE", 'false')):
filter_archive_indices_rollover(ilo, prefix)
else:
if str2bool(os.getenv("ROLLOVER", 'false')):
filter_main_indices_rollover(ilo, prefix)
else:
filter_main_indices(ilo, prefix)
empty_list(ilo, 'No indices to delete')
for index in ilo.working_list():
print("Removing", index)
timeout = int(os.getenv("TIMEOUT", TIMEOUT))
delete_indices = curator.DeleteIndices(ilo, master_timeout=timeout)
delete_indices.do_action()
def filter_main_indices(ilo, prefix):
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-(span|service|dependencies)-\d{4}-\d{2}-\d{2}")
empty_list(ilo, "No indices to delete")
# This excludes archive index as we use source='name'
# source `creation_date` would include archive index
ilo.filter_by_age(source='name', direction='older', timestring='%Y-%m-%d', unit='days', unit_count=int(sys.argv[1]))
def filter_main_indices_rollover(ilo, prefix):
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-(span|service)-\d{6}")
empty_list(ilo, "No indices to delete")
# do not remove active write indices
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-service-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def filter_archive_indices_rollover(ilo, prefix):
# Remove only rollover archive indices
# Do not remove active write archive index
    ilo.filter_by_regex(kind='regex', value=prefix + r"jaeger-span-archive-\d{6}")
empty_list(ilo, "No indices to delete")
ilo.filter_by_alias(aliases=[prefix + 'jaeger-span-archive-write'], exclude=True)
empty_list(ilo, "No indices to delete")
ilo.filter_by_age(source='creation_date', direction='older', unit='days', unit_count=int(sys.argv[1]))
def empty_list(ilo, error_msg):
try:
ilo.empty_list_check()
except curator.NoIndices:
print(error_msg)
sys.exit(0)
def str2bool(v):
return v.lower() in ('true', '1')
def create_client(username, password, tls, ca, cert, key, skipHostVerify):
context = ssl.create_default_context()
if ca is not None:
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca)
elif skipHostVerify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
if username is not None and password is not None:
return elasticsearch.Elasticsearch(sys.argv[2:], http_auth=(username, password), ssl_context=context)
elif tls:
context.load_cert_chain(certfile=cert, keyfile=key)
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
else:
return elasticsearch.Elasticsearch(sys.argv[2:], ssl_context=context)
if __name__ == "__main__":
main()
| apache-2.0 | 3,711,925,970,792,277,000 | 42.107143 | 250 | 0.663422 | false |
Lamelos/django-allauth-office365 | allauth_office365/adapter.py | 1 | 2506 | from django.contrib import messages
from django.dispatch import receiver
from django.http import HttpResponseForbidden
from allauth.account.signals import user_logged_in
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter, get_adapter
from allauth.socialaccount.providers import registry
from .provider import Office365Provider
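# A minimal sketch of how this adapter could be wired into a Django project;
# django-allauth resolves the adapter through the SOCIALACCOUNT_ADAPTER
# setting (the dotted path below assumes this package is importable as
# allauth_office365):
#
#   # settings.py
#   SOCIALACCOUNT_ADAPTER = 'allauth_office365.adapter.SocialAccountAdapter'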
class SocialAccountAdapter(DefaultSocialAccountAdapter):
# based on: https://github.com/thenewguy/django-allauth-adfs/blob/master/allauth_adfs/socialaccount/adapter.py
def pre_social_login(self, request, sociallogin):
# new user logins are handled by populate_user
if sociallogin.is_existing:
changed, user = self.update_user_fields(request, sociallogin)
if changed:
user.save()
def populate_user(self, request, sociallogin, data):
user = super(SocialAccountAdapter, self).populate_user(request, sociallogin, data)
self.update_user_fields(request, sociallogin, user)
return user
def update_user_fields(self, request, sociallogin=None, user=None):
changed = False
if user is None:
user = sociallogin.account.user
office365_provider = registry.by_id(Office365Provider.id, request)
false_keys = ["is_staff", "is_superuser"]
boolean_keys = false_keys + ["is_active"]
copy_keys = boolean_keys + ["first_name", "last_name", "email", "username"]
if sociallogin is not None and sociallogin.account.provider == Office365Provider.id:
data = sociallogin.account.extra_data
values = office365_provider.extract_common_fields(data)
for key in copy_keys:
# it is assumed that values are cleaned and set for all
# fields and if any of the boolean_keys are not provided
# in the raw data they should be set to False by
# the extract_common_fields method
if key in values and getattr(user, key) != values[key]:
setattr(user, key, values[key])
changed = True
else:
for key in false_keys:
if getattr(user, key):
msg = "Staff users must authenticate via the %s provider!" % office365_provider.name
response = HttpResponseForbidden(msg)
raise ImmediateHttpResponse(response)
return changed, user
| mit | 1,764,432,020,011,735,000 | 44.563636 | 114 | 0.659218 | false |
cyberang3l/sysdata-collector | libs/globalvars.py | 1 | 2230 | # Copyright (C) 2014 Vangelis Tasoulas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Define default constants
PROGRAM_NAME = 'sysdata-collector'
VERSION = '0.0.1'
AUTHOR = 'Vangelis Tasoulas'
# Default config file location where the program should
# look for a configuration file
CONFIG_FILE_LOCATIONS = [".", "/etc/template"]
# The default config filename which might exist
# in CONFIG_FILE_LOCATIONS
DEFAULT_CONFIG_FILENAME = PROGRAM_NAME + ".conf"
# Console logging level (if you change this to DEBUG,
# a lot of text will be sent to STDOUT)
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
# DEBUG = 10
CONSOLE_LOG_LEVEL = 20
class exitCode():
"""
Define static exit Codes
"""
SUCCESS = 0
FAILURE = 1
INCORRECT_USAGE = 2
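# A short usage sketch (the import path assumes this file stays in libs/):
#
#   from libs import globalvars
#   sys.exit(globalvars.exitCode.FAILURE)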
PRINT_SEPARATOR = "#######################################"
# Define AND set default values for the global variables here
# Default file logging level
# CRITICAL = 50
# ERROR = 40
# WARNING = 30
# INFO = 20
# DEBUG = 10
FileLogLevel = 20
# Default absolute path for the log file
log_file = "{0}/{1}".format(".", PROGRAM_NAME + ".log")
# Conf will be found on runtime (if any)
conf_file = ""
# If your program can run in daemon mode,
# check this variable in runtime if it is true
daemonMode = False
##################################################
list_available_plugins = False
list_active_plugins = False
only_print_samples = False
append_file = False
test_plugin = None
output_file = 'data_collected-%{ts}.csv'
delimiter = ","
active_plugins_dir = "active-plugins"
plugin_directories = []
intervalBetweenSamples = 10
| gpl-3.0 | -8,446,234,026,580,170,000 | 26.875 | 71 | 0.685202 | false |
dcsch/pyif | pyif/story.py | 1 | 3162 | '''
Created on Nov 21, 2013
@author: david
'''
from thing import Thing, Player
import grammar
import parser
import action
import glk
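# A minimal usage sketch (the delegate below is hypothetical; it only needs
# an initialise() method and may optionally provide death_message()):
#
#   class MyDelegate(object):
#       def initialise(self):
#           pass
#
#   story = Story("Cave", "An interactive example", MyDelegate())
#   story.run()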
class Story:
def __init__(self, name, headline, delegate):
self.name = name
self.headline = headline
self.release = 1
self.serial = 81001
self.delegate = delegate
self.root = Thing("root", None)
self.compass = Thing("compass", self.root)
self.north = Thing("north", self.compass)
self.north.nouns = ["north"]
self.east = Thing("east", self.compass)
self.east.nouns = ["east"]
self.south = Thing("south", self.compass)
self.south.nouns = ["south"]
self.west = Thing("west", self.compass)
self.west.nouns = ["west"]
self.northeast = Thing("northeast", self.compass)
self.northeast.nouns = ["northeast"]
self.northwest = Thing("northwest", self.compass)
self.northwest.nouns = ["northwest"]
self.southeast = Thing("southeast", self.compass)
self.southeast.nouns = ["southeast"]
self.southwest = Thing("southwest", self.compass)
self.southwest.nouns = ["southwest"]
self.up_above = Thing("up above", self.compass)
self.up_above.nouns = ["up", "above"]
self.ground = Thing("ground", self.compass)
self.ground.nouns = ["ground"]
self.inside = Thing("inside", self.compass)
self.inside.nouns = ["inside"]
self.outside = Thing("outside", self.compass)
self.outside.nouns = ["outside"]
# Player
self.player = Player("cretin", self.root)
self.player.nouns = ["cretin", "me"]
self.player.description = "As good looking as ever."
self.actor = self.player
self.nouns = []
# State and Parser
self.has_quit = False
self.deadflag = 0
self.keep_silent = False
self.grammar = grammar.Grammar(self)
self.parser = parser.Parser(self, self.grammar)
def run(self):
"The main story loop"
if self.delegate:
self.delegate.initialise()
# The initial text
action.version(self)
glk.put_char("\n")
action.look(self, True)
# while True:
# event = glk.select()
# if event.type == EVTYPE_LINEINPUT:
while self.parser.read_input():
if self.deadflag:
self.handle_deadflag()
def handle_deadflag(self):
"Report the player's end-of-game status"
glk.put_string("\n *** ")
handled = False
if self.delegate and "death_message" in dir(self.delegate):
handled = self.delegate.death_message()
if not handled:
self.death_message()
glk.put_string(" ***\n\n\n")
def death_message(self):
"The text of the death message"
if self.deadflag == 1:
glk.put_string("You have died")
elif self.deadflag == 2:
glk.put_string("You have won")
| mit | -1,471,477,336,399,294,700 | 29.114286 | 67 | 0.550285 | false |
HomeRad/TorCleaner | doc/bfknav.py | 1 | 7157 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2009 Bastian Kleineidam
"""
General navigation writer reading .nav file info.
"""
import sys
import os
import re
from cStringIO import StringIO
_slashes_ro = re.compile(r"/+")
_thisdir_ro = re.compile(r"^\./")
_samedir_ro = re.compile(r"/\./|/\.$")
_parentdir_ro = re.compile(r"^/(\.\./)+|/(?!\.\./)[^/]+/\.\.(/|$)")
_relparentdir_ro = re.compile(r"^(?!\.\./)[^/]+/\.\.(/|$)")
def collapse_segments(path):
"""
Remove all redundant segments from the given URL path.
Precondition: path is an unquoted url path
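    A couple of quick examples of the collapsing behaviour (these follow
    directly from the regular expressions above)::
        collapse_segments("/a/./b//c/../d")   # -> "/a/b/d"
        collapse_segments("../x/./y/")        # -> "../x/y/"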
"""
# shrink multiple slashes to one slash
path = _slashes_ro.sub("/", path)
# collapse redundant path segments
path = _thisdir_ro.sub("", path)
path = _samedir_ro.sub("/", path)
# collapse parent path segments
# note: here we exploit the fact that the replacements happen
# to be from left to right (see also _parentdir_ro above)
newpath = _parentdir_ro.sub("/", path)
while newpath != path:
path = newpath
newpath = _parentdir_ro.sub("/", path)
# collapse parent path segments of relative paths
# (ie. without leading slash)
newpath = _relparentdir_ro.sub("", path)
while newpath != path:
path = newpath
newpath = _relparentdir_ro.sub("", path)
return path
class Node(object):
"""
Node class for use in a navigation tree, with abilities to write
HTML output.
"""
def __init__(self, name, order, filename):
"""Initialize node information"""
self.name = name
self.order = order
self.filename = filename
self.level = 0
self.children = []
self.sibling_right = None
self.active = False
self.parent = None
def get_url(self, level):
"""Get relative URL to this node."""
if self.children:
url = self.children[0].get_url(level)
else:
url = "../"*level + self.filename
return collapse_segments(url)
def addChildren(self, nodes):
"""
Add given nodes as children of this node, setting parent
and level information accordingly.
"""
for node in nodes:
node.parent = self
node.level = self.level + 1
self.children.append(node)
def write_nav(self, fp, active):
"""
Write HTML node navigation.
"""
descend = has_node(active, self.children)
if self.active or descend:
self.write_active(fp)
else:
self.write_inactive(fp, active.level)
if self.sibling_right:
self.sibling_right.write_nav(fp, active)
if descend:
# go to next level
self.write_nextlevel(fp)
self.children[0].write_nav(fp, active)
def write_inactive(self, fp, level):
"""
Write HTML of inactive navigation node.
"""
s = '<a href="%s">%s' % (self.get_url(level), self.name)
if self.children:
s += ' >'
s += "</a>\n"
fp.write(s)
def write_active(self, fp):
"""
Write HTML of active navigation node.
"""
s = "<span>"
#if not self.children:
# s += '> '
s += self.name
if self.children:
s += ' >'
s += "</span>\n"
fp.write(s)
def write_nextlevel(self, fp):
fp.write('</div>\n<div class="navrow" style="padding: 0em 0em 0em %dem;">'% (self.level+2))
def new_node(self):
return Node(self.name, sys.maxint, self.filename)
def __repr__(self):
return "<Node %r>"%self.name
def __lt__(self, other):
return self.order < other.order
def __le__(self, other):
return self.order <= other.order
def __eq__(self, other):
return self.order == other.order
def __ne__(self, other):
return self.order != other.order
def __gt__(self, other):
return self.order > other.order
def __ge__(self, other):
return self.order >= other.order
def parse_navtree(dirname):
"""
Parse a hierarchy of .nav files into a tree structure,
consisting of lists of lists. The list entries are sorted in
navigation order.
"""
nodes = []
files = os.listdir(dirname)
for f in files:
filename = os.path.join(dirname, f)
htmlname = os.path.join(dirname, os.path.splitext(f)[0]+".html")
if os.path.isfile(filename) and os.path.isfile(htmlname) and \
f.endswith('.nav'):
nodes.append(get_nav_node(filename, htmlname))
elif os.path.isdir(filename):
subnodes = parse_navtree(filename)
if subnodes:
if os.path.isfile(filename+".nav"):
node = get_nav_node(filename+".nav", filename)
else:
node = subnodes[0].new_node()
node.addChildren(subnodes)
nodes.append(node)
nodes.sort()
for i,n in enumerate(nodes):
if (i+1)<len(nodes):
n.sibling_right = nodes[i+1]
#print_nodes(nodes)
return nodes
def get_nav_node(navfile, htmlname):
"""
Get a Node() instance with info of given navfile.
"""
flocals = {}
execfile(navfile, {}, flocals)
order = flocals.get('order', sys.maxint)
name = flocals['name']
return Node(name, order, htmlname)
def print_nodes(nodes):
"""
Print a tree structure to stdout.
"""
for node in nodes:
print " "*node.level+node.name
if node.children:
print_nodes(node.children)
def has_node(node, nodes):
"""
Look for node in a tree structure.
@return True if node is found
"""
for n in nodes:
if node.filename == n.filename:
return True
if has_node(node, n.children):
return True
return False
def generate_nav(start, nodes):
"""
Write one navigation tree level into HTML files, with given
start node as root node.
"""
for node in nodes:
print node.filename
if node.children:
generate_nav(start, node.children)
else:
node.active = True
fp = StringIO()
start.write_nav(fp, node)
nav = """<div class="navigation">
<div class="navrow" style="padding: 0em 0em 0em 1em;">
%s
</div>
</div>
""" % fp.getvalue()
node.active = False
write_nav(node.filename, nav)
def write_nav(filename, nav):
"""
Write navigation into filename.
"""
lines = []
skip = False
f = open(filename)
for line in f:
if not skip:
lines.append(line)
if line.startswith("<!-- bfknav -->"):
skip = True
lines.append(nav)
elif line.startswith("<!-- /bfknav -->"):
skip = False
lines.append(line)
f.close()
f = open(filename, 'w')
for line in lines:
f.write(line)
f.close()
if __name__=='__main__':
nodes = parse_navtree(".")
if nodes:
generate_nav(nodes[0], nodes)
| gpl-2.0 | 5,557,763,311,401,146,000 | 26.526923 | 99 | 0.551069 | false |
EKiefer/edge-starter | py34env/Scripts/thresholder.py | 1 | 1845 | #!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
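# a typical command line would be (the image name is only an example):
#
#   python thresholder.py sample.jpg
#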
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
        self.value = int(value)  # the Scale widget passes its value as a string
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| mit | 5,243,874,531,555,993,000 | 23.932432 | 74 | 0.58916 | false |
DimensionDataCBUSydney/plumbery | plumbery/infrastructure.py | 1 | 66146 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import time
from uuid import uuid4
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.common.dimensiondata import DimensionDataFirewallRule
from libcloud.common.dimensiondata import DimensionDataFirewallAddress
from libcloud.common.dimensiondata import TYPES_URN
from libcloud.utils.xml import findtext, findall
from plumbery.terraform import Terraform
from plumbery.exception import PlumberyException
from plumbery.plogging import plogging
__all__ = ['PlumberyInfrastructure']
class PlumberyInfrastructure(object):
"""
Infrastructure as code, for network and security
:param facility: the underlying physical facility
:type facility: :class:`plumbery.PlumberyFacility`
This is an abstraction of a virtual data center. It is a secured
container for multiple nodes.
Example::
from plumbery.infrastructure import PlumberyInfrastructure
infrastructure = PlumberyInfrastructure(facility)
infrastructure.build(blueprint)
In this example an infrastructure is initialised at the given facility, and
then it is asked to create pipes and plumbing described in the
    provided blueprint. This covers only the network and the security,
    not the nodes themselves.
Attributes:
facility (PlumberyFacility):
a handle to the physical facility where network domains
are implemented
"""
# the physical data center
facility = None
def __init__(self, facility=None):
"""A virtual data centre attached to a physical data centre"""
# handle to parent parameters and functions
self.facility = facility
self.region = facility.region
self.plumbery = facility.plumbery
self.network = None
self.domain = None
self.terraform = Terraform(facility.plumbery.working_directory)
self._cache_remote_vlan = []
self._cache_offshore_vlan = []
self._cache_firewall_rules = []
self._cache_balancers = None
self._cache_pools = None
self._network_domains_already_built = []
self._vlans_already_built = []
def get_region_id(self):
return self.facility.get_setting('regionId')
def get_default(self, label, default=None):
"""
Retrieves default value for a given name
"""
value = self.facility.get_setting(label)
if value is not None:
return value
return default
def get_container(self, blueprint):
"""
Retrieves a domain and a network attached to a blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
:return: the infrastructure associated to the provided blueprint
:rtype: :class:`plumbery.PlumberyInfrastructure` or `None``
The returned object has at least a network domain and an Ethernet
network, like in the following example::
>>>container = infrastructure.get_container(blueprint)
>>>print(container.domain.name)
...
>>>print(container.network.name)
...
"""
target = PlumberyInfrastructure(self.facility)
target.blueprint = blueprint
if ('domain' not in blueprint
or type(blueprint['domain']) is not dict):
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if ('ethernet' not in blueprint
or type(blueprint['ethernet']) is not dict):
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
target.domain = self.get_network_domain(domainName)
networkName = blueprint['ethernet']['name']
target.network = self.get_ethernet(networkName)
return target
def get_network_domain(self, name):
"""
Retrieves a network domain by name
:param name: name of the target network domain
:type name: ``str``
"""
if len(self.facility._cache_network_domains) < 1:
plogging.debug("Listing network domains")
self.facility._cache_network_domains = \
self.region.ex_list_network_domains(
self.facility.get_location_id())
plogging.debug("- found {} network domains"
.format(len(self.facility._cache_network_domains)))
for domain in self.facility._cache_network_domains:
if domain.name == name:
return domain
return None
def get_ethernet(self, path):
"""
Retrieves an Ethernet network by name
:param path: the name of the target Ethernet network
:type path: ``str`` or ``list``of ``str``
:return: an instance of an Ethernet network
:rtype: :class:`VLAN` or ``None``
This function searches firstly at the current facility. If the
name is a complete path to a remote network, then plumbery looks
there. If a different region is provided, then authentication is done
against the related endpoint.
For example if ``MyNetwork`` has been defined in a data centre in
Europe::
>>>infrastructure.get_ethernet('MyNetwork')
>>>infrastructure.get_ethernet(['EU6', 'MyNetwork'])
Looking for remote Ethernet network 'EU6::MyNetwork'
- found it
>>>infrastructure.get_ethernet(['dd-eu', 'EU6', 'MyNetwork'])
Looking for offshore Ethernet network 'dd-eu::EU6::MyNetwork'
- found it
"""
if isinstance(path, str):
path = path.split('::')
if len(path) == 2: # force offshore lookup if needed
target_region = self.facility.get_region(path[0])
if target_region != self.facility.get_region():
path.insert(0, target_region)
if len(path) == 1: # local name
if len(self.facility._cache_vlans) < 1:
plogging.debug("Listing Ethernet networks")
self.facility._cache_vlans = self.region.ex_list_vlans(
location=self.facility.get_location_id())
plogging.debug("- found {} Ethernet networks"
.format(len(self.facility._cache_vlans)))
for network in self.facility._cache_vlans:
if network.name == path[0]:
return network
elif len(path) == 2: # different location, same region
if (len(self._cache_remote_vlan) == 3
and self._cache_remote_vlan[0] == path[0]
and self._cache_remote_vlan[1] == path[1]):
return self._cache_remote_vlan[2]
plogging.info("Looking for remote Ethernet network '%s'",
'::'.join(path))
try:
remoteLocation = self.region.ex_get_location_by_id(path[0])
except IndexError:
plogging.info("- '%s' is unknown", path[0])
return None
vlans = self.region.ex_list_vlans(location=remoteLocation)
for network in vlans:
if network.name == path[1]:
self._cache_remote_vlan += path
self._cache_remote_vlan.append(network)
plogging.info("- found it")
return network
plogging.info("- not found")
elif len(path) == 3: # other region
if (len(self._cache_offshore_vlan) == 4
and self._cache_offshore_vlan[0] == path[0]
and self._cache_offshore_vlan[1] == path[1]
and self._cache_offshore_vlan[2] == path[2]):
return self._cache_offshore_vlan[3]
plogging.info("Looking for offshore Ethernet network '{}'"
.format('::'.join(path)))
offshore = self.plumbery.get_compute_driver(region=path[0])
try:
remoteLocation = offshore.ex_get_location_by_id(path[1])
except IndexError:
plogging.info("- '{}' is unknown".format(path[1]))
return None
vlans = offshore.ex_list_vlans(location=remoteLocation)
for network in vlans:
if network.name == path[2]:
self._cache_offshore_vlan += path
self._cache_offshore_vlan.append(network)
plogging.info("- found it")
return network
plogging.info("- not found")
return None
def build(self, blueprint):
"""
Creates the infrastructure for one blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
:return: ``True`` if the network has been created or is already there,
``False`` otherwise
:rtype: ``bool``
:raises: :class:`plumbery.PlumberyException`
- if some unrecoverable error occurs
This function is looking at all fittings in the blueprint except the
nodes. This is including:
* a network domain
* one Ethernet network
* eventually, several public IPv4 addresses
* address translation rules to private IPv4 addresses
* firewall rules
In safe mode, the function will stop on any missing component since
        it is not in a position to add fittings, and will return ``False``.
        If all components already exist then the function will return ``True``.
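        A minimal blueprint, expressed as a python ``dict``, could look like
        the following sketch (names and subnet are illustrative only)::
            {'target': 'web',
             'domain': {'name': 'MyDataCentre'},
             'ethernet': {'name': 'MyNetwork', 'subnet': '10.0.34.0'}}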
"""
self.blueprint = blueprint
plogging.debug("Building infrastructure of blueprint '{}'".format(
blueprint['target']))
if 'domain' not in blueprint or type(blueprint['domain']) is not dict:
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
if 'ethernet' not in blueprint \
or type(blueprint['ethernet']) is not dict:
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if 'subnet' not in blueprint['ethernet']:
            raise PlumberyException(
                "Error: no IPv4 subnet "
                "(e.g., '10.0.34.0') has been defined for the blueprint '{}'!"
.format(blueprint['target']))
networkName = blueprint['ethernet']['name']
self.domain = self.get_network_domain(domainName)
if self.domain is not None:
plogging.info("Creating network domain '{}'".format(domainName))
plogging.info("- already there")
elif self.plumbery.safeMode:
plogging.info("Creating network domain '{}'".format(domainName))
plogging.info("- skipped - safe mode")
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- skipped - safe mode")
return False
else:
plogging.info("Creating network domain '{}'".format(domainName))
# the description attribute is a smart way to tag resources
description = '#plumbery'
if 'description' in blueprint['domain']:
description = blueprint['domain']['description']+' #plumbery'
# level of service
service = 'ESSENTIALS'
if 'service' in blueprint['domain']:
service = blueprint['domain']['service'].upper()
while True:
try:
self.domain = self.region.ex_create_network_domain(
location=self.facility.location,
name=domainName,
service_plan=service,
description=description)
plogging.info("- in progress")
# prevent locks in xops
self.region.ex_wait_for_state(
'NORMAL', self.region.ex_get_network_domain,
poll_interval=5, timeout=1200,
network_domain_id=self.domain.id)
self.facility._cache_network_domains.append(self.domain)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'OPERATION_NOT_SUPPORTED' in str(feedback):
plogging.info("- operation not supported")
return False
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return False
else:
plogging.info("- unable to create network domain")
plogging.error(str(feedback))
return False
break
self.network = self.get_ethernet(networkName)
if self.network is not None:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- already there")
elif self.plumbery.safeMode:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
plogging.info("- skipped - safe mode")
return False
else:
plogging.info("Creating Ethernet network '{}'"
.format(networkName))
# the description attribute is a smart way to tag resources
description = '#plumbery'
if 'description' in blueprint['ethernet']:
description = blueprint['ethernet']['description']+' #plumbery'
while True:
try:
self.network = self.region.ex_create_vlan(
network_domain=self.domain,
name=networkName,
private_ipv4_base_address=blueprint['ethernet']['subnet'],
description=description)
plogging.info("- in progress")
# prevent locks in xops
self.region.ex_wait_for_state(
'NORMAL',
self.region.ex_get_vlan,
poll_interval=5, timeout=1200,
vlan_id=self.network.id)
self.facility._cache_vlans.append(self.network)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- not possible "
"- network already exists elsewhere")
elif 'IP_ADDRESS_NOT_UNIQUE' in str(feedback):
plogging.info("- not possible "
"- subnet is used elsewhere")
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return False
else:
plogging.info("- unable to create Ethernet network")
plogging.error(str(feedback))
return False
break
if 'reserved' in blueprint['ethernet']:
for reserved in blueprint['ethernet']['reserved']:
plogging.info("Reserving address '{}'"
.format(reserved))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
while True:
try:
self.ex_reserve_private_ip_addresses(
vlan=self.network,
address=reserved)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
else:
plogging.info("- unable to create Ethernet network")
plogging.error(str(feedback))
return False
break
if 'multicloud' in blueprint \
and isinstance(blueprint['multicloud'], dict):
plogging.info("Starting multicloud deployment")
self.terraform.build(blueprint['multicloud'])
return True
def destroy_blueprint(self, blueprint):
"""
Destroys network and security elements of a blueprint
:param blueprint: the various attributes of the target fittings
:type blueprint: ``dict``
This function looks after following service elements:
* it releases public IPv4 addresses
* it destroys firewall rules
* it destroys the Ethernet network
* it destroys the network domain
The destruction is tentative, meaning that if the Ethernet network or
        the network domain still has some dependency then it cannot be destroyed.
        This happens quite often since multiple blueprints can share the
same Ethernet network or the same network domain.
"""
self.blueprint = blueprint
if ('domain' not in blueprint
or type(blueprint['domain']) is not dict):
raise PlumberyException(
"Error: no network domain has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
if ('ethernet' not in blueprint
or type(blueprint['ethernet']) is not dict):
raise PlumberyException(
"Error: no ethernet network has been defined "
"for the blueprint '{}'!".format(blueprint['target']))
domainName = blueprint['domain']['name']
networkName = blueprint['ethernet']['name']
domain = self.get_network_domain(domainName)
if domain is None:
plogging.info("Destroying Ethernet network '{}'"
.format(networkName))
plogging.info("- not found")
plogging.info("Destroying network domain '{}'".format(domainName))
plogging.info("- not found")
return
self._destroy_firewall_rules()
self._destroy_balancer()
self._release_ipv4()
plogging.info("Destroying Ethernet network '{}'".format(networkName))
network = self.get_ethernet(networkName)
if network is None:
plogging.info("- not found")
elif ('destroy' in blueprint['ethernet']
and blueprint['ethernet']['destroy'] == 'never'):
plogging.info("- this network can never be destroyed")
elif self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
retry = True
while True:
try:
self.region.ex_delete_vlan(vlan=network)
plogging.info("- in progress")
while True:
try:
time.sleep(10)
self.region.ex_get_vlan(vlan_id=network.id)
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
break
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
elif 'HAS_DEPENDENCY' in str(feedback):
# give time to ensure nodes have been deleted
if retry:
retry = False
time.sleep(30)
continue
plogging.info("- not now - stuff on it")
return
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
plogging.info(feedback)
return
else:
plogging.info("- unable to destroy Ethernet network")
plogging.error(str(feedback))
return
break
plogging.info("Destroying network domain '{}'".format(domainName))
if 'multicloud' in blueprint \
and isinstance(blueprint['multicloud'], dict):
plogging.info("Destroying multicloud deployment")
self.terraform.destroy(blueprint['multicloud'], safe=self.plumbery.safeMode)
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
while True:
try:
self.region.ex_delete_network_domain(network_domain=domain)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
elif 'HAS_DEPENDENCY' in str(feedback):
plogging.info("- not now - stuff on it")
return
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return
else:
plogging.info("- unable to destroy Ethernet network")
plogging.error(str(feedback))
return
break
def _build_balancer(self):
"""
Adds load balancing for nodes in the blueprint
Example in the fittings plan::
- web:
domain: *vdc1
ethernet: *data
nodes:
- apache-[10..19]
balancer:
- http:
port: 80
protocol: http
- https:
port: 443
protocol: http
pool:
algorithm: round_robin
In this example, load balancing is configured to accept web traffic
and to distribute the workload across multiple web engines.
One balancer is configured for regular http protocol on port 80. The
other balancer is for secured web protocol, aka, https, on port 443.
The algorithm used by default is ``round_robin``. This parameter
        can take any of the following values:
* ``random``
* ``round_robin``
* ``least_connections``
* ``weighted_round_robin``
* ``weighted_least_connections``
* ``shortest_response``
* ``persistent_ip``
"""
if 'balancers' not in self.blueprint:
return True
domain = self.get_network_domain(self.blueprint['domain']['name'])
network = self.get_ethernet(self.blueprint['ethernet']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
pool = self._get_pool()
if pool is None:
if 'pool' in self.blueprint:
settings = self.blueprint['pool']
if not isinstance(settings, dict):
settings = {}
else:
settings = {}
name = self._name_pool()
if 'algorithm' in settings:
algorithm = settings['algorithm'].lower()
else:
algorithm = 'round_robin'
algorithms = [
'random',
'round_robin',
'least_connections',
'weighted_round_robin',
'weighted_least_connections',
'shortest_response',
'persistent_ip']
if algorithm not in algorithms:
raise PlumberyException(
"Error: unknown algorithm has been defined "
"for the pool '{}'!".format(name))
if 'description' not in settings:
settings['description'] = 'by plumbery'
plogging.info("Creating pool '{}'".format(name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
pool = driver.ex_create_pool(
network_domain_id=domain.id,
name=name,
balancer_method=algorithm,
ex_description=settings['description'],
health_monitors=None,
service_down_action='NONE',
slow_ramp_time=30)
if self._cache_pools is None:
self._cache_pools = []
self._cache_pools.append(pool)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create pool")
plogging.error(str(feedback))
for item in self.blueprint['balancers']:
if isinstance(item, dict):
label = list(item)[0]
settings = item[label]
else:
label = str(item)
settings = {}
name = self.name_balancer(label, settings)
if self._get_balancer(name):
plogging.info("Creating balancer '{}'".format(name))
plogging.info("- already there")
continue
if 'protocol' in settings:
protocol = settings['protocol']
else:
protocol = 'http'
protocols = ['http', 'https', 'tcp', 'udp']
if protocol not in protocols:
raise PlumberyException(
"Error: unknown protocol has been defined "
"for the balancer '{}'!".format(label))
if 'port' in settings:
port = str(settings['port'])
else:
port = '80'
if int(port) < 1 or int(port) > 65535:
raise PlumberyException(
"Error: invalid port has been defined "
"for the balancer '{}'!".format(label))
plogging.info("Creating balancer '{}'".format(name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
try:
if 'address' in settings:
ip = settings['address']
else:
ip = self._get_ipv4()
balancer = driver.ex_create_virtual_listener(
network_domain_id=domain.id,
name=name,
ex_description="#plumbery",
listener_ip_address=ip,
port=port,
pool=pool,
persistence_profile=None,
fallback_persistence_profile=None,
irule=None,
protocol='TCP',
connection_limit=25000,
connection_rate_limit=2000,
source_port_preservation='PRESERVE')
if self._cache_balancers is None:
self._cache_balancers = []
self._cache_balancers.append(balancer)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
elif 'NO_IP_ADDRESS_AVAILABLE' in str(feedback):
plogging.info("- unable to create balancer")
                    plogging.error("Error: No more IPv4 addresses available "
"-- assign more")
raise
else:
plogging.info("- unable to create balancer")
plogging.error(str(feedback))
firewall = self.name_firewall_rule('Internet', name, port)
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=True,
ip_address=network.private_ipv4_range_address,
ip_prefix_size=network.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=ip,
ip_prefix_size=None,
port_begin=port,
port_end=None,
address_list_id=None,
port_list_id=None)
rule = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=firewall,
location=network.location,
network_domain=network.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol='TCP',
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
plogging.info("Creating firewall rule '{}'"
.format(firewall))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
self._ex_create_firewall_rule(
network_domain=domain,
rule=rule,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
return True
def _destroy_balancer(self):
"""
Destroys load balancer
"""
if 'balancers' not in self.blueprint:
return True
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
for item in self.blueprint['balancers']:
if isinstance(item, dict):
label = list(item)[0]
settings = item[label]
else:
label = str(item)
settings = {}
name = self.name_balancer(label, settings)
balancer = self._get_balancer(name)
plogging.info("Destroying balancer '{}'".format(name))
if balancer is None:
plogging.info("- not found")
continue
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
continue
try:
driver.destroy_balancer(balancer)
plogging.info("- in progress")
except Exception as feedback:
if 'NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy balancer")
plogging.error(str(feedback))
pool = self._get_pool()
plogging.info("Destroying pool '{}'".format(self._name_pool()))
if pool is None:
plogging.info("- not found")
elif self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
driver.ex_destroy_pool(pool)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to destroy pool")
plogging.error(str(feedback))
plogging.info("Destroying pool nodes")
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
nodes = driver.ex_get_nodes(domain.id)
if len(nodes) > 0:
for node in nodes:
plogging.info("- destroying {}".format(node.name))
nodes = driver.ex_destroy_node(node.id)
plogging.info("- in progress")
else:
plogging.info("- nothing to do")
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy node")
plogging.error(str(feedback))
def name_balancer(self, label, settings={}):
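        # e.g. label 'http' in blueprint 'web' at location 'EU6' would yield
        # 'http.web.eu6.balancer' (hypothetical values)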
return label \
+ '.' + self.blueprint['target'] \
+ '.' + self.facility.get_location_id().lower() \
+ '.balancer'
def _get_balancer(self, name):
"""
Retrieves a balancer attached to this blueprint
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
if driver is None:
return None
if domain is None:
return None
driver.ex_set_current_network_domain(domain.id)
if self._cache_balancers is None:
plogging.info("Listing balancers")
self._cache_balancers = driver.list_balancers()
plogging.info("- found {} balancers"
.format(len(self._cache_balancers)))
for balancer in self._cache_balancers:
if balancer.name.lower() == name.lower():
return balancer
return None
def _name_pool(self):
return self.blueprint['target'] \
+ '.' + self.facility.get_location_id().lower() \
+ '.pool'
def _get_pool(self):
"""
Retrieves the pool attached to this blueprint
"""
if 'pool' not in self.blueprint:
return None
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
name = self._name_pool()
if self._cache_pools is None:
plogging.info("Listing pools")
self._cache_pools = driver.ex_get_pools()
plogging.info("- found {} pools".format(len(self._cache_pools)))
for pool in self._cache_pools:
if pool.name.lower() == name.lower():
return pool
return None
def name_member(self, node):
return node.private_ips[0]
def _add_to_pool(self, node):
"""
Makes a node a new member of the pool
"""
if 'pool' not in self.blueprint:
return
pool = self._get_pool()
if pool is None:
return
domain = self.get_network_domain(self.blueprint['domain']['name'])
driver = self.plumbery.get_balancer_driver(self.get_region_id())
driver.ex_set_current_network_domain(domain.id)
plogging.info("Adding '{}' to pool '{}'".format(node.name, pool.name))
name = self.name_member(node)
members = driver.ex_get_pool_members(pool.id)
for member in members:
if member.name == name:
plogging.info("- already there")
return
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
try:
member = driver.ex_create_node(
network_domain_id=domain.id,
name=name,
ip=node.private_ips[0],
ex_description='#plumbery')
driver.ex_create_pool_member(
pool=pool,
node=member)
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
plogging.error(str(feedback))
else:
plogging.info("- unable to add to pool")
plogging.error(str(feedback))
raise
def _detach_node_from_internet(self, node):
"""
Destroys address translation for one node
:param node: node that was reachable from the internet
:type node: :class:`libcloud.common.Node`
"""
internal_ip = node.private_ips[0]
domain = self.get_network_domain(self.blueprint['domain']['name'])
for rule in self.region.ex_list_nat_rules(domain):
if rule.internal_ip == internal_ip:
plogging.info("Detaching node '{}' from the internet"
.format(node.name))
while True:
try:
self.region.ex_delete_nat_rule(rule)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return
else:
plogging.info("- unable to remove "
"address translation")
plogging.error(str(feedback))
break
for rule in self._list_firewall_rules():
if rule.name.lower().startswith(node.name.lower()):
plogging.info("Destroying firewall rule '{}'"
.format(rule.name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
self.region.ex_delete_firewall_rule(rule)
plogging.info("- in progress")
def _get_ipv4(self):
"""
Provides a free public IPv4 if possible
This function looks at current IPv4 addresses reserved for the
target network domain, and adds more if needed.
Example to reserve 8 IPv4 addresses in the fittings plan::
- redis:
domain:
name: myVDC
ipv4: 8
If the directive `auto` is used, then plumbery does not check the
maximum number of addresses that can be provided.
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
if domain is None:
return None
addresses = self._list_ipv4()
if len(addresses) > 0:
plogging.debug('Pool of public IPv4 addresses:')
            plogging.debug('- {} addresses have been reserved'.format(
len(addresses)))
for reserved in self.ex_list_reserved_public_ip_addresses(domain):
addresses.remove(reserved)
plogging.debug('- {} available'.format(len(addresses)))
if len(addresses) > 0:
plogging.debug('Using address: {}'.format(addresses[0]))
return addresses[0]
actual = len(self._list_ipv4())
if 'ipv4' in self.blueprint['domain']:
count = self.blueprint['domain']['ipv4']
else:
count = self.get_default('ipv4', 2)
if str(count).lower() == 'auto':
count = actual + 2
if count < 2 or count > 128:
plogging.warning("Invalid count of requested IPv4 public addresses")
return None
if actual >= count:
            plogging.error("Error: need more IPv4 addresses than allocated")
return None
plogging.info('Reserving additional public IPv4 addresses')
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return None
count = actual + 2
while actual < count:
try:
block = self.region.ex_add_public_ip_block_to_network_domain(
self.get_network_domain(self.blueprint['domain']['name']))
actual += int(block.size)
plogging.info("- reserved {} addresses"
.format(int(block.size)))
return block.base_ip
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
return None
# compensate for bug in Libcloud driver
elif 'RESOURCE_NOT_FOUND' in str(feedback):
actual += 2
continue
else:
plogging.info("- unable to reserve IPv4 public addresses")
plogging.error(str(feedback))
return None
def _list_ipv4(self):
"""
Lists public IPv4 addresses that have been assigned to a domain
:return: the full list of public IPv4 addresses assigned to the domain
:rtype: ``list`` of ``str`` or ``[]``
"""
addresses = []
while True:
try:
blocks = self.region.ex_list_public_ip_blocks(
self.get_network_domain(self.blueprint['domain']['name']))
for block in blocks:
splitted = block.base_ip.split('.')
                    for ticker in range(int(block.size)):
addresses.append('.'.join(splitted))
splitted[3] = str(int(splitted[3])+1)
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
else:
plogging.info("Unable to list IPv4 public addresses")
plogging.error(str(feedback))
return []
break
return addresses
def _release_ipv4(self):
"""
Releases public IPv4 addresses assigned to the blueprint
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
if len(self.region.ex_list_nat_rules(domain)) > 0:
return
blocks = self.region.ex_list_public_ip_blocks(
self.get_network_domain(self.blueprint['domain']['name']))
if len(blocks) < 1:
return
plogging.info('Releasing public IPv4 addresses')
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
return
for block in blocks:
while True:
try:
self.region.ex_delete_public_ip_block(block)
plogging.info('- in progress')
except Exception as feedback:
if 'RESOURCE_BUSY' in str(feedback):
time.sleep(10)
continue
elif 'HAS_DEPENDENCY' in str(feedback):
plogging.info("- not now - stuff at '{}' and beyond"
.format(block.base_ip))
elif 'RESOURCE_LOCKED' in str(feedback):
plogging.info("- not now - locked")
else:
plogging.info("- unable to release "
"IPv4 public addresses ")
plogging.error(str(feedback))
break
def _build_firewall_rules(self):
"""
Changes firewall settings to accept incoming traffic
This function adds firewall rules to allow traffic towards given
network. It looks at the ``accept`` settings in the blueprint to
identify all source networks.
Example in the fittings plan::
- web:
domain: *vdc1
ethernet:
name: gigafox.production
accept:
- gigafox.control
- dd-eu::EU6::other.network.there
        In this example, the firewall is configured so that any IP traffic
from the Ethernet network ``gigafox.control`` can reach the Ethernet
network ``gigafox.production``. One rule is created for
IPv4 and another rule is created for IPv6.
The second network that is configured is from another data centre
        in another region. This leverages the private network that
        interconnects all MCPs. For networks outside the current domain, only
one rule is added to allow IPv6 traffic. This is because IPv4 routing
is not allowed across multiple network domains.
"""
if 'accept' not in self.blueprint['ethernet']:
return True
destination = self.get_ethernet(self.blueprint['ethernet']['name'])
if destination is None:
return True
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=destination.private_ipv4_range_address,
ip_prefix_size=destination.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv6 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=destination.ipv6_range_address,
ip_prefix_size=destination.ipv6_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
for item in self.blueprint['ethernet']['accept']:
if isinstance(item, dict):
label = list(item)[0]
else:
label = str(item)
source = self.get_ethernet(label)
if source is None:
plogging.debug("Source network '{}' is unknown".format(label))
continue
# avoid name collisions across local, remote and off-shore networks
tokens = label.split('::')
while len(tokens) > 2:
tokens.pop(0)
source_name = '-'.join(tokens)
ruleIPv4Name = self.name_firewall_rule(
source_name, destination.name, 'IP')
shouldCreateRuleIPv4 = True
if source.location.name != destination.location.name:
shouldCreateRuleIPv4 = False
elif source.network_domain.name != destination.network_domain.name:
shouldCreateRuleIPv4 = False
ruleIPv6Name = self.name_firewall_rule(
source_name, destination.name, 'IPv6')
shouldCreateRuleIPv6 = True
for rule in self._list_firewall_rules():
if (shouldCreateRuleIPv4
and rule.name.lower() == ruleIPv4Name.lower()):
plogging.info("Creating firewall rule '{}'"
.format(rule.name))
plogging.info("- already there")
shouldCreateRuleIPv4 = False
continue
if (shouldCreateRuleIPv6
and rule.name.lower() == ruleIPv6Name.lower()):
plogging.info("Creating firewall rule '{}'"
.format(rule.name))
plogging.info("- already there")
shouldCreateRuleIPv6 = False
continue
if shouldCreateRuleIPv4:
plogging.info("Creating firewall rule '{}'"
.format(ruleIPv4Name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=source.private_ipv4_range_address,
ip_prefix_size=source.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
ruleIPv4 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv4Name,
location=destination.location,
network_domain=destination.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol='IP',
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
try:
self._ex_create_firewall_rule(
network_domain=destination.network_domain,
rule=ruleIPv4,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
if shouldCreateRuleIPv6:
plogging.info("Creating firewall rule '{}'"
.format(ruleIPv6Name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
sourceIPv6 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=source.ipv6_range_address,
ip_prefix_size=source.ipv6_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
ruleIPv6 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv6Name,
location=destination.location,
network_domain=destination.network_domain,
status='NORMAL',
ip_version='IPV6',
protocol='IP',
enabled='true',
source=sourceIPv6,
destination=destinationIPv6)
try:
self._ex_create_firewall_rule(
network_domain=destination.network_domain,
rule=ruleIPv6,
position='LAST')
plogging.info("- in progress")
except Exception as feedback:
if 'NAME_NOT_UNIQUE' in str(feedback):
plogging.info("- already there")
else:
plogging.info("- unable to create firewall rule")
plogging.error(str(feedback))
ruleName = 'CCDEFAULT.DenyExternalInboundIPv6'
for rule in self._list_firewall_rules():
if rule.name.lower() == ruleName.lower():
plogging.info("Disabling firewall rule '{}'".format(ruleName))
try:
if rule.enabled:
self.region.ex_set_firewall_rule_state(rule, False)
plogging.info("- in progress")
else:
plogging.info("- already there")
except Exception as feedback:
plogging.info("- unable to disable firewall rule")
plogging.error(str(feedback))
return True
def _destroy_firewall_rules(self):
"""
Destroys firewall rules
"""
if 'accept' not in self.blueprint['ethernet']:
return True
destinationLabel = self.blueprint['ethernet']['name']
for item in self.blueprint['ethernet']['accept']:
if isinstance(item, dict):
label = list(item)[0]
else:
label = str(item)
sourceLabel = label.split('::').pop()
ruleIPv4Name = self.name_firewall_rule(
sourceLabel, destinationLabel, 'IP')
ruleIPv6Name = self.name_firewall_rule(
sourceLabel, destinationLabel, 'IPv6')
for rule in self._list_firewall_rules():
if rule.name == ruleIPv4Name or rule.name == ruleIPv6Name:
plogging.info("Destroying firewall rule '{}'"
.format(rule.name))
if self.plumbery.safeMode:
plogging.info("- skipped - safe mode")
else:
try:
self.region.ex_delete_firewall_rule(rule)
plogging.info("- in progress")
except Exception as feedback:
if 'RESOURCE_NOT_FOUND' in str(feedback):
plogging.info("- not found")
else:
plogging.info("- unable to destroy "
"firewall rule")
plogging.error(str(feedback))
def name_firewall_rule(self, source, destination, protocol):
"""
Provides a name for a firewall rule
:param source: name of the source network
:type source: ``str``
:param destination: name of the destination network
:type destination: ``str``
:param protocol: the protocol that will flow
:type protocol: ``str``
Use this function to ensure consistent naming across firewall rules.
Example::
>>>source='gigafox.control'
>>>destination='gigafox.production'
>>>protocol='IP'
>>>domain.name_firewall_rule(source, destination, protocol)
'FromGigafoxControlToGigafoxProduction.IP.plumbery'
"""
source = ''.join(e for e in source.title()
if e.isalnum() or e == '_')
destination = ''.join(e for e in destination.title()
if e.isalnum() or e == '_')
if source == 'Internet':
return "{}.{}.plumbery".format(destination, protocol)
else:
return "From{}To{}.{}.plumbery".format(source,
destination,
protocol)
@classmethod
def parse_firewall_port(cls, port):
"""
Parses port definition for a firewall rule
:param port: string definition of a target port
:type port: ``str``
:return: elements of the port definition
This function analyses the provided string and returns
a tuple that can be used for firewall configuration.
Some examples:
>>>container.parse_firewall_port('icmp')
('ICMP', 'any', None, None)
>>>container.parse_firewall_port('tcp:80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port(':80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port('80')
('TCP', '80', '80', None)
>>>container.parse_firewall_port('udp:137..138')
('UDP', '137..138', '137', '138')
>>>container.parse_firewall_port('any')
('TCP', 'any', None, None)
"""
protocols = ('ip', 'icmp', 'tcp', 'udp')
tokens = port.lower().strip(':').split(':')
if len(tokens) > 1: # example: 'TCP:80'
protocol = tokens[0].upper()
port = tokens[1]
elif tokens[0] in protocols: # example: 'icmp'
protocol = tokens[0].upper()
port = 'any'
else: # example: '80'
protocol = 'TCP'
port = tokens[0]
if protocol.lower() not in protocols:
raise ValueError("'{}' is not a valid protocol"
.format(protocol))
tokens = port.split('..')
if len(tokens) == 1:
if tokens[0].lower() == 'any':
port_begin = None
else:
port_begin = tokens[0]
port_end = None
else:
port_begin = tokens[0]
port_end = tokens[1]
return (protocol, port, port_begin, port_end)
def _list_candidate_firewall_rules(self, node, ports=[]):
"""
Lists rules that should apply to one node
:param node: node that has to be reachable from the internet
:type node: :class:`libcloud.common.Node`
:param ports: the ports that have to be opened, or ``any``
:type ports: a ``list`` of ``str``
"""
domain = self.get_network_domain(self.blueprint['domain']['name'])
network = self.get_ethernet(self.blueprint['ethernet']['name'])
internal_ip = node.private_ips[0]
external_ip = None
for rule in self.region.ex_list_nat_rules(domain):
if rule.internal_ip == internal_ip:
external_ip = rule.external_ip
if external_ip is None:
return {}
candidates = {}
if len(ports) < 1:
ports = ['any']
for port in ports:
protocol, port, port_begin, port_end = \
self.parse_firewall_port(port)
ruleIPv4Name = self.name_firewall_rule(
'Internet',
node.name, protocol+'v4_'+port)
sourceIPv4 = DimensionDataFirewallAddress(
any_ip=True,
ip_address=network.private_ipv4_range_address,
ip_prefix_size=network.private_ipv4_range_size,
port_begin=None,
port_end=None,
address_list_id=None,
port_list_id=None)
destinationIPv4 = DimensionDataFirewallAddress(
any_ip=False,
ip_address=external_ip,
ip_prefix_size=None,
port_begin=port_begin,
port_end=port_end,
address_list_id=None,
port_list_id=None)
ruleIPv4 = DimensionDataFirewallRule(
id=uuid4(),
action='ACCEPT_DECISIVELY',
name=ruleIPv4Name,
location=network.location,
network_domain=network.network_domain,
status='NORMAL',
ip_version='IPV4',
protocol=protocol,
enabled='true',
source=sourceIPv4,
destination=destinationIPv4)
candidates[ruleIPv4Name] = ruleIPv4
return candidates
def _list_firewall_rules(self):
"""
Lists all existing rules for the current domain
"""
if len(self._cache_firewall_rules) < 1:
self._cache_firewall_rules = self.region.ex_list_firewall_rules(
self.get_network_domain(self.blueprint['domain']['name']))
return self._cache_firewall_rules
def _ex_create_firewall_rule(self, network_domain, rule, position):
create_node = ET.Element('createFirewallRule', {'xmlns': TYPES_URN})
ET.SubElement(create_node, "networkDomainId").text = network_domain.id
ET.SubElement(create_node, "name").text = rule.name
ET.SubElement(create_node, "action").text = rule.action
ET.SubElement(create_node, "ipVersion").text = rule.ip_version
ET.SubElement(create_node, "protocol").text = rule.protocol
# Setup source port rule
source = ET.SubElement(create_node, "source")
source_ip = ET.SubElement(source, 'ip')
if rule.source.any_ip:
source_ip.set('address', 'ANY')
else:
source_ip.set('address', rule.source.ip_address)
source_ip.set('prefixSize', str(rule.source.ip_prefix_size))
if rule.source.port_begin is not None:
source_port = ET.SubElement(source, 'port')
source_port.set('begin', rule.source.port_begin)
if rule.source.port_end is not None:
source_port.set('end', rule.source.port_end)
# Setup destination port rule
dest = ET.SubElement(create_node, "destination")
dest_ip = ET.SubElement(dest, 'ip')
if rule.destination.any_ip:
dest_ip.set('address', 'ANY')
else:
dest_ip.set('address', rule.destination.ip_address)
if rule.destination.ip_prefix_size is not None:
dest_ip.set('prefixSize', str(rule.destination.ip_prefix_size))
if rule.destination.port_begin is not None:
dest_port = ET.SubElement(dest, 'port')
dest_port.set('begin', rule.destination.port_begin)
if rule.destination.port_end is not None:
dest_port.set('end', rule.destination.port_end)
ET.SubElement(create_node, "enabled").text = 'true'
placement = ET.SubElement(create_node, "placement")
placement.set('position', position)
response = self.region.connection.request_with_orgId_api_2(
'network/createFirewallRule',
method='POST',
data=ET.tostring(create_node)).object
rule_id = None
for info in findall(response, 'info', TYPES_URN):
if info.get('name') == 'firewallRuleId':
rule_id = info.get('value')
rule.id = rule_id
return rule
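        # Rough shape of the request body assembled above (illustrative sketch;
        # the values are placeholders, the element names come from this method):
        #
        #   <createFirewallRule xmlns="...">
        #     <networkDomainId>...</networkDomainId>
        #     <name>FromWebToApp.IP.plumbery</name>
        #     <action>ACCEPT_DECISIVELY</action>
        #     <ipVersion>IPV4</ipVersion>
        #     <protocol>IP</protocol>
        #     <source><ip address="10.0.0.0" prefixSize="24"/></source>
        #     <destination><ip address="10.1.0.0" prefixSize="24"/></destination>
        #     <enabled>true</enabled>
        #     <placement position="LAST"/>
        #   </createFirewallRule>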
def ex_reserve_private_ip_addresses(self, vlan, address):
req = ET.Element('reservePrivateIpv4Address', {'xmlns': TYPES_URN})
ET.SubElement(req, "vlanId").text = vlan.id
ET.SubElement(req, "ipAddress").text = address
result = self.region.connection.request_with_orgId_api_2(
action='network/reservedPrivateIpv4Address',
method='POST',
data=ET.tostring(req)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_list_reserved_private_ip_addresses(self, vlan):
params = {}
params['vlanId'] = vlan.id
response = self.region.connection \
.request_with_orgId_api_2('network/reservedPrivateIpv4Address',
params=params).object
reserved = []
for element in findall(response, 'ipv4', TYPES_URN):
reserved.append(element.text)
return reserved
def ex_list_reserved_public_ip_addresses(self, network_domain):
params = {}
params['networkDomainId'] = network_domain.id
response = self.region.connection \
.request_with_orgId_api_2('network/reservedPublicIpv4Address',
params=params).object
reserved = []
for element in findall(response, 'ip', TYPES_URN):
reserved.append(element.text)
return reserved
| apache-2.0 | -8,525,752,551,770,169,000 | 33.397296 | 88 | 0.515647 | false |
michaelrice/gotland | setup.py | 1 | 2459 | # Copyright 2014 Michael Rice <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
with open('test-requirements.txt') as f:
required_for_tests = f.read().splitlines()
setup(
name='gotland',
version='0.4',
description='python bindings to the rabbitmq web based REST api',
long_description=read('README.rst'),
packages=['gotland', 'gotland.rabbit'],
url='https://github.com/michaelrice/gotland',
install_requires=required,
license='License :: OSI Approved :: Apache Software License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Distributed Computing',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
author='Michael Rice',
author_email='[email protected]',
test_suite='tests',
zip_safe=True,
tests_require=required_for_tests,
)
| apache-2.0 | -6,532,868,214,960,393,000 | 36.830769 | 76 | 0.651891 | false |
ioos/catalog-ckan | ckanext/ioos_theme/controllers/feedback.py | 1 | 3831 | #!/usr/bin/env python
'''
ckanext/ioos_theme/controllers/feedback.py
IOOS Theme Feedback Controller
'''
from ckan.lib.base import BaseController, render, _
from ckan.lib import helpers as h
from ckan.common import request
from ckanext.ioos_theme.lib import feedback
from pylons import config
import logging
import urllib
import urllib2
import json
class FeedbackController(BaseController):
'''
The FeedbackController renders a Feedback Form and accepts an HTTP POST to
/feedback with the Form parameters. On a POST it will flash a notice
thanking the user for their feedback and then redirect to the home page.
'''
def index(self, data=None, errors=None, error_summary=None, package_name=None):
'''
Returns a render for the feedback form.
:param dict data: Unused
        :param dict errors: Any validation errors from the submitted form
                            will be passed to the controller
:param dict error_summary: Summary of any validation errors
'''
name = ""
email = ""
feedback = ""
recaptcha_response = request.params.get('g-captcha-token')
url = 'https://www.google.com/recaptcha/api/siteverify'
values = {
'secret': config.get('feedback.site_secret', ''),
'response': recaptcha_response
}
url_data = urllib.urlencode(values)
req = urllib2.Request(url, url_data)
response = urllib2.urlopen(req)
result = json.load(response)
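        # The siteverify endpoint is expected to answer with a small JSON
        # document, roughly {"success": true, "challenge_ts": "...",
        # "hostname": "..."} (illustrative); only the 'success' flag is
        # inspected below.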
# If the HTTP request is POST
if request.params:
try:
# Left for reference during refactor to captcha V3
#if request.params['g-recaptcha-response']:
if result['success']:
return self._post_feedback()
else:
name = request.params['name']
email = request.params['email']
feedback = request.params['feedback']
h.flash_notice(_('Please fill out missing fields below.'))
except KeyError:
name = request.params['name']
email = request.params['email']
feedback = request.params['feedback']
h.flash_notice(_('Please fill out missing fields below.'))
data = data or {"name": "", "email": "", "feedback": ""}
data['name'] = name or ""
data['email'] = email or ""
data['feedback'] = feedback or ""
errors = errors or {}
error_summary = error_summary or {}
site_key = config.get('feedback.site_key', '')
token = config.get('feedback.g-captcha-token', '')
if not site_key:
logging.warning('Administrator must setup feedback.site_key')
vars = {
'package_name': package_name,
'data': data,
'errors': errors,
'error_summary': error_summary,
'feedback_site_key': site_key
}
return render('feedback/form.html', extra_vars=vars)
def _post_feedback(self):
'''
Redirects the user to the home page and flashes a message,
acknowledging the feedback.
'''
context = {
'name': request.params['name'],
'email': request.params['email'],
'feedback': request.params['feedback'],
'package_name': request.params.get('package_name'),
'referrer': request.referrer
}
feedback.send_feedback(context)
h.flash_notice(_('Thank you for your feedback'))
if context['package_name'] is None:
h.redirect_to(controller='home', action='index')
else:
h.redirect_to(controller='package', action='read', id=context['package_name'])
return
| agpl-3.0 | 8,250,284,174,334,944,000 | 35.141509 | 90 | 0.577656 | false |
ned14/Bugs-Everywhere-for-BEurtle | libbe/command/severity.py | 1 | 4051 | # Copyright (C) 2005-2012 Aaron Bentley <[email protected]>
# Chris Ball <[email protected]>
# Gianluca Montecchi <[email protected]>
# Marien Zwart <[email protected]>
# Thomas Gerigk <[email protected]>
# Tim Guirgies <[email protected]>
# W. Trevor King <[email protected]>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import libbe
import libbe.bug
import libbe.command
import libbe.command.util
class Severity (libbe.command.Command):
"""Change a bug's severity level
>>> import sys
>>> import libbe.bugdir
>>> bd = libbe.bugdir.SimpleBugDir(memory=False)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_bugdir(bd)
>>> cmd = Severity(ui=ui)
>>> bd.bug_from_uuid('a').severity
'minor'
>>> ret = ui.run(cmd, args=['wishlist', '/a'])
>>> bd.flush_reload()
>>> bd.bug_from_uuid('a').severity
'wishlist'
>>> ret = ui.run(cmd, args=['none', '/a'])
Traceback (most recent call last):
UserError: Invalid severity level: none
>>> ui.cleanup()
>>> bd.cleanup()
"""
name = 'severity'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.args.extend([
libbe.command.Argument(
name='severity', metavar='SEVERITY', default=None,
completion_callback=libbe.command.util.complete_severity),
libbe.command.Argument(
name='bug-id', metavar='BUG-ID', default=None,
repeatable=True,
completion_callback=libbe.command.util.complete_bug_id),
])
def _run(self, **params):
bugdir = self._get_bugdir()
for bug_id in params['bug-id']:
bug,dummy_comment = \
libbe.command.util.bug_comment_from_user_id(bugdir, bug_id)
if bug.severity != params['severity']:
try:
bug.severity = params['severity']
except ValueError, e:
if e.name != 'severity':
raise e
raise libbe.command.UserError(
'Invalid severity level: %s' % e.value)
return 0
def _long_help(self):
try: # See if there are any per-tree severity configurations
bd = self._get_bugdir()
except NotImplementedError:
pass # No tree, just show the defaults
longest_severity_len = max([len(s) for s in libbe.bug.severity_values])
severity_levels = []
for severity in libbe.bug.severity_values :
description = libbe.bug.severity_description[severity]
s = '%*s : %s' % (longest_severity_len, severity, description)
severity_levels.append(s)
ret = """
Show or change a bug's severity level.
If no severity is specified, the current value is printed. If a severity level
is specified, it will be assigned to the bug.
Severity levels are:
%s
You can override the list of allowed severities on a per-repository
basis. See `be set --help` for details.
""" % ('\n '.join(severity_levels))
return ret
| gpl-2.0 | 8,860,493,650,325,930,000 | 37.216981 | 79 | 0.596396 | false |
rkomartin/user-recs-example | util/process_movielens.py | 1 | 1215 | import json
import sys
from os.path import join
'''
Read movielens data into Veritable-ready json
'''
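# Assumed input format, based on the parsing in main() below: one rating per
# line, tab-separated, with the user id, movie id and rating in the first
# three columns (e.g. "196\t242\t3\t..."); any extra columns are ignored.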
def main(input_file, output_dir):
data = {}
columns = set()
with open(input_file) as fd:
for line in fd:
tokens = line.split('\t')
user_id = 'U{}'.format(tokens[0]).decode()
movie_id = 'M{}'.format(tokens[1]).decode()
rating = tokens[2]
if user_id not in data:
data[user_id] = { '_id': user_id }
data[user_id][movie_id] = rating
columns.add(movie_id)
# Add dummy data to ensure that each possible rating is observed at
# least once for each movie
for i in range(5):
user_id = 'FU{}'.format(i)
data[user_id] = dict([(m, str(i+1)) for m in columns])
data[user_id]['_id'] = user_id
rows = data.values()
schema = dict([(c, { 'type': 'categorical' }) for c in columns])
open(join(output_dir, 'movielens_data.json'), 'wb').write(
json.dumps(rows, indent=2))
open(join(output_dir, 'movielens_schema.json'), 'wb').write(
json.dumps(schema, indent=2))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| mit | -8,084,242,574,354,765,000 | 29.375 | 72 | 0.549794 | false |
nosyndicate/pytorchrl | pytorchrl/distributions/diagonal_gaussian.py | 1 | 4283 | import numpy as np
import torch
from pytorchrl.distributions.base import Distribution
from pytorchrl.misc.tensor_utils import constant
class DiagonalGaussian(Distribution):
"""
    Rather than a single distribution, this represents a collection of
    diagonal Gaussian distributions, one for each entry of the supplied
    means and log standard deviations.
"""
def __init__(self, means, log_stds):
"""
Parameters
----------
means (Variable):
log_stds (Variable):
"""
self.means = means
self.log_stds = log_stds
# dim is the dimension of action space
self.dim = self.means.size()[-1]
@classmethod
def from_dict(cls, means, log_stds):
"""
Parameters
----------
means (Variable):
log_std (Variable):
"""
return cls(means=means, log_stds=log_stds)
def entropy(self):
"""
        The entropy of a Gaussian distribution is given by
        1/2 * log(2 * \pi * e * sigma^2)
        = log(sqrt(2 * \pi * e) * sigma)
        = log(sigma) + log(sqrt(2 * \pi * e))
"""
return np.sum(self.log_stds.data.numpy() + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)
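        # Illustrative check (not part of the original code): for a single 1-D
        # Gaussian with log_std = 0 (sigma = 1) the expression above reduces to
        # log(sqrt(2 * pi * e)) ~= 1.4189, matching the closed form
        # 0.5 * log(2 * pi * e * sigma^2).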
def log_likelihood(self, a):
"""
Compute log likelihood of a.
Parameters
----------
a (Variable):
Returns
-------
logli (Variable)
"""
# First cast into float tensor
a = a.type(torch.FloatTensor)
# Convert into a sample of standard normal
zs = (a - self.means) / (self.log_stds.exp())
# TODO (ewei), I feel this equation is not correct.
# Mainly the first line
# TODO (ewei), still need to understand what is meaning of having
# -1 for axis in sum method, (same for numpy)
logli = - self.log_stds.sum(-1) - \
constant(0.5) * zs.pow(2).sum(-1) - \
constant(0.5) * constant(float(self.dim)) * constant(float(np.log(2 * np.pi)))
return logli
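        # Illustrative check: for a 1-D standard normal (mean 0, log_std 0)
        # evaluated at a = 0, the terms above give
        # 0 - 0 - 0.5 * log(2 * pi) ~= -0.9189, the familiar peak log density.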
def kl_div(self, other):
"""
Given the distribution parameters of two diagonal multivariate Gaussians,
compute their KL divergence (vectorized)
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Kullback.E2.80.93Leibler_divergence_for_multivariate_normal_distributions
In general, for two n-dimensional distributions, we have
D_KL(N1||N2) =
1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
Here, Σ_1 and Σ_2 are diagonal. Hence this equation can be simplified.
In terms of the parameters of this method,
determinant of diagonal matrix is product of diagonal, thus
- ln(det(Σ_2) / det(Σ_1)) = sum(2 * (log_stds_2 - log_stds_1), axis=-1)
inverse of diagonal matrix is the diagonal matrix of elements at diagonal inverted, thus
- (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) = sum((means_1 - means_2)^2 / vars_2, axis=-1)
trace is sum of the diagonal elements
- tr(Σ_2^{-1}Σ_1) = sum(vars_1 / vars_2, axis=-1)
Where
- vars_1 = exp(2 * log_stds_1)
- vars_2 = exp(2 * log_stds_2)
Combined together, we have
D_KL(N1||N2)
= 1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
= sum(1/2 * ((vars_1 - vars_2) / vars_2 + (means_1 - means_2)^2 / vars_2 + 2 * (log_stds_2 - log_stds_1)), axis=-1)
= sum( ((means_1 - means_2)^2 + vars_1 - vars_2) / (2 * vars_2) + (log_stds_2 - log_stds_1)), axis=-1)
Parameters
----------
other (DiagonalGaussian):
Returns
-------
kl_div (Variable):
"""
# Constant should wrap in Variable to multiply with another Variable
# TODO (ewei) kl seems have problem
variance = (constant(2.0) * self.log_stds).exp()
other_variance = (constant(2.0) * other.log_stds).exp()
numerator = (self.means - other.means).pow(2) + \
variance - other_variance
denominator = constant(2.0) * other_variance + constant(1e-8)
# TODO (ewei), -1 for sum has a big impact, need to figure out why
kl_div = (numerator / denominator + other.log_stds - self.log_stds).sum(-1)
return kl_div
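        # Illustrative check of the expression above: for two 1-D Gaussians
        # N(0, 1) and N(1, 1) it reduces to
        # ((0 - 1)^2 + 1 - 1) / (2 * 1) + (0 - 0) = 0.5, the textbook value.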
| mit | 3,443,545,097,000,983,600 | 32.234375 | 147 | 0.547485 | false |
qtproject/pyside-pyside | tests/QtCore/qflags_test.py | 1 | 4672 | #!/usr/bin/python
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for QFlags'''
import unittest
from PySide2.QtCore import Qt, QTemporaryFile, QFile, QIODevice, QObject
class QFlagTest(unittest.TestCase):
'''Test case for usage of flags'''
def testCallFunction(self):
f = QTemporaryFile()
self.assertTrue(f.open())
fileName = f.fileName()
f.close()
f = QFile(fileName)
self.assertEqual(f.open(QIODevice.Truncate | QIODevice.Text | QIODevice.ReadWrite), True)
om = f.openMode()
self.assertEqual(om & QIODevice.Truncate, QIODevice.Truncate)
self.assertEqual(om & QIODevice.Text, QIODevice.Text)
self.assertEqual(om & QIODevice.ReadWrite, QIODevice.ReadWrite)
self.assertTrue(om == QIODevice.Truncate | QIODevice.Text | QIODevice.ReadWrite)
f.close()
class QFlagOperatorTest(unittest.TestCase):
'''Test case for operators in QFlags'''
def testInvert(self):
'''QFlags ~ (invert) operator'''
self.assertEqual(type(~QIODevice.ReadOnly), QIODevice.OpenMode)
def testOr(self):
'''QFlags | (or) operator'''
self.assertEqual(type(QIODevice.ReadOnly | QIODevice.WriteOnly), QIODevice.OpenMode)
def testAnd(self):
'''QFlags & (and) operator'''
self.assertEqual(type(QIODevice.ReadOnly & QIODevice.WriteOnly), QIODevice.OpenMode)
def testIOr(self):
'''QFlags |= (ior) operator'''
flag = Qt.WindowFlags()
self.assertTrue(Qt.Widget == 0)
self.assertFalse(flag & Qt.Widget)
result = flag & Qt.Widget
self.assertTrue(result == 0)
flag |= Qt.WindowMinimizeButtonHint
self.assertTrue(flag & Qt.WindowMinimizeButtonHint)
def testInvertOr(self):
'''QFlags ~ (invert) operator over the result of an | (or) operator'''
self.assertEqual(type(~(Qt.ItemIsSelectable | Qt.ItemIsEditable)), Qt.ItemFlags)
def testEqual(self):
'''QFlags == operator'''
flags = Qt.Window
flags |= Qt.WindowMinimizeButtonHint
flag_type = (flags & Qt.WindowType_Mask)
self.assertEqual(flag_type, Qt.Window)
self.assertEqual(Qt.KeyboardModifiers(Qt.ControlModifier), Qt.ControlModifier)
def testOperatorBetweenFlags(self):
'''QFlags & QFlags'''
flags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
newflags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
self.assertTrue(flags & newflags)
def testOperatorDifferentOrder(self):
'''Different ordering of arguments'''
flags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
self.assertEqual(flags | Qt.ItemIsEnabled, Qt.ItemIsEnabled | flags)
class QFlagsOnQVariant(unittest.TestCase):
def testQFlagsOnQVariant(self):
o = QObject()
o.setProperty("foo", QIODevice.ReadOnly | QIODevice.WriteOnly)
self.assertEqual(type(o.property("foo")), QIODevice.OpenMode)
class QFlagsWrongType(unittest.TestCase):
def testWrongType(self):
'''Wrong type passed to QFlags binary operators'''
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags | '43')
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags & '43')
        self.assertRaises(TypeError, lambda: 'jabba' & Qt.NoItemFlags)
        self.assertRaises(TypeError, lambda: 'hut' & Qt.NoItemFlags)
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags & QObject())
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -8,054,306,986,531,099,000 | 37.61157 | 97 | 0.661173 | false |
h5py/h5py | h5py/tests/test_attrs_data.py | 1 | 7646 | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Attribute data transfer testing module
Covers all data read/write and type-conversion operations for attributes.
"""
import numpy as np
from .common import TestCase, ut
import h5py
from h5py import h5a, h5s, h5t
from h5py import File
from h5py._hl.base import is_empty_dataspace
class BaseAttrs(TestCase):
def setUp(self):
self.f = File(self.mktemp(), 'w')
def tearDown(self):
if self.f:
self.f.close()
class TestScalar(BaseAttrs):
"""
Feature: Scalar types map correctly to array scalars
"""
def test_int(self):
""" Integers are read as correct NumPy type """
self.f.attrs['x'] = np.array(1, dtype=np.int8)
out = self.f.attrs['x']
self.assertIsInstance(out, np.int8)
def test_compound(self):
""" Compound scalars are read as numpy.void """
dt = np.dtype([('a', 'i'), ('b', 'f')])
data = np.array((1, 4.2), dtype=dt)
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.void)
self.assertEqual(out, data)
self.assertEqual(out['b'], data['b'])
class TestArray(BaseAttrs):
"""
Feature: Non-scalar types are correctly retrieved as ndarrays
"""
def test_single(self):
""" Single-element arrays are correctly recovered """
data = np.ndarray((1,), dtype='f')
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, (1,))
def test_multi(self):
""" Rank-1 arrays are correctly recovered """
data = np.ndarray((42,), dtype='f')
data[:] = 42.0
data[10:35] = -47.0
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, (42,))
self.assertArrayEqual(out, data)
class TestTypes(BaseAttrs):
"""
Feature: All supported types can be stored in attributes
"""
def test_int(self):
""" Storage of integer types """
dtypes = (np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 42
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def test_float(self):
""" Storage of floating point types """
dtypes = tuple(np.dtype(x) for x in ('<f4', '>f4', '>f8', '<f8'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 42.3
self.f.attrs['x'] = data
out = self.f.attrs['x']
# TODO: Clean up after issue addressed !
print("dtype: ", out.dtype, dt)
print("value: ", out, data)
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def test_complex(self):
""" Storage of complex types """
dtypes = tuple(np.dtype(x) for x in ('<c8', '>c8', '<c16', '>c16'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = -4.2j + 35.9
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def test_string(self):
""" Storage of fixed-length strings """
dtypes = tuple(np.dtype(x) for x in ('|S1', '|S10'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 'h'
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertEqual(out[0], data[0])
def test_bool(self):
""" Storage of NumPy booleans """
data = np.ndarray((2,), dtype=np.bool_)
data[...] = True, False
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, data.dtype)
self.assertEqual(out[0], data[0])
self.assertEqual(out[1], data[1])
def test_vlen_string_array(self):
""" Storage of vlen byte string arrays"""
dt = h5py.string_dtype(encoding='ascii')
data = np.ndarray((2,), dtype=dt)
data[...] = "Hello", "Hi there! This is HDF5!"
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertEqual(out[0], data[0])
self.assertEqual(out[1], data[1])
def test_string_scalar(self):
""" Storage of variable-length byte string scalars (auto-creation) """
self.f.attrs['x'] = b'Hello'
out = self.f.attrs['x']
self.assertEqual(out, 'Hello')
self.assertEqual(type(out), str)
aid = h5py.h5a.open(self.f.id, b"x")
tid = aid.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII)
self.assertTrue(tid.is_variable_str())
def test_unicode_scalar(self):
""" Storage of variable-length unicode strings (auto-creation) """
self.f.attrs['x'] = u"Hello" + chr(0x2340) + u"!!"
out = self.f.attrs['x']
self.assertEqual(out, u"Hello" + chr(0x2340) + u"!!")
self.assertEqual(type(out), str)
aid = h5py.h5a.open(self.f.id, b"x")
tid = aid.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8)
self.assertTrue(tid.is_variable_str())
class TestEmpty(BaseAttrs):
def setUp(self):
BaseAttrs.setUp(self)
sid = h5s.create(h5s.NULL)
tid = h5t.C_S1.copy()
tid.set_size(10)
aid = h5a.create(self.f.id, b'x', tid, sid)
self.empty_obj = h5py.Empty(np.dtype("S10"))
def test_read(self):
self.assertEqual(
self.empty_obj, self.f.attrs['x']
)
def test_write(self):
self.f.attrs["y"] = self.empty_obj
self.assertTrue(is_empty_dataspace(h5a.open(self.f.id, b'y')))
def test_modify(self):
with self.assertRaises(IOError):
self.f.attrs.modify('x', 1)
def test_values(self):
# list() is for Py3 where these are iterators
values = list(self.f.attrs.values())
self.assertEqual(
[self.empty_obj], values
)
def test_items(self):
items = list(self.f.attrs.items())
self.assertEqual(
[(u"x", self.empty_obj)], items
)
def test_itervalues(self):
values = list(self.f.attrs.values())
self.assertEqual(
[self.empty_obj], values
)
def test_iteritems(self):
items = list(self.f.attrs.items())
self.assertEqual(
[(u"x", self.empty_obj)], items
)
class TestWriteException(BaseAttrs):
"""
Ensure failed attribute writes don't leave garbage behind.
"""
def test_write(self):
""" ValueError on string write wipes out attribute """
s = b"Hello\x00Hello"
try:
self.f.attrs['x'] = s
except ValueError:
pass
with self.assertRaises(KeyError):
self.f.attrs['x']
| bsd-3-clause | 2,816,667,813,688,763,400 | 28.183206 | 78 | 0.552577 | false |
quantumlib/OpenFermion | src/openfermion/utils/grid.py | 1 | 11136 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy
import scipy
import scipy.linalg
# Exceptions.
class OrbitalSpecificationError(Exception):
pass
class Grid:
"""
A multi-dimension grid of points with an assigned length scale.
This grid acts as a helper class for parallelpiped super cells. It
tracks a mapping from indices to grid points and stores the associated
reciprocal lattice with respect to the original real-space lattice.
This enables calculations with non-trivial unit cells.
Attributes:
        dimensions (int): Number of spatial dimensions the grid occupies
length (tuple of ints): d-length tuple specifying number of points
along each dimension.
shifts (list of ints): Integer shifts in position to center grid.
scale (ndarray): Vectors defining the super cell being simulated,
vectors are stored as columns in the matrix.
        volume (float): Total volume of the supercell parallelepiped.
num_points (int): Total number of points in the grid.
reciprocal_scale (ndarray): Vectors defining the reciprocal lattice.
The vectors are stored as the columns in the matrix.
"""
def __init__(self, dimensions, length, scale):
"""
Args:
dimensions (int): The number of dimensions the grid lives in.
length (int or tuple): The number of points along each grid axis
that will be taken in both reciprocal and real space.
If tuple, it is read for each dimension, otherwise assumed
uniform.
scale (float or ndarray): The total length of each grid dimension.
If a float is passed, the uniform cubic unit cell is assumed.
For an ndarray, dimensions independent vectors of the correct
dimension must be passed. We assume column vectors define
the supercell vectors.
"""
if not isinstance(dimensions, int) or dimensions <= 0:
raise ValueError(
'dimensions must be a positive int but was {} {}'.format(
type(dimensions), repr(dimensions)))
if ((not isinstance(length, int) or length < 0) and
(not isinstance(length, tuple)) and (not isinstance(length, list))):
raise ValueError('length must be a non-negative int or tuple '
'but was {} {}'.format(type(length), repr(length)))
if ((not isinstance(scale, float) or not scale > 0) and
(not isinstance(scale, numpy.ndarray))):
raise ValueError(
'scale must be a positive float or ndarray but was '
'{} {}'.format(type(scale), repr(scale)))
self.dimensions = dimensions
# If single integer, assume uniform
if isinstance(length, int):
self.length = (length,) * dimensions
else:
self.length = length
self.shifts = [self.length[i] // 2 for i in range(dimensions)]
# If single float, construct cubic unit cell
if isinstance(scale, float):
self.scale = numpy.diag([scale] * self.dimensions)
else:
self.scale = scale
# Compute the volume of the super cell
self.volume = numpy.abs(scipy.linalg.det(self.scale))
# Compute total number of points
self.num_points = numpy.prod(self.length)
# Compute the reciprocal lattice basis
self.reciprocal_scale = 2 * numpy.pi * scipy.linalg.inv(self.scale).T
def volume_scale(self):
"""
Returns:
float: The volume of a length-scale hypercube within the grid.
"""
return self.volume
def all_points_indices(self):
"""
Returns:
iterable[tuple[int]]:
The index-coordinate tuple of each point in the grid.
"""
return itertools.product(
*[range(self.length[i]) for i in range(self.dimensions)])
def position_vector(self, position_indices):
"""Given grid point coordinate, return position vector with dimensions.
Args:
position_indices (int|iterable[int]):
List or tuple of integers giving grid point coordinate.
Allowed values are ints in [0, grid_length).
Returns:
position_vector (numpy.ndarray[float])
"""
# Raise exceptions.
if isinstance(position_indices, int):
position_indices = [position_indices]
if not all(0 <= e < self.length[i]
for i, e in enumerate(position_indices)):
raise OrbitalSpecificationError(
'Position indices must be integers in [0, grid_length).')
# Compute position vector
vector = sum([
(float(n - self.shifts[i]) / self.length[i]) * self.scale[:, i]
for i, n in enumerate(position_indices)
])
return vector
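        # Illustrative example, following directly from the formula above: for
        # Grid(dimensions=1, length=4, scale=1.0) the shift is 2, so
        # position_vector(0) -> [-0.5] and position_vector(3) -> [0.25].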
def momentum_vector(self, momentum_indices, periodic=True):
"""Given grid point coordinate, return momentum vector with dimensions.
Args:
momentum_indices (list): integers giving momentum
indices. Allowed values are ints in [0, grid_length).
periodic (bool): Wrap the momentum indices according to periodicity
Returns:
momentum_vector: A numpy array giving the momentum vector with
dimensions.
"""
# Raise exceptions.
if isinstance(momentum_indices, int):
momentum_indices = [momentum_indices]
if (not all(0 <= e < self.length[i]
for i, e in enumerate(momentum_indices))):
raise OrbitalSpecificationError(
'Momentum indices must be integers in [0, grid_length).')
# Compute momentum vector.
momentum_ints = self.index_to_momentum_ints(momentum_indices)
vector = self.momentum_ints_to_value(momentum_ints, periodic)
return vector
def index_to_momentum_ints(self, index):
"""
Args:
index (tuple): d-dimensional tuple specifying index in the grid
Returns:
Integer momentum vector
"""
# Set baseline for grid between [-N//2, N//2]
momentum_int = [
index[i] - self.shifts[i] for i in range(self.dimensions)
]
return numpy.array(momentum_int, dtype=int)
def momentum_ints_to_index(self, momentum_ints):
"""
Args:
momentum_ints (tuple): d-dimensional tuple momentum integers
Returns:
d-dimensional tuples of indices
"""
indices = momentum_ints
# Shift to indices
indices = [n + self.shifts[i] for i, n in enumerate(indices)]
# Wrap dimensions
indices = [n % self.length[i] for i, n in enumerate(indices)]
return indices
def momentum_ints_to_value(self, momentum_ints, periodic=True):
"""
Args:
momentum_ints (tuple): d-dimensional tuple momentum integers
periodic (bool): Alias the momentum
Returns:
ndarray containing the momentum vector.
"""
# Alias the higher momentum modes
if periodic:
momentum_ints = self.index_to_momentum_ints(
self.momentum_ints_to_index(momentum_ints))
momentum_vector = sum([
n * self.reciprocal_scale[:, i] for i, n in enumerate(momentum_ints)
])
return momentum_vector
def orbital_id(self, grid_coordinates, spin=None):
"""Return the tensor factor of a orbital
with given coordinates and spin.
Args:
grid_coordinates: List or tuple of ints giving coordinates of grid
                element. Acceptable to provide an int (instead of a tuple or list)
for 1D case.
spin (bool): 0 means spin down and 1 means spin up.
If None, assume spinless model.
Returns:
tensor_factor (int):
tensor factor associated with provided orbital label.
"""
# Initialize.
if isinstance(grid_coordinates, int):
grid_coordinates = [grid_coordinates]
# Loop through dimensions of coordinate tuple.
tensor_factor = 0
for dimension, grid_coordinate in enumerate(grid_coordinates):
# Make sure coordinate is an integer in the correct bounds.
if (isinstance(grid_coordinate, int) and
grid_coordinate < self.length[dimension]):
tensor_factor += (grid_coordinate *
int(numpy.product(self.length[:dimension])))
else:
# Raise for invalid model.
raise OrbitalSpecificationError(
'Invalid orbital coordinates provided.')
# Account for spin and return.
if spin is None:
return tensor_factor
else:
tensor_factor *= 2
tensor_factor += spin
return tensor_factor
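        # Illustrative example: on a 2x2 spinless grid, orbital_id((1, 1)) is
        # 1 * 1 + 1 * 2 = 3; with spin=1 in the spinful model the same site
        # maps to tensor factor 2 * 3 + 1 = 7.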
def grid_indices(self, qubit_id, spinless):
"""This function is the inverse of orbital_id.
Args:
qubit_id (int): The tensor factor to map to grid indices.
spinless (bool): Whether to use the spinless model or not.
Returns:
grid_indices (numpy.ndarray[int]):
The location of the qubit on the grid.
"""
if not (numpy.product(self.length) * (2 - spinless) > qubit_id >= 0):
raise OrbitalSpecificationError('Invalid qubit_id provided.')
# Remove spin degree of freedom if it exists.
orbital_id = qubit_id
if not spinless:
orbital_id //= 2
# Get grid indices.
grid_indices = []
for dimension in range(self.dimensions):
remainder = (orbital_id %
int(numpy.product(self.length[:dimension + 1])))
grid_index = (remainder //
int(numpy.product(self.length[:dimension])))
grid_indices += [grid_index]
return grid_indices
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self.dimensions == other.dimensions and
(self.scale == other.scale).all() and
self.length == other.length)
def __ne__(self, other):
return not self == other
| apache-2.0 | -896,618,528,948,675,100 | 36.494949 | 80 | 0.592672 | false |
abetusk/www.meowcad.com | cgi/picModLibSentry.py | 1 | 1864 | #!/usr/bin/python
#
import os
import cgi
import cgitb
import sys
import meowaux as mew
import urllib
import Cookie
import json
cgitb.enable();
#print "Content-Type: text/html"
#print
cookie = Cookie.SimpleCookie()
cookie_hash = mew.getCookieHash( os.environ )
g_debug = False
def log_line( l ):
logf = open("/tmp/picmodlibsentry.log", "a")
logf.write( l + "\n")
logf.close()
def error_and_quit():
if g_debug:
log_line("error, quitting")
print "Status: 404 Not Found"
print
print "File not found"
sys.exit(0)
fields = cgi.FieldStorage()
if "data" not in fields:
if g_debug:
log_line("no data")
error_and_quit()
userId = None
sessionId = None
projectId = None
if ("userId" in fields) and ("sessionId" in fields):
if mew.authenticateSession( fields["userId"].value, fields["sessionId"].value ):
userId = fields["userId"].value
sessionId = fields["sessionId"].value
if "projectId" in fields:
projectId = fields["projectId"].value
if ( ("userId" in cookie_hash) and ("sessionId" in cookie_hash) and
( mew.authenticateSession( cookie_hash["userId"], cookie_hash["sessionId"] ) == 1) ):
userId = cookie_hash["userId"]
sessionId = cookie_hash["sessionId"]
if "projectId" in fields:
projectId = fields["projectId"].value
#raw_name = urllib.unquote( fields["data"].value )
raw_name = fields["data"].value
jsfnstr = mew.file_cascade_fn( userId, projectId, raw_name )
jsfn = json.loads( jsfnstr )
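# The helper is expected to return a small JSON status object; judging from
# the checks below, a successful lookup looks roughly like
# {"type": "success", "filename": "/path/to/pic.png"} (illustrative), and
# anything else is treated as an error.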
if jsfn["type"] != "success":
log_line( jsfnstr )
log_line( "raw_name: " + str(raw_name) )
error_and_quit()
fn = jsfn["filename"]
try:
with open( fn ) as pic_fd:
d = pic_fd.read()
print "Content-Type: image/png"
print
print d
except IOError as e:
if g_debug:
s_e = str(e)
log_line("error opening file (2) " + fileId + ", got '" + s_e + "'")
error_and_quit()
| agpl-3.0 | -7,365,819,750,410,541,000 | 20.929412 | 90 | 0.65397 | false |
alphapigger/igetui | igetui/google/protobuf/message_factory.py | 1 | 4235 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages."""
__author__ = '[email protected] (Matt Toia)'
from . import descriptor_database
from . import descriptor_pool
from . import message
from . import reflection
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self):
"""Initializes a new factory."""
self._classes = {}
def GetPrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor.full_name not in self._classes:
result_class = reflection.GeneratedProtocolMessageType(
descriptor.name.encode('ascii', 'ignore'),
(message.Message,),
{'DESCRIPTOR': descriptor})
self._classes[descriptor.full_name] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
return self._classes[descriptor.full_name]
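    # Illustrative usage (hypothetical names): once the relevant file protos
    # have been added to the factory's pool, something like
    #   desc = pool.FindMessageTypeByName('mypackage.MyMessage')
    #   MyMessage = factory.GetPrototype(desc)
    #   message = MyMessage()
    # would build the generated class and instantiate it.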
_DB = descriptor_database.DescriptorDatabase()
_POOL = descriptor_pool.DescriptorPool(_DB)
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: A sequence of file protos to build messages out of.
Returns:
A dictionary containing all the message types in the files mapping the
fully qualified name to a Message subclass for the descriptor.
"""
result = {}
for file_proto in file_protos:
_DB.Add(file_proto)
for file_proto in file_protos:
for desc in _GetAllDescriptors(file_proto.message_type, file_proto.package):
result[desc.full_name] = _FACTORY.GetPrototype(desc)
return result
def _GetAllDescriptors(desc_protos, package):
"""Gets all levels of nested message types as a flattened list of descriptors.
Args:
desc_protos: The descriptor protos to process.
package: The package where the protos are defined.
Yields:
Each message descriptor for each nested type.
"""
for desc_proto in desc_protos:
name = '.'.join((package, desc_proto.name))
yield _POOL.FindMessageTypeByName(name)
for nested_desc in _GetAllDescriptors(desc_proto.nested_type, name):
yield nested_desc
| mit | -3,856,056,638,776,542,700 | 35.477876 | 80 | 0.716883 | false |
ama-jharrison/agdc | agdc/api/source/main/python/datacube/api/tool/retrieve_dataset_stack.py | 1 | 12976 | #!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
import os
from datacube.api import dataset_type_arg, writeable_dir, output_format_arg
from datacube.api.model import DatasetType
from datacube.api.tool import CellTool
from datacube.api.utils import get_mask_pqa, get_mask_wofs, get_dataset_data_masked, format_date, OutputFormat, \
get_mask_vector_for_cell
from datacube.api.utils import get_dataset_band_stack_filename
from datacube.api.utils import get_band_name_union, get_band_name_intersection
from datacube.api.utils import get_dataset_ndv, get_dataset_datatype, get_dataset_metadata
from enum import Enum
_log = logging.getLogger()
class BandListType(Enum):
__order__ = "EXPLICIT ALL COMMON"
EXPLICIT = "EXPLICIT"
ALL = "ALL"
COMMON = "COMMON"
class RetrieveDatasetStackTool(CellTool):
def __init__(self, name):
# Call method on super class
# super(self.__class__, self).__init__(name)
CellTool.__init__(self, name)
self.dataset_type = None
self.bands = None
self.output_directory = None
self.overwrite = None
self.list_only = None
self.output_format = None
def setup_arguments(self):
# Call method on super class
# super(self.__class__, self).setup_arguments()
CellTool.setup_arguments(self)
self.parser.add_argument("--dataset-type", help="The type(s) of dataset to retrieve",
action="store",
dest="dataset_type",
type=dataset_type_arg,
choices=self.get_supported_dataset_types(), default=DatasetType.ARG25, required=True,
metavar=" ".join([s.name for s in self.get_supported_dataset_types()]))
group = self.parser.add_mutually_exclusive_group()
# TODO explicit list of bands
# group.add_argument("--bands", help="List of bands to retrieve", action="store")
group.add_argument("--bands-all", help="Retrieve all bands with NULL values where the band is N/A",
action="store_const", dest="bands", const=BandListType.ALL)
group.add_argument("--bands-common", help="Retrieve only bands in common across all satellites",
action="store_const", dest="bands", const=BandListType.COMMON)
self.parser.set_defaults(bands=BandListType.ALL)
self.parser.add_argument("--output-directory", help="Output directory", action="store", dest="output_directory",
type=writeable_dir, required=True)
self.parser.add_argument("--overwrite", help="Over write existing output file", action="store_true",
dest="overwrite", default=False)
self.parser.add_argument("--list-only",
help="List the datasets that would be retrieved rather than retrieving them",
action="store_true", dest="list_only", default=False)
self.parser.add_argument("--output-format", help="The format of the output dataset",
action="store",
dest="output_format",
type=output_format_arg,
choices=OutputFormat, default=OutputFormat.GEOTIFF,
metavar=" ".join([f.name for f in OutputFormat]))
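        # Illustrative invocation (cell, date-range and satellite arguments are
        # inherited from CellTool and omitted here):
        #   retrieve_dataset_stack.py --dataset-type ARG25 --bands-common \
        #       --output-directory /tmp/stacks --output-format GEOTIFF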
def process_arguments(self, args):
# Call method on super class
# super(self.__class__, self).process_arguments(args)
CellTool.process_arguments(self, args)
self.dataset_type = args.dataset_type
if args.bands == BandListType.ALL:
self.bands = get_band_name_union(self.dataset_type, self.satellites)
else:
self.bands = get_band_name_intersection(self.dataset_type, self.satellites)
self.output_directory = args.output_directory
self.overwrite = args.overwrite
self.list_only = args.list_only
self.output_format = args.output_format
def log_arguments(self):
# Call method on super class
# super(self.__class__, self).log_arguments()
CellTool.log_arguments(self)
_log.info("""
datasets to retrieve = {dataset_type}
bands to retrieve = {bands}
output directory = {output}
over write existing = {overwrite}
list only = {list_only}
output format = {output_format}
""".format(dataset_type=self.dataset_type.name,
bands=self.bands,
output=self.output_directory,
overwrite=self.overwrite,
list_only=self.list_only,
output_format=self.output_format.name))
def get_tiles(self):
return list(self.get_tiles_from_db())
def get_tiles_from_db(self):
from datacube.api.query import list_tiles
x_list = [self.x]
y_list = [self.y]
dataset_types = [self.dataset_type]
if self.mask_pqa_apply and DatasetType.PQ25 not in dataset_types:
dataset_types.append(DatasetType.PQ25)
if self.mask_wofs_apply and DatasetType.WATER not in dataset_types:
dataset_types.append(DatasetType.WATER)
for tile in list_tiles(x=x_list, y=y_list,
acq_min=self.acq_min, acq_max=self.acq_max,
satellites=[satellite for satellite in self.satellites],
dataset_types=dataset_types):
yield tile
def go(self):
# If we are applying a vector mask then calculate it not (once as it is the same for all tiles)
mask = None
if self.mask_vector_apply:
            mask = get_mask_vector_for_cell(self.x, self.y,
                                            self.mask_vector_file,
                                            self.mask_vector_layer,
                                            self.mask_vector_feature)
# TODO move the dicking around with bands stuff into utils?
import gdal
driver = raster = None
metadata = None
data_type = ndv = None
tiles = self.get_tiles()
_log.info("Total tiles found [%d]", len(tiles))
for band_name in self.bands:
_log.info("Creating stack for band [%s]", band_name)
relevant_tiles = []
for tile in tiles:
dataset = self.dataset_type in tile.datasets and tile.datasets[self.dataset_type] or None
if not dataset:
_log.info("No applicable [%s] dataset for [%s]", self.dataset_type.name, tile.end_datetime)
continue
if band_name in [b.name for b in tile.datasets[self.dataset_type].bands]:
relevant_tiles.append(tile)
_log.info("Total tiles for band [%s] is [%d]", band_name, len(relevant_tiles))
for index, tile in enumerate(relevant_tiles, start=1):
dataset = tile.datasets[self.dataset_type]
assert dataset
band = dataset.bands[band_name]
assert band
if self.list_only:
_log.info("Would stack band [%s] from dataset [%s]", band.name, dataset.path)
continue
pqa = (self.mask_pqa_apply and DatasetType.PQ25 in tile.datasets) and tile.datasets[DatasetType.PQ25] or None
wofs = (self.mask_wofs_apply and DatasetType.WATER in tile.datasets) and tile.datasets[DatasetType.WATER] or None
if self.dataset_type not in tile.datasets:
_log.debug("No [%s] dataset present for [%s] - skipping", self.dataset_type.name, tile.end_datetime)
continue
filename = os.path.join(self.output_directory,
get_dataset_band_stack_filename(dataset, band,
output_format=self.output_format,
mask_pqa_apply=self.mask_pqa_apply,
mask_wofs_apply=self.mask_wofs_apply,
mask_vector_apply=self.mask_vector_apply))
if not metadata:
metadata = get_dataset_metadata(dataset)
assert metadata
if not data_type:
data_type = get_dataset_datatype(dataset)
assert data_type
if not ndv:
ndv = get_dataset_ndv(dataset)
assert ndv
if not driver:
if self.output_format == OutputFormat.GEOTIFF:
driver = gdal.GetDriverByName("GTiff")
elif self.output_format == OutputFormat.ENVI:
driver = gdal.GetDriverByName("ENVI")
assert driver
if not raster:
if self.output_format == OutputFormat.GEOTIFF:
raster = driver.Create(filename, metadata.shape[0], metadata.shape[1], len(tiles), data_type, options=["BIGTIFF=YES", "INTERLEAVE=BAND"])
elif self.output_format == OutputFormat.ENVI:
raster = driver.Create(filename, metadata.shape[0], metadata.shape[1], len(tiles), data_type, options=["INTERLEAVE=BSQ"])
assert raster
# NOTE: could do this without the metadata!!
raster.SetGeoTransform(metadata.transform)
raster.SetProjection(metadata.projection)
raster.SetMetadata(self.generate_raster_metadata())
# mask = None
if pqa:
mask = get_mask_pqa(pqa, self.mask_pqa_mask, mask=mask)
if wofs:
mask = get_mask_wofs(wofs, self.mask_wofs_mask, mask=mask)
_log.info("Stacking [%s] band data from [%s] with PQA [%s] and PQA mask [%s] and WOFS [%s] and WOFS mask [%s] to [%s]",
band.name, dataset.path,
pqa and pqa.path or "",
pqa and self.mask_pqa_mask or "",
wofs and wofs.path or "", wofs and self.mask_wofs_mask or "",
filename)
data = get_dataset_data_masked(dataset, mask=mask, ndv=ndv)
_log.debug("data is [%s]", data)
stack_band = raster.GetRasterBand(index)
stack_band.SetDescription(os.path.basename(dataset.path))
stack_band.SetNoDataValue(ndv)
stack_band.WriteArray(data[band])
stack_band.ComputeStatistics(True)
stack_band.SetMetadata({"ACQ_DATE": format_date(tile.end_datetime), "SATELLITE": dataset.satellite.name})
stack_band.FlushCache()
del stack_band
if raster:
raster.FlushCache()
del raster
raster = None
def generate_raster_metadata(self):
return {
"X_INDEX": "{x:03d}".format(x=self.x),
"Y_INDEX": "{y:04d}".format(y=self.y),
"DATASET_TYPE": self.dataset_type.name,
"ACQUISITION_DATE": "{acq_min} to {acq_max}".format(acq_min=format_date(self.acq_min), acq_max=format_date(self.acq_max)),
"SATELLITES": " ".join([s.name for s in self.satellites]),
"PIXEL_QUALITY_FILTER": self.mask_pqa_apply and " ".join([mask.name for mask in self.mask_pqa_mask]) or "",
"WATER_FILTER": self.mask_wofs_apply and " ".join([mask.name for mask in self.mask_wofs_mask]) or ""
}
def format_date_time(d):
from datetime import datetime
if d:
return datetime.strftime(d, "%Y-%m-%d %H:%M:%S")
return None
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
RetrieveDatasetStackTool("Retrieve Dataset Stack").run()
| apache-2.0 | 1,836,849,953,270,979,000 | 38.560976 | 161 | 0.556412 | false |
gtrdotmcs/python-withings | tests/test_withings_measures.py | 1 | 1458 | import time
import unittest
from withings import WithingsMeasureGroup, WithingsMeasures
class TestWithingsMeasures(unittest.TestCase):
def test_withings_measures_init(self):
"""
Check that WithingsMeasures create groups correctly and that the
update time is parsed correctly
"""
data = {
'updatetime': 1409596058,
'measuregrps': [
{'attrib': 2, 'date': 1409361740, 'category': 1,
'measures': [{'unit': -1, 'type': 1, 'value': 860}],
'grpid': 111111111},
{'attrib': 2, 'date': 1409361740, 'category': 1,
'measures': [{'unit': -2, 'type': 4, 'value': 185}],
'grpid': 111111112}
]
}
measures = WithingsMeasures(data)
self.assertEqual(type(measures), WithingsMeasures)
self.assertEqual(measures.data, data)
self.assertEqual(type(measures.measuregrps), list)
self.assertEqual(len(measures.measuregrps), 2)
self.assertEqual(measures.measuregrps[0], data['measuregrps'][0])
self.assertEqual(measures.measuregrps[1], data['measuregrps'][1])
self.assertEqual(len(measures), 2)
self.assertEqual(type(measures[0]), WithingsMeasureGroup)
self.assertEqual(measures[0].weight, 86.0)
self.assertEqual(measures[1].height, 1.85)
self.assertEqual(measures.updatetime.timestamp, 1409596058)
| mit | -9,134,735,069,673,304,000 | 41.882353 | 73 | 0.606996 | false |
ProjetPP/PPP-QuestionParsing-Grammatical | tests/test_dependencyTree.py | 1 | 4775 | import json
from nltk.stem.wordnet import WordNetLemmatizer
from ppp_questionparsing_grammatical import Word, DependenciesTree, TreeGenerator, computeTree
import data
from unittest import TestCase
class DependenciesTreeTests(TestCase):
########
# Word #
########
def testBasicWordConstructor1(self):
w=Word('foo', 1, 'bar')
self.assertEqual(w.word, 'foo')
self.assertEqual(w.index, 1)
self.assertEqual(w.pos, 'bar')
self.assertEqual(str(w), "(foo, 1, bar)")
w.append('aaa')
self.assertEqual(Word('foo aaa', 1, 'bar'), w)
def testPOS(self):
for pos in {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'}:
w = Word('foo', 1, pos)
self.assertTrue(w.isVerb())
self.assertFalse(w.isNoun())
for pos in {'NN', 'NNS', 'NNP', 'NNPS'}:
w = Word('foo', 1, pos)
self.assertFalse(w.isVerb())
self.assertTrue(w.isNoun())
###################
# Dependency tree #
###################
def testBasicTreeConstructor(self):
n = DependenciesTree('foo', 1)
self.assertEqual(n.wordList, [Word('foo', 1)])
self.assertEqual(n.namedEntityTag, 'undef')
self.assertEqual(n.dependency, 'undef')
self.assertEqual(n.child, [])
self.assertEqual(n.text, "")
self.assertEqual(n.parent, None)
self.assertEqual(n.subtreeType, 'undef')
self.assertEqual(n.dfsTag, 0)
self.assertFalse(n.isVerb())
self.assertFalse(n.isNoun())
n.appendWord('bar')
self.assertEqual(str(DependenciesTree('foo bar', 1)), str(n))
def testTreePos(self):
n = DependenciesTree('foo', 1)
n.wordList += [Word('eat', 2, 'VB'), Word('bar', 3)]
self.assertTrue(n.isVerb())
self.assertFalse(n.isNoun())
n = DependenciesTree('foo', 1)
n.wordList += [Word('broomstick', 2, 'NN'), Word('bar', 3)]
self.assertFalse(n.isVerb())
self.assertTrue(n.isNoun())
###############
# computeTree #
###############
def testStr1(self):
tree=computeTree(data.give_john_smith())
self.maxDiff=None
tree.sort()
self.assertEqual(str(tree), data.give_john_smith_string())
###############
# Merge #
###############
def testMerge(self):
root1 = DependenciesTree('root', 1)
root2 = DependenciesTree('root', 2)
node1 = DependenciesTree('n', 1, 'tag1', 'stype1', 'dep1', [DependenciesTree('childn', 1)])
node1.parent = root1
root1.child += [node1]
node2 = DependenciesTree('n', 2, 'tag2', 'stype2', 'dep2', [DependenciesTree('childn', 2)])
node2.parent = root2
root2.child += [node2]
node1.merge(node2, True)
self.assertEqual(len(root2.child), 0)
self.assertEqual(len(root1.child), 1)
self.assertEqual(len(node1.child), 2)
self.assertEqual(node1.wordList, [Word('n', 1), Word('n', 2)])
self.assertEqual(node1.namedEntityTag, 'tag1')
self.assertEqual(node1.dependency, 'dep1')
self.assertEqual(node1.parent, root1)
self.assertEqual(node1.subtreeType, 'stype1')
self.assertEqual(node1.dfsTag, 0)
###############
# correctTree #
###############
def testAddNamedEntityTag1(self):
foo1 = DependenciesTree('foo1', 1, namedEntityTag='42')
foo2 = DependenciesTree('foo2', 3, namedEntityTag='42')
bar = DependenciesTree('bar', 2, namedEntityTag='undef', dependency = 'nn', parent = foo1)
generator = TreeGenerator(None)
generator.nameToNodes = {('foo1',1) : foo1, ('bar',2) : bar, ('foo2', 3) : foo2}
generator._correctTree(foo1)
self.assertEqual(bar.namedEntityTag, '42')
def testAddNamedEntityTag2(self):
foo1 = DependenciesTree('foo1', 1, namedEntityTag='42')
foo2 = DependenciesTree('foo2', 3, namedEntityTag='42')
bar = DependenciesTree('bar', 2, namedEntityTag='27', dependency = 'nn', parent = foo1)
generator = TreeGenerator(None)
generator.nameToNodes = {('foo1',1) : foo1, ('bar',2) : bar, ('foo2', 3) : foo2}
generator._correctTree(foo1)
self.assertEqual(bar.namedEntityTag, '27')
def testAddNamedEntityTag3(self):
foo1 = DependenciesTree('foo1', 1, namedEntityTag='42')
foo2 = DependenciesTree('foo2', 3, namedEntityTag='42')
bar = DependenciesTree('bar', 2, namedEntityTag='undef', dependency = 'amod', parent = foo1)
generator = TreeGenerator(None)
generator.nameToNodes = {('foo1',1) : foo1, ('bar',2) : bar, ('foo2', 3) : foo2}
generator._correctTree(foo1)
self.assertEqual(bar.namedEntityTag, 'undef')
| agpl-3.0 | 1,798,899,934,578,973,200 | 37.2 | 100 | 0.580105 | false |
janghe11/IoT_Hands-On-Lab | 02_iot-raspbian/05_rotary-led.py | 1 | 1863 | # Adjust LED brightness by rotating Potentiometer
# GrovePi + Rotary Angle Sensor (Potentiometer) + LED
# http://www.seeedstudio.com/wiki/Grove_-_Rotary_Angle_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Rotary Angle Sensor to analog port A2
potentiometer = 2
# Connect the LED to digital port D4
led = 4
grovepi.pinMode(led,"OUTPUT")
time.sleep(1)
i = 0
while True:
try:
# Read resistance from Potentiometer
i = grovepi.analogRead(potentiometer)
print(i)
# Send PWM signal to LED
grovepi.analogWrite(led,i//4)
except IOError:
print("Error") | mit | 6,494,753,258,044,894,000 | 32.890909 | 103 | 0.760601 | false |
enthought/traitsgui | examples/dock/dock_test2.py | 1 | 1977 | #-------------------------------------------------------------------------------
#
# Test the DockWindow.
#
# Written by: David C. Morrill
#
# Date: 10/20/2005
#
# (c) Copyright 2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import sys
from enthought.traits.api \
import *
from enthought.traits.ui.api \
import *
from enthought.traits.ui.menu \
import *
#-------------------------------------------------------------------------------
# 'TestDock' class:
#-------------------------------------------------------------------------------
class TestDock ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
button1 = Button
button2 = Button
button3 = Button
button4 = Button
button5 = Button
button6 = Button
#---------------------------------------------------------------------------
# Traits view definitions:
#---------------------------------------------------------------------------
view = View( [ 'button1' ],
[ 'button2' ],
[ 'button3' ],
[ 'button4' ],
[ 'button5' ],
[ 'button6' ],
title = 'DockWindow Test',
resizable = True,
width = 0.5,
height = 0.5,
buttons = NoButtons )
#-------------------------------------------------------------------------------
# Run the test program:
#-------------------------------------------------------------------------------
if __name__ == '__main__':
TestDock().configure_traits()
| bsd-3-clause | 577,342,658,993,957,100 | 28.954545 | 80 | 0.254932 | false |
emmanvg/cti-stix-elevator | stix2elevator/convert_pattern.py | 1 | 89308 | import datetime
import re
import sys
from cybox.objects.account_object import Account
from cybox.objects.address_object import Address
from cybox.objects.archive_file_object import ArchiveFile
from cybox.objects.domain_name_object import DomainName
from cybox.objects.email_message_object import EmailMessage
from cybox.objects.file_object import File
from cybox.objects.http_session_object import HostField, HTTPSession
from cybox.objects.mutex_object import Mutex
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.network_packet_object import NetworkPacket
from cybox.objects.network_socket_object import NetworkSocket
from cybox.objects.process_object import Process
from cybox.objects.unix_user_account_object import UnixUserAccount
from cybox.objects.uri_object import URI
from cybox.objects.win_computer_account_object import WinComputerAccount
from cybox.objects.win_executable_file_object import WinExecutableFile
from cybox.objects.win_process_object import WinProcess
from cybox.objects.win_registry_key_object import WinRegistryKey
from cybox.objects.win_service_object import WinService
from six import text_type
import stix2
from stix2.patterns import (BasicObjectPathComponent, ListObjectPathComponent,
ObjectPath, ObservationExpression,
QualifiedObservationExpression,
ReferenceObjectPathComponent, _BooleanExpression,
_ComparisonExpression,
_CompoundObservationExpression, _Constant)
import stixmarx
from stix2elevator.common import ADDRESS_FAMILY_ENUMERATION, SOCKET_OPTIONS
from stix2elevator.convert_cybox import split_into_requests_and_responses
from stix2elevator.ids import (add_object_id_value, exists_object_id_key,
get_id_value, get_object_id_value)
from stix2elevator.options import error, get_option_value, info, warn
from stix2elevator.utils import identifying_info, map_vocabs_to_label
from stix2elevator.vocab_mappings import WINDOWS_PEBINARY
if sys.version_info > (3,):
long = int
KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS = False
KEEP_INDICATORS_USED_IN_COMPOSITE_INDICATOR_EXPRESSION = True
class BasicObjectPathComponentForElevator(BasicObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ListObjectPathComponentForElevator(ListObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ReferenceObjectPathComponentForElevator(ReferenceObjectPathComponent):
@staticmethod
def create_ObjectPathComponent(component_name):
if component_name.endswith("_ref"):
return ReferenceObjectPathComponentForElevator(component_name)
elif component_name.find("[") != -1:
parse1 = component_name.split("[")
return ListObjectPathComponentForElevator(parse1[0], parse1[1][:-1])
else:
return BasicObjectPathComponentForElevator(component_name, False)
class ObjectPathForElevator(ObjectPath):
def toSTIX21(self):
current_cyber_observable_type = self.object_type_name
for x in self.property_path:
if x.property_name == "extensions":
continue
if current_cyber_observable_type == "file":
if (x.property_name == "is_encrypted" or
x.property_name == "encryption_algorithm" or
x.property_name == "decryption_key"):
print(
"Expression contains the property " + x.property_name + ", for a file, which is not in STIX 2.1")
elif x.property_name == "archive-ext" or x.property_name == "raster-image-ext":
current_cyber_observable_type = x.property_name
elif x.property_name == "contains_refs":
current_cyber_observable_type = "file"
elif x.property_name == "parent_directory_ref":
current_cyber_observable_type = "directory"
elif current_cyber_observable_type == "directory":
if x.property_name == "contains_refs":
# TODO - what if it is a directory?
current_cyber_observable_type = "file"
elif current_cyber_observable_type == "archive-ext":
if x.property_name == "version":
print("Expression contains the property version, for a file.archive-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "raster-image-ext":
if x.property_name == "image_compression_algorithm":
print(
"Expression contains the property image_compression_algorithm, for a file.raster-image-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "network_traffic":
if x.property_name == "socket-ext":
current_cyber_observable_type = x.property_name
elif current_cyber_observable_type == "socket-ext":
if x.property_name == "protocol_family":
print(
"Expression contains the property protocol_familys, for a network_traffic:socket-ext, which is not in STIX 2.1")
elif current_cyber_observable_type == "process":
if x.property_name == "name" or x.property_name == "arguments":
print(
"Expression contains the property " + x.property_name + ", for a process, which is not in STIX 2.1")
elif x.property_name == "binary_ref":
x.property_name = "image_ref"
elif x.property_name == "opened_connection_refs":
current_cyber_observable_type = "network_traffic"
elif x.property_name == 'creator_user_ref':
current_cyber_observable_type = "user_account"
elif x.property_name == 'binary_ref':
current_cyber_observable_type = "file"
elif x.property_name == 'windows-service-ext':
current_cyber_observable_type = 'windows-service-ext'
elif current_cyber_observable_type == 'windows-service-ext':
if x.property_name == 'service_dll_refs':
current_cyber_observable_type = "file"
elif current_cyber_observable_type == "user_account":
if x.property_name == "password_last_changed":
x.property_name = "credential_last_changed"
return self
class ComparisonExpressionForElevator(_ComparisonExpression):
# overrides, so IdrefPlaceHolder can be handled
def __init__(self, operator, lhs, rhs, negated=False):
self.operator = operator
if operator == "=" and isinstance(rhs, stix2.ListConstant):
warn("apply_condition assumed to be 'ANY' in %s",
721, identifying_info(get_dynamic_variable("current_observable")))
self.operator = "IN"
if isinstance(lhs, stix2.ObjectPath):
self.lhs = lhs
else:
self.lhs = stix2.ObjectPath.make_object_path(lhs)
# rhs might be a reference to another object, which has its own observable pattern
if isinstance(rhs, _Constant) or isinstance(rhs, IdrefPlaceHolder):
self.rhs = rhs
else:
self.rhs = make_constant(rhs)
self.negated = negated
self.root_type = self.lhs.object_type_name
def contains_placeholder(self):
return isinstance(self.rhs, IdrefPlaceHolder)
def collapse_reference(self, prefix):
new_lhs = prefix.merge(self.lhs)
new_lhs.collapsed = True
return ComparisonExpressionForElevator(self.operator, new_lhs, self.rhs)
def replace_placeholder_with_idref_pattern(self, idref):
if isinstance(self.rhs, IdrefPlaceHolder):
change_made, pattern = self.rhs.replace_placeholder_with_idref_pattern(idref)
if change_made:
if hasattr(self.lhs, "collapsed") and self.lhs.collapsed:
return True, ComparisonExpressionForElevator(pattern.operator, self.lhs, pattern.rhs)
else:
return True, pattern.collapse_reference(self.lhs)
return False, self
def partition_according_to_object_path(self):
return self
def contains_unconverted_term(self):
return False
def toSTIX21(self):
self.lhs = self.lhs.toSTIX21()
return self
class EqualityComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(EqualityComparisonExpressionForElevator, self).__init__("=", lhs, rhs, negated)
class MatchesComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(MatchesComparisonExpressionForElevator, self).__init__("MATCHES", lhs, rhs, negated)
class GreaterThanComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(GreaterThanComparisonExpressionForElevator, self).__init__(">", lhs, rhs, negated)
class LessThanComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LessThanComparisonExpressionForElevator, self).__init__("<", lhs, rhs, negated)
class GreaterThanEqualComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(GreaterThanEqualComparisonExpressionForElevator, self).__init__(">=", lhs, rhs, negated)
class LessThanEqualComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LessThanEqualComparisonExpressionForElevator, self).__init__("<=", lhs, rhs, negated)
class InComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(InComparisonExpressionForElevator, self).__init__("IN", lhs, rhs, negated)
class LikeComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(LikeComparisonExpressionForElevator, self).__init__("LIKE", lhs, rhs, negated)
class IsSubsetComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(IsSubsetComparisonExpressionForElevator, self).__init__("ISSUBSET", lhs, rhs, negated)
class IsSupersetComparisonExpressionForElevator(ComparisonExpressionForElevator):
def __init__(self, lhs, rhs, negated=False):
super(IsSupersetComparisonExpressionForElevator, self).__init__("ISSUPERSET", lhs, rhs, negated)
class BooleanExpressionForElevator(_BooleanExpression):
def add_operand(self, operand):
self.operands.append(operand)
def contains_placeholder(self):
for args in self.operands:
if args.contains_placeholder():
return True
return False
def replace_placeholder_with_idref_pattern(self, idref):
new_operands = []
change_made = False
for args in self.operands:
change_made_this_time, new_operand = args.replace_placeholder_with_idref_pattern(idref)
if change_made_this_time:
if not hasattr(self, "root_type"):
self.root_type = new_operand.root_type
elif self.root_type and hasattr(new_operand, "root_type") and (self.root_type != new_operand.root_type):
self.root_type = None
change_made = change_made or change_made_this_time
new_operands.append(new_operand)
self.operands = new_operands
return change_made, self
def collapse_reference(self, prefix):
new_operands = []
for operand in self.operands:
new_operands.append(operand.collapse_reference(prefix))
return BooleanExpressionForElevator(self.operator, new_operands)
def partition_according_to_object_path(self):
subexpressions = []
results = []
for term in self.operands:
term_was_appended = False
for sub in subexpressions:
if not hasattr(term, "root_type") and not hasattr(sub[0], "root_type"):
sub.append(term)
term_was_appended = True
break
elif hasattr(term, "root_type") and hasattr(sub[0], "root_type") and term.root_type == sub[0].root_type:
sub.append(term)
term_was_appended = True
break
if not term_was_appended:
subexpressions.append([term])
for x in subexpressions:
if len(x) == 1:
results.append(x[0])
else:
results.append(create_boolean_expression(self.operator, x))
if len(results) == 1:
return results[0]
else:
return CompoundObservationExpressionForElevator(self.operator, results)
def contains_unconverted_term(self):
for args in self.operands:
if args.contains_unconverted_term():
return True
return False
def toSTIX21(self):
for args in self.operands:
args.toSTIX21()
return self
class AndBooleanExpressionForElevator(BooleanExpressionForElevator):
"""'AND' Boolean Pattern Expression. Only use if both operands are of
the same root object.
Args:
operands (list): AND operands
"""
def __init__(self, operands):
super(AndBooleanExpressionForElevator, self).__init__("AND", operands)
class OrBooleanExpressionForElevator(BooleanExpressionForElevator):
"""'OR' Boolean Pattern Expression. Only use if both operands are of the same root object
Args:
operands (list): OR operands
"""
def __init__(self, operands):
super(OrBooleanExpressionForElevator, self).__init__("OR", operands)
class IdrefPlaceHolder(object):
def __init__(self, idref):
self.idref = idref
def __str__(self):
return "PLACEHOLDER:" + self.idref
def contains_placeholder(self):
return True
def replace_placeholder_with_idref_pattern(self, idref):
if idref == self.idref:
return True, get_pattern_from_cache(idref)
elif exists_object_id_key(self.idref) and idref == get_object_id_value(self.idref):
return True, get_pattern_from_cache(idref)
else:
return False, self
def partition_according_to_object_path(self):
error("Placeholder %s should be resolved", 203, self.idref)
return self
def contains_unconverted_term(self):
return False
class UnconvertedTerm(object):
def __init__(self, term_info):
self.term_info = term_info
def __str__(self):
return "unconverted_term:%s" % self.term_info
def contains_placeholder(self):
return False
def replace_placeholder_with_idref_pattern(self, idref):
return False, self
def partition_according_to_object_path(self):
return self
def contains_unconverted_term(self):
return True
class ObservationExpressionForElevator(ObservationExpression):
def toSTIX21(self):
self.operand.toSTIX21()
return self
class CompoundObservationExpressionForElevator(_CompoundObservationExpression):
def __str__(self):
sub_exprs = []
if len(self.operands) == 1:
return "[%s]" % self.operands[0]
for o in self.operands:
if isinstance(o, ObservationExpressionForElevator) or isinstance(o,
CompoundObservationExpressionForElevator):
sub_exprs.append("%s" % o)
else:
sub_exprs.append("[%s]" % o)
return (" " + self.operator + " ").join(sub_exprs)
def contains_placeholder(self):
for args in self.operands:
if args.contains_placeholder():
error("Observable Expressions should not contain placeholders", 202)
def contains_unconverted_term(self):
for args in self.operands:
if args.contains_unconverted_term():
return True
return False
def partition_according_to_object_path(self):
return self
def toSTIX21(self):
for arg in self.operands:
arg.toSTIX21()
return self
class AndObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""'AND' Compound Observation Pattern Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(AndObservationExpressionForElevator, self).__init__("AND", operands)
class OrObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""Pattern 'OR' Compound Observation Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(OrObservationExpressionForElevator, self).__init__("OR", operands)
class FollowedByObservationExpressionForElevator(CompoundObservationExpressionForElevator):
"""Pattern 'Followed by' Compound Observation Expression
Args:
operands (str): compound observation operands
"""
def __init__(self, operands):
super(FollowedByObservationExpressionForElevator, self).__init__("FOLLOWEDBY", operands)
class QualifiedObservationExpressionForElevator(QualifiedObservationExpression):
"""Pattern Qualified Observation Expression
Args:
observation_expression (PatternExpression OR _CompoundObservationExpression OR ): pattern expression
qualifier (_ExpressionQualifier): pattern expression qualifier
"""
def __init__(self, observation_expression, qualifier):
super(QualifiedObservationExpressionForElevator, self).__init__(observation_expression, qualifier)
def toSTIX21(self):
self.observation_expression.toSTIX21()
return self
class ParentheticalExpressionForElevator(stix2.ParentheticalExpression):
def contains_placeholder(self):
return self.expression.contains_placeholder()
def contains_unconverted_term(self):
return self.expression.contains_unconverted_term()
def replace_placeholder_with_idref_pattern(self, idref):
change_made, new_expression = self.expression.replace_placeholder_with_idref_pattern(idref)
self.expression = new_expression
if hasattr(new_expression, "root_type"):
self.root_type = new_expression.root_type
return change_made, self
def collapse_reference(self, prefix):
new_expression = self.expression.collapse_reference(prefix)
return ParentheticalExpressionForElevator(new_expression)
def partition_according_to_object_path(self):
self.expression = self.expression.partition_according_to_object_path()
return self
def toSTIX21(self):
self.expression.toSTIX21()
return self
def create_boolean_expression(operator, operands):
if len(operands) == 1:
return operands[0]
exp = BooleanExpressionForElevator(operator, [])
for arg in operands:
if not isinstance(arg, IdrefPlaceHolder) and not isinstance(arg, UnconvertedTerm) and hasattr(arg, "root_type"):
if not hasattr(exp, "root_type"):
exp.root_type = arg.root_type
elif exp.root_type and (exp.root_type != arg.root_type):
exp.root_type = None
exp.add_operand(arg)
return ParentheticalExpressionForElevator(exp)
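# Illustrative sketch, not part of the original module: create_boolean_expression()
# joins already-built comparison terms and tracks a shared root_type so the result
# can later be partitioned per object type.  The object paths and values in this
# commented example are made up.
#
#   >>> t1 = create_term("file:name", "Equals", make_constant("evil.exe"))
#   >>> t2 = create_term("file:size", "GreaterThan", make_constant(1024))
#   >>> str(create_boolean_expression("AND", [t1, t2]))
#   "(file:name = 'evil.exe' AND file:size > 1024)"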
###################
_PATTERN_CACHE = {}
def clear_pattern_cache():
global _PATTERN_CACHE
_PATTERN_CACHE = {}
def add_to_pattern_cache(key, pattern):
global _PATTERN_CACHE
if pattern:
_PATTERN_CACHE[key] = pattern
def id_in_pattern_cache(id_):
return id_ in _PATTERN_CACHE
def get_pattern_from_cache(id_):
return _PATTERN_CACHE[id_]
def get_ids_from_pattern_cache():
return _PATTERN_CACHE.keys()
def get_items_from_pattern_cache():
return _PATTERN_CACHE.items()
def pattern_cache_is_empty():
return _PATTERN_CACHE == {}
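# Illustrative sketch, not part of the original module: the pattern cache maps
# observable/object ids to pattern expressions that have already been converted,
# and IdrefPlaceHolder terms are later swapped for the cached pattern.  The id
# below is made up.
#
#   >>> add_to_pattern_cache("example:Observable-1", some_pattern)
#   >>> changed, resolved = IdrefPlaceHolder("example:Observable-1").replace_placeholder_with_idref_pattern("example:Observable-1")
#   >>> changed, resolved is some_pattern
#   (True, True)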
###########
_OBSERVABLE_MAPPINGS = {}
def add_to_observable_mappings(obs):
global _OBSERVABLE_MAPPINGS
if obs:
_OBSERVABLE_MAPPINGS[obs.id_] = obs
_OBSERVABLE_MAPPINGS[obs.object_.id_] = obs
def id_in_observable_mappings(id_):
return id_ in _OBSERVABLE_MAPPINGS
def get_obs_from_mapping(id_):
return _OBSERVABLE_MAPPINGS[id_]
def clear_observable_mappings():
global _OBSERVABLE_MAPPINGS
_OBSERVABLE_MAPPINGS = {}
# simulate dynamic variable environment
_DYNAMIC_SCOPING_ENV = {}
def intialize_dynamic_variable(var):
global _DYNAMIC_SCOPING_ENV
if var in _DYNAMIC_SCOPING_ENV:
raise Exception
else:
_DYNAMIC_SCOPING_ENV[var] = []
def set_dynamic_variable(var, value):
global _DYNAMIC_SCOPING_ENV
if var not in _DYNAMIC_SCOPING_ENV:
intialize_dynamic_variable(var)
_DYNAMIC_SCOPING_ENV[var].append(value)
def get_dynamic_variable(var):
if var not in _DYNAMIC_SCOPING_ENV:
raise Exception
else:
return _DYNAMIC_SCOPING_ENV[var][-1]
def pop_dynamic_variable(var):
if var not in _DYNAMIC_SCOPING_ENV or not _DYNAMIC_SCOPING_ENV[var]:
raise Exception
else:
        _DYNAMIC_SCOPING_ENV[var].pop()
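# Illustrative sketch, not part of the original module: the helpers above emulate a
# dynamically scoped variable, mainly used to expose the "current observable" to
# warning messages.  Typical usage (made-up value):
#
#   >>> set_dynamic_variable("current_observable", some_observable)
#   >>> get_dynamic_variable("current_observable") is some_observable
#   True
#   >>> pop_dynamic_variable("current_observable")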
_CLASS_NAME_MAPPING = {"File": "file",
"URI": "uri",
"EmailMessage": "email-message",
"WinRegistryKey": "windows-registry-key",
"Process": "process",
"DomainName": "domain-name",
"Mutex": "mutex",
"WinExecutableFile": "file:extensions.'windows-pebinary-ext'",
"ArchiveFile": "file:extensions.'archive-ext'",
"NetworkConnection": "network-traffic"}
_ADDRESS_NAME_MAPPING = {Address.CAT_IPV4: "ipv4-addr",
Address.CAT_IPV6: "ipv6-addr",
Address.CAT_MAC: "mac-addr",
Address.CAT_EMAIL: "email-addr"}
# address, network_connection
def convert_cybox_class_name_to_object_path_root_name(instance):
class_name = instance.__class__.__name__
if class_name in _CLASS_NAME_MAPPING:
return _CLASS_NAME_MAPPING[class_name]
elif class_name == "Address" and instance.category in _ADDRESS_NAME_MAPPING:
        return _ADDRESS_NAME_MAPPING[instance.category]
else:
error("Cannot convert CybOX 2.x class name %s to an object_path_root_name", 813, class_name)
return None
def need_not(condition):
return condition == "DoesNotContain"
def is_equal_condition(cond):
return cond == "Equals" or cond is None
def add_parens_if_needed(expr):
if expr.find("AND") != -1 or expr.find("OR") != -1:
return "(" + expr + ")"
else:
return expr
_CONDITION_OPERATOR_MAP = {
'Equals': "=",
"DoesNotEqual": "!=",
"Contains": "=",
"DoesNotContain": "!=",
"GreaterThan": ">",
'GreaterThanOrEqual': ">=",
"LessThan": "<",
"LessThanOrEqual": "<="
# StartsWith - handled in create_term_with_regex
# EndsWith - handled in create_term_with_regex
# InclusiveBetween - handled in create_term_with_range
# ExclusiveBetween - handled in create_term_with_range
# FitsPattern
# BitwiseAnd
# BitwiseOr
}
def convert_condition(condition):
if condition is None:
warn("No condition given for %s - assume '='", 714,
identifying_info(get_dynamic_variable("current_observable")))
return "="
for cond, op in _CONDITION_OPERATOR_MAP.items():
if cond.lower() == condition.lower():
if cond != condition:
warn("'%s' allowed in %s - should be '%s'", 630,
condition,
identifying_info(get_dynamic_variable("current_observable")),
cond)
return op
warn("Unknown condition given in %s - marked as 'INVALID_CONDITION'", 628,
identifying_info(get_dynamic_variable("current_observable")))
return "INVALID-CONDITION"
def process_boolean_negation(op, negated):
if not negated:
return op
elif op == "AND":
return "OR"
elif op == "OR":
return "AND"
else:
raise (ValueError("not a legal Boolean op: %s" % op))
def process_comparison_negation(op, negated):
if not negated:
return op
elif op == "=":
return "!="
elif op == "!=":
return "="
elif op == "<":
return ">="
elif op == "<=":
return ">"
elif op == ">":
return "<="
elif op == ">=":
return "<"
else:
raise (ValueError("not a legal Comparison op: %s" % op))
def create_term_with_regex(lhs, condition, rhs, negated):
# TODO: escape characters
if condition == "StartsWith":
rhs.value = "^%s" % rhs.value
elif condition == "EndsWith":
rhs.value = "$%s" % rhs.value
return ComparisonExpressionForElevator("MATCHES", lhs, rhs, negated)
def create_term_with_range(lhs, condition, rhs, negated=False):
# TODO: handle negated
if not isinstance(rhs, stix2.ListConstant) or len(rhs.value) != 2:
error("%s was used, but two values were not provided.", 609, condition)
return "'range term underspecified'"
else:
if condition == "InclusiveBetween":
# return "(" + lhs + " GE " + text_type(rhs[0]) + " AND " + lhs + " LE " + text_type(rhs[1]) + ")"
lower_bound = ComparisonExpressionForElevator(process_comparison_negation(">=", negated), lhs, rhs.value[0])
upper_bound = ComparisonExpressionForElevator(process_comparison_negation("<=", negated), lhs, rhs.value[1])
else: # "ExclusiveBetween"
# return "(" + lhs + " GT " + text_type(rhs[0]) + " AND " + lhs + " LT " + text_type(rhs[1]) + ")"
lower_bound = ComparisonExpressionForElevator(process_comparison_negation(">", negated), lhs, rhs.value[0])
upper_bound = ComparisonExpressionForElevator(process_comparison_negation("<", negated), lhs, rhs.value[1])
return create_boolean_expression(process_boolean_negation("AND", negated), [lower_bound, upper_bound])
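# Illustrative sketch, not part of the original module: a 1.x "InclusiveBetween"
# condition is expanded into a pair of bounded comparisons.  The path and values
# below are made up.
#
#   >>> bounds = stix2.ListConstant([100, 200])
#   >>> str(create_term_with_range("file:size", "InclusiveBetween", bounds))
#   '(file:size >= 100 AND file:size <= 200)'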
def multi_valued_property(object_path):
return object_path and object_path.find("*") != -1
def negate_if_needed(condition, negated):
if negated:
return "NOT " + condition
else:
return condition
def create_term(lhs, condition, rhs, negated=False):
if condition == "StartsWith" or condition == "EndsWith":
return create_term_with_regex(lhs, condition, rhs, negated)
elif condition == "InclusiveBetween" or condition == "ExclusiveBetween":
return create_term_with_range(lhs, condition, rhs, negated)
else:
if condition == "Contains" and not multi_valued_property(lhs):
warn("Used MATCHES operator for %s", 715, condition)
return create_term_with_regex(lhs, condition, rhs, negated)
elif condition == "DoesNotContain":
warn("Used MATCHES operator for %s", 715, condition)
return create_term_with_regex(lhs, condition, rhs, not negated)
# return lhs + " " + negate_if_needed(convert_condition(condition), negated) + " '" + convert_to_text_type(rhs) + "'"
return ComparisonExpressionForElevator(convert_condition(condition), lhs, rhs, negated)
def make_constant(obj):
# TODO: handle other Markable objects?
if isinstance(obj, bool):
return stix2.BooleanConstant(obj)
elif isinstance(obj, int) or isinstance(obj, long):
return stix2.IntegerConstant(obj)
elif isinstance(obj, float):
return stix2.FloatConstant(obj)
elif isinstance(obj, str) or isinstance(obj, stixmarx.api.types.MarkableText):
return stix2.StringConstant(obj.strip())
elif isinstance(obj, list):
return stix2.ListConstant([make_constant(x) for x in obj])
elif isinstance(obj, datetime.datetime) or isinstance(obj, stixmarx.api.types.MarkableDateTime):
return stix2.TimestampConstant(obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ"))
else:
raise ValueError("Can't make a constant from %s" % obj)
def add_comparison_expression(prop, object_path):
if prop is not None and prop.value is not None:
if hasattr(prop, "condition"):
cond = prop.condition
else:
warn("No condition given - assume '='", 714)
cond = None
return create_term(object_path, cond, make_constant(prop.value))
if prop is not None and prop.value is None:
warn("No term was yielded for %s", 622, object_path)
return None
def convert_custom_properties(cps, object_type_name):
expressions = []
for cp in cps.property_:
if not re.match("[a-z0-9_]+", cp.name):
warn("The custom property name %s does not adhere to the specification rules", 617, cp.name)
if " " in cp.name:
warn("The custom property name %s contains whitespace, replacing it with underscores", 624, cp.name)
expressions.append(
create_term(object_type_name + ":x_" + cp.name.replace(" ", "_"), cp.condition, make_constant(cp.value)))
return create_boolean_expression("AND", expressions)
_ACCOUNT_PROPERTIES = [
["full_name", "user-account:display_name"],
["last_login", "user-account:account_last_login"],
["username", "user-account:account_login"],
["creation_time", "user-account:account_created"]
]
def convert_account_to_pattern(account):
expressions = []
if hasattr(account, "disabled") and account.disabled:
expressions.append(create_term("user-account:is_disabled",
"Equals",
stix2.BooleanConstant(account.disabled)))
for prop_spec in _ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(account, prop_1x) and getattr(account, prop_1x):
term = add_comparison_expression(getattr(account, prop_1x), object_path)
if term:
expressions.append(term)
if account.authentication and get_option_value("spec_version") == "2.1":
if account.authentication.authentication_data:
expressions.append(create_term("user-account:credential",
"Equals",
stix2.StringConstant(account.authentication.authentication_data)))
if isinstance(account, UnixUserAccount):
win_process_expression = convert_unix_user_to_pattern(account)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn("No UnixUserAccount properties found in %s", 615, text_type(account))
elif isinstance(account, WinComputerAccount):
expressions.append(create_term("user-account:account_type",
"Equals",
stix2.StringConstant("windows-domain" if account.domain else "windows-local")))
if expressions:
return create_boolean_expression("AND", expressions)
_UNIX_ACCOUNT_PROPERTIES = [
["group_id", "user-account:extensions.'unix-account-ext'.gid"],
["login_shell", "user-account:extensions.'unix-account-ext'.shell"],
["home_directory", "user-account:extensions.'unix-account-ext'.home_dir"],
]
def convert_unix_user_to_pattern(account):
expressions = []
expressions.append(create_term("user-account:account_type",
"Equals",
stix2.StringConstant("unix")))
if hasattr(account, "user_id") and account.user_id:
expressions.append(create_term("user-account:user_id",
account.user_id.condition,
stix2.StringConstant(text_type(account.user_id.value))))
for prop_spec in _UNIX_ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(account, prop_1x) and getattr(account, prop_1x):
term = add_comparison_expression(getattr(account, prop_1x), object_path)
if term:
expressions.append(term)
if expressions:
return create_boolean_expression("AND", expressions)
def convert_address_to_pattern(add):
cond = add.address_value.condition
if add.category == add.CAT_IPV4:
return create_term("ipv4-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_IPV6:
return create_term("ipv6-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_MAC:
return create_term("mac-addr:value", cond, make_constant(add.address_value.value.strip()))
elif add.category == add.CAT_EMAIL:
return create_term("email-addr:value", cond, make_constant(add.address_value.value.strip()))
else:
warn("The address type %s is not part of Cybox 3.0", 421, add.category)
def convert_uri_to_pattern(uri):
return create_term("url:value", uri.value.condition, make_constant(uri.value.value.strip()))
# NOTICE: The format of these PROPERTIES is different than the others in this file!!!!!!
_EMAIL_HEADER_PROPERTIES = [["email-message:subject", ["subject"]],
["email-message:from_ref.value", ["from_", "address_value"]],
["email-message:sender_ref.value", ["sender", "address_value"]],
["email-message:date", ["date"]],
["email-message:content_type", ["content_type"]],
["email-message:to_refs[*].value", ["to*", "address_value"]],
["email-message:cc_refs[*].value", ["cc*", "address_value"]],
["email-message:bcc_refs[*].value", ["bcc*", "address_value"]]]
_EMAIL_ADDITIONAL_HEADERS_PROPERTIES = \
[["email-message:additional_header_fields.Reply-To", ["reply-to*", "address_value"]],
["email-message:additional_header_fields.Message-ID", ["message_id"]],
["email-message:additional_header_fields.In-Reply-To", ["in_reply_to"]],
["email-message:additional_header_fields.Errors-To", ["errors_to"]],
["email-message:additional_header_fields.MIME-Version", ["mime_version"]],
["email-message:additional_header_fields.Precedence", ["precedence"]],
["email-message:additional_header_fields.User-Agent", ["user_agent"]],
["email-message:additional_header_fields.Boundary", ["boundary"]],
["email-message:additional_header_fields.X-Originating-IP", ["x_originating_ip", "address_value"]],
["email-message:additional_header_fields.X-Priority", ["x_priority"]],
["email-message:additional_header_fields.X-Mailer", ["x_mailer"]]]
def cannonicalize_prop_name(name):
if name.find("*") == -1:
return name
else:
return name[:-1]
def create_terms_from_prop_list(prop_list, obj, object_path):
if len(prop_list) == 1:
prop_1x = prop_list[0]
if hasattr(obj, cannonicalize_prop_name(prop_1x)):
if multi_valued_property(prop_1x):
prop_exprs = []
for c in getattr(obj, cannonicalize_prop_name(prop_1x)):
term = add_comparison_expression(c, object_path)
if term:
prop_exprs.append(term)
# return " OR ".join(prop_exprs)
if prop_exprs:
return create_boolean_expression("OR", prop_exprs)
else:
return add_comparison_expression(getattr(obj, cannonicalize_prop_name(prop_1x)), object_path)
else:
prop_1x, rest_of_prop_list = prop_list[0], prop_list[1:]
if hasattr(obj, cannonicalize_prop_name(prop_1x)):
if multi_valued_property(prop_1x):
prop_exprs = []
values = getattr(obj, cannonicalize_prop_name(prop_1x))
if values:
for c in values:
term = create_terms_from_prop_list(rest_of_prop_list, c, object_path)
if term:
prop_exprs.append(term)
# return " OR ".join(prop_exprs)
if prop_exprs:
return create_boolean_expression("OR", prop_exprs)
else:
return create_terms_from_prop_list(rest_of_prop_list,
getattr(obj, cannonicalize_prop_name(prop_1x)),
object_path)
def convert_email_header_to_pattern(head, properties):
header_expressions = []
for prop_spec in properties:
object_path = prop_spec[0]
prop_1x_list = prop_spec[1]
if hasattr(head, cannonicalize_prop_name(prop_1x_list[0])):
term = create_terms_from_prop_list(prop_1x_list, head, object_path)
if term:
header_expressions.append(term)
if head.received_lines:
warn("Email received lines not handled yet", 806)
if header_expressions:
return create_boolean_expression("AND", header_expressions)
def convert_attachment_to_ref(attachment):
return IdrefPlaceHolder(attachment.object_reference)
def convert_email_message_to_pattern(mess):
expressions = []
if mess.header is not None:
expressions.append(convert_email_header_to_pattern(mess.header, _EMAIL_HEADER_PROPERTIES))
add_headers = convert_email_header_to_pattern(mess.header, _EMAIL_ADDITIONAL_HEADERS_PROPERTIES)
if add_headers:
expressions.append(add_headers)
if mess.attachments is not None:
for attachment in mess.attachments:
expressions.append(ComparisonExpressionForElevator("=", "email-message:body_multipart[*].body_raw_ref",
convert_attachment_to_ref(attachment)))
if mess.raw_body is not None:
if not mess.raw_body.value:
warn("%s contains no value", 621, "Email raw body")
else:
warn("Email raw body not handled yet", 806)
if mess.links is not None:
warn("Email links not handled yet", 806)
if expressions:
return create_boolean_expression("AND", expressions)
_PE_FILE_HEADER_PROPERTIES = \
[["machine", "file:extensions.'windows-pebinary-ext'.file_header:machine_hex"],
["time_date_stamp", "file:extensions.'windows-pebinary-ext'.file_header.time_date_stamp"],
["number_of_sections", "file:extensions.'windows-pebinary-ext'.file_header.number_of_sections"],
["pointer_to_symbol_table", "file:extensions.'windows-pebinary-ext'.file_header.pointer_to_symbol_table"],
["number_of_symbols", "file:extensions.'windows-pebinary-ext'.file_header.number_of_symbols"],
["size_of_optional_header", "file:extensions.'windows-pebinary-ext'.file_header.size_of_optional_header"],
["characteristics", "file:extensions.'windows-pebinary-ext'.file_header.characteristics_hex"]]
_PE_SECTION_HEADER_PROPERTIES = [["name", "file:extensions.'windows-pebinary-ext'.section[*].name"],
["virtual_size", "file:extensions.'windows-pebinary-ext'.section[*].size"]]
_ARCHIVE_FILE_PROPERTIES_2_0 = [["comment", "file:extensions.'archive-ext'.comment"],
["version", "file:extensions.'archive-ext'.version"]]
_ARCHIVE_FILE_PROPERTIES_2_1 = [["comment", "file:extensions.'archive-ext'.comment"]]
def select_archive_file_properties():
if get_option_value("spec_version") == "2.1":
return _ARCHIVE_FILE_PROPERTIES_2_1
else:
return _ARCHIVE_FILE_PROPERTIES_2_0
def convert_windows_executable_file_to_pattern(f):
expressions = []
if f.headers:
file_header = f.headers.file_header
if file_header:
file_header_expressions = []
for prop_spec in _PE_FILE_HEADER_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(file_header, prop_1x) and getattr(file_header, prop_1x):
term = add_comparison_expression(getattr(file_header, prop_1x), object_path)
if term:
file_header_expressions.append(term)
if file_header.hashes is not None:
hash_expression = convert_hashes_to_pattern(file_header.hashes)
if hash_expression:
file_header_expressions.append(hash_expression)
if file_header_expressions:
expressions.append(create_boolean_expression("AND", file_header_expressions))
if f.headers.optional_header:
warn("file:extensions:'windows-pebinary-ext':optional_header is not implemented yet", 807)
if f.type_:
expressions.append(create_term("file:extensions.'windows-pebinary-ext'.pe_type",
f.type_.condition,
stix2.StringConstant(map_vocabs_to_label(f.type_.value, WINDOWS_PEBINARY))))
sections = f.sections
if sections:
sections_expressions = []
# should order matter in patterns???
for s in sections:
section_expressions = []
if s.section_header:
for prop_spec in _PE_SECTION_HEADER_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(s.section_header, prop_1x) and getattr(s.section_header, prop_1x):
term = add_comparison_expression(getattr(s.section_header, prop_1x), object_path)
if term:
section_expressions.append(term)
if s.entropy:
if s.entropy.min:
warn("Entropy.min is not supported in STIX 2.0", 424)
                if s.entropy.max:
warn("Entropy.max is not supported in STIX 2.0", 424)
if s.entropy.value:
section_expressions.append(create_term("file:extensions.'windows-pebinary-ext'.section[*].entropy",
s.entropy.value.condition,
stix2.FloatConstant(s.entropy.value.value)))
if s.data_hashes:
section_expressions.append(convert_hashes_to_pattern(s.data_hashes))
if s.header_hashes:
section_expressions.append(convert_hashes_to_pattern(s.header_hashes))
if section_expressions:
sections_expressions.append(create_boolean_expression("AND", section_expressions))
if sections_expressions:
expressions.append(create_boolean_expression("AND", sections_expressions))
if f.exports:
warn("The exports property of WinExecutableFileObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinExecutableFileObj.exports"))
if f.imports:
warn("The imports property of WinExecutableFileObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinExecutableFileObj.imports"))
if expressions:
return create_boolean_expression("AND", expressions)
def convert_archive_file_to_pattern(f):
and_expressions = []
for prop_spec in select_archive_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(f, prop_1x):
term = add_comparison_expression(getattr(f, prop_1x), object_path)
if term:
and_expressions.append(term)
if and_expressions:
return create_boolean_expression("AND", and_expressions)
def convert_hashes_to_pattern(hashes):
hash_expressions = []
for h in hashes:
if getattr(h, "simple_hash_value"):
hash_value = h.simple_hash_value
else:
hash_value = h.fuzzy_hash_value
if text_type(h.type_).startswith("SHA"):
hash_type = "'" + "SHA" + "-" + text_type(h.type_)[3:] + "'"
elif text_type(h.type_) == "SSDEEP":
hash_type = text_type(h.type_).lower()
else:
hash_type = text_type(h.type_)
try:
hc = stix2.HashConstant(hash_value.value, text_type(h.type_))
except ValueError as err:
# don't cause exception if hash value isn't correct
warn(err, 626)
hc = make_constant(hash_value.value)
hash_expressions.append(create_term("file:hashes" + "." + hash_type,
hash_value.condition,
hc))
if hash_expressions:
return create_boolean_expression("OR", hash_expressions)
def convert_file_name_and_file_extension(file_name, file_extension):
if (file_extension and file_extension.value and is_equal_condition(file_name.condition) and
is_equal_condition(file_extension.condition) and file_name.value.endswith(file_extension.value)):
return create_term("file:name", file_name.condition, make_constant(file_name.value))
elif (file_name.condition == "StartsWith" and file_extension and file_extension.value and
is_equal_condition(file_extension.condition)):
return ComparisonExpressionForElevator("MATCHES", "file:name",
make_constant(
"^" + file_name.value + "*." + file_extension.value + "$"))
elif (file_name.condition == "Contains" and file_extension and file_extension.value and
is_equal_condition(file_extension.condition)):
return ComparisonExpressionForElevator("MATCHES", "file:name",
make_constant(
file_name.value + "*." + file_extension.value + "$"))
else:
warn("Unable to create a pattern for file:file_name from a File object", 620)
def convert_file_name_and_path_to_pattern(f):
file_name_path_expressions = []
if f.file_name and f.file_extension and f.file_extension.value:
file_name_path_expressions.append(convert_file_name_and_file_extension(f.file_name, f.file_extension))
elif f.file_name:
file_name_path_expressions.append(create_term("file:name",
f.file_name.condition,
make_constant(f.file_name.value)))
if f.file_path and f.file_path.value:
index = f.file_path.value.rfind("/")
if index == -1:
index = f.file_path.value.rfind("\\")
if index == -1:
warn("Ambiguous file path '%s' was not processed", 816, f.file_path.value)
else:
if not (f.file_path.value.endswith("/") or f.file_path.value.endswith("\\")):
file_name_path_expressions.append(create_term("file:name",
f.file_path.condition,
make_constant(f.file_path.value[index + 1:])))
path_string_constant = make_constant(((f.device_path.value if f.device_path else "") +
f.file_path.value[0: index]))
file_name_path_expressions.append(create_term("file:parent_directory_ref.path",
f.file_path.condition,
path_string_constant))
else:
path_string_constant = make_constant(((f.device_path.value if f.device_path else "") +
f.file_path.value[0: index]))
file_name_path_expressions.append(create_term("directory:path",
f.file_path.condition,
path_string_constant))
if f.full_path:
warn("1.x full file paths are not processed, yet", 802)
if file_name_path_expressions:
return create_boolean_expression("AND", file_name_path_expressions)
_FILE_PROPERTIES_2_0 = [["size_in_bytes", "file:size"],
["magic_number", "file:magic_number_hex"],
["created_time", "file:created"],
["modified_time", "file:modified"],
["accessed_time", "file:accessed"],
["encyption_algorithm", "file:encyption_algorithm"],
["decryption_key", "file:decryption_key"]]
# is_encrypted
_FILE_PROPERTIES_2_1 = [["size_in_bytes", "file:size"],
["magic_number", "file:magic_number_hex"],
["created_time", "file:created"],
["modified_time", "file:modified"],
["accessed_time", "file:accessed"]]
def select_file_properties():
if get_option_value("spec_version") == "2.1":
return _FILE_PROPERTIES_2_1
else:
return _FILE_PROPERTIES_2_0
def convert_file_to_pattern(f):
expressions = []
if f.hashes is not None:
hash_expression = convert_hashes_to_pattern(f.hashes)
if hash_expression:
expressions.append(hash_expression)
file_name_and_path_expression = convert_file_name_and_path_to_pattern(f)
if file_name_and_path_expression:
expressions.append(file_name_and_path_expression)
properties_expressions = []
for prop_spec in select_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(f, prop_1x) and getattr(f, prop_1x):
term = add_comparison_expression(getattr(f, prop_1x), object_path)
if term:
properties_expressions.append(term)
if properties_expressions:
expressions.extend(properties_expressions)
if isinstance(f, WinExecutableFile):
windows_executable_file_expression = convert_windows_executable_file_to_pattern(f)
if windows_executable_file_expression:
expressions.append(windows_executable_file_expression)
else:
warn("No WinExecutableFile properties found in %s", 613, text_type(f))
if isinstance(f, ArchiveFile):
archive_file_expressions = convert_archive_file_to_pattern(f)
if archive_file_expressions:
expressions.append(archive_file_expressions)
else:
warn("No ArchiveFile properties found in %s", 614, text_type(f))
if expressions:
return create_boolean_expression("AND", expressions)
_REGISTRY_KEY_VALUES_PROPERTIES = [["data", "windows-registry-key:values[*].data"],
["name", "windows-registry-key:values[*].name"],
["datatype", "windows-registry-key:values[*].data_type"]]
def convert_registry_key_to_pattern(reg_key):
expressions = []
if reg_key.key:
key_value_term = ""
if reg_key.hive:
if reg_key.hive.condition is None or is_equal_condition(reg_key.hive.condition):
key_value_term += reg_key.hive.value + "\\"
else:
warn("Condition %s on a hive property not handled", 812, reg_key.hive.condition)
if reg_key.key.value.startswith(reg_key.hive.value):
warn("Hive property, %s, is already a prefix of the key property, %s", 623, reg_key.hive.value,
reg_key.key.value)
key_value_term = reg_key.key.value
else:
key_value_term += reg_key.key.value
else:
key_value_term = reg_key.key.value
expressions.append(create_term("windows-registry-key:key",
reg_key.key.condition,
make_constant(key_value_term)))
if reg_key.values:
values_expressions = []
for v in reg_key.values:
value_expressions = []
for prop_spec in _REGISTRY_KEY_VALUES_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(v, prop_1x) and getattr(v, prop_1x):
term = add_comparison_expression(getattr(v, prop_1x), object_path)
if term:
value_expressions.append(term)
if value_expressions:
values_expressions.append(create_boolean_expression("OR", value_expressions))
expressions.extend(values_expressions)
if expressions:
return create_boolean_expression("AND", expressions)
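# Illustrative example for convert_registry_key_to_pattern (hypothetical values): a 1.x
# WinRegistryKey with hive "HKEY_LOCAL_MACHINE" and key "Software\\Example" becomes roughly:
#   windows-registry-key:key = 'HKEY_LOCAL_MACHINE\\Software\\Example'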
def convert_image_info_to_pattern(image_info):
expressions = []
if image_info.command_line:
expressions.append(add_comparison_expression(image_info.command_line, "process:command_line"))
if image_info.current_directory:
expressions.append(add_comparison_expression(image_info.current_directory, "process:cwd"))
if expressions:
return create_boolean_expression("AND", expressions)
_PROCESS_PROPERTIES_2_0 = [
["is_hidden", "process:is_hidden"],
["pid", "process:pid"],
["name", "process:name"],
["parent_pid", "process:parent_ref.pid"],
["username", "process:creator_user_ref.user_id"],
["creation_time", "process:created"]
]
_PROCESS_PROPERTIES_2_1 = [
["is_hidden", "process:is_hidden"],
["pid", "process:pid"],
["parent_pid", "process:parent_ref.pid"],
["username", "process:creator_user_ref.user_id"],
["creation_time", "process:created"]
]
def select_process_properties():
if get_option_value("spec_version") == "2.1":
return _PROCESS_PROPERTIES_2_1
else:
return _PROCESS_PROPERTIES_2_0
def convert_process_to_pattern(process):
expressions = []
for prop_spec in select_process_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(process, prop_1x) and getattr(process, prop_1x):
term = add_comparison_expression(getattr(process, prop_1x), object_path)
if term:
expressions.append(term)
if process.image_info:
process_info = convert_image_info_to_pattern(process.image_info)
if process_info:
expressions.append(process_info)
if hasattr(process, "argument_list") and process.argument_list:
if get_option_value("spec_version") == "2.0":
argument_expressions = []
for a in process.argument_list:
argument_expressions.append(create_term("process:arguments[*]",
a.condition,
stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression("AND", argument_expressions))
else:
warn("The argument_list property of ProcessObj is not part of STIX 2.1", 418)
expressions.append(UnconvertedTerm("ProcessObj.argument_list"))
if hasattr(process, "environment_variable_list") and process.environment_variable_list:
ev_expressions = []
for ev in process.environment_variable_list:
# TODO: handle variable names with '-'
ev_expressions.append(create_term("process:environment_variables[*]." + str(ev.name),
ev.value.condition,
stix2.StringConstant(str(ev.value))))
if ev_expressions:
expressions.append(create_boolean_expression("AND", ev_expressions))
if hasattr(process, "child_pid_list") and process.child_pid_list:
child_pids_expressions = []
for cp in process.child_pid_list:
child_pids_expressions.append(create_term("process:child_refs[*].pid",
cp.condition,
stix2.IntegerConstant(cp.value)))
if child_pids_expressions:
expressions.append(create_boolean_expression("AND", child_pids_expressions))
if hasattr(process, "network_connection_list") and process.network_connection_list:
network_connection_expressions = []
for nc in process.network_connection_list:
new_pattern = convert_network_connection_to_pattern(nc)
network_connection_expressions.append(
new_pattern.collapse_reference(stix2.ObjectPath.make_object_path("process:opened_connection_refs[*]")))
if network_connection_expressions:
expressions.append(create_boolean_expression("AND", network_connection_expressions))
if isinstance(process, WinProcess):
win_process_expression = convert_windows_process_to_pattern(process)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn("No WinProcess properties found in %s", 615, text_type(process))
if isinstance(process, WinService):
service_expression = convert_windows_service_to_pattern(process)
if service_expression:
expressions.append(service_expression)
else:
warn("No WinService properties found in %s", 616, text_type(process))
if expressions:
return create_boolean_expression("AND", expressions)
_WINDOWS_PROCESS_PROPERTIES = [
["aslr_enabled", "process:extensions.'windows-process-ext'.aslr_enabled"],
["dep_enabled", "process:extensions.'windows-process-ext'.dep_enabled"],
["priority", "process:extensions.'windows-process-ext'.priority"],
["security_id", "process:extensions.'windows-process-ext'.owner_sid"],
["window_title", "process:extensions.'windows-process-ext'.window_title"]
]
def convert_windows_process_to_pattern(process):
expressions = []
for prop_spec in _WINDOWS_PROCESS_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(process, prop_1x) and getattr(process, prop_1x):
term = add_comparison_expression(getattr(process, prop_1x), object_path)
if term:
expressions.append(term)
if process.handle_list:
for h in process.handle_list:
warn("Windows Handles are not a part of STIX 2.0", 420)
if process.startup_info:
warn("The startup_info property of ProcessObj is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("ProcessObj.startup_info"))
if expressions:
return create_boolean_expression("AND", expressions)
_WINDOWS_SERVICE_PROPERTIES = \
[["service_name", "process:extensions.'windows-service-ext'.service_name"],
["display_name", "process:extensions.'windows-service-ext'.display_name"],
["startup_command_line", "process:extensions.'windows-service-ext'.startup_command_line"],
["start_type", "process:extensions.'windows-service-ext'.start_type"],
["service_type", "process:extensions.'windows-service-ext'.service_type"],
["service_status", "process:extensions.'windows-service-ext'.service_status"]]
def convert_windows_service_to_pattern(service):
expressions = []
for prop_spec in _WINDOWS_SERVICE_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(service, prop_1x) and getattr(service, prop_1x):
term = add_comparison_expression(getattr(service, prop_1x), object_path)
if term:
expressions.append(term)
if hasattr(service, "description_list") and service.description_list:
description_expressions = []
for d in service.description_list:
description_expressions.append(create_term("process:extensions.'windows-service-ext'.descriptions[*]",
d.condition,
make_constant(d.value)))
if description_expressions:
expressions.append(create_boolean_expression("OR", description_expressions))
if hasattr(service, "service_dll") and service.service_dll:
warn("The service_dll property of WinServiceObject is not part of STIX 2.x", 418)
expressions.append(UnconvertedTerm("WinServiceObject.service_dll"))
if expressions:
return create_boolean_expression("AND", expressions)
def convert_related_object_to_pattern(ro):
if ro.id_:
new_pattern = convert_object_to_pattern(ro, ro.id_)
if new_pattern:
add_to_pattern_cache(ro.id_, new_pattern)
return new_pattern
elif ro.idref:
if id_in_pattern_cache(ro.idref):
return get_pattern_from_cache(ro.idref)
else:
if id_in_observable_mappings(ro.idref):
return convert_observable_to_pattern(get_obs_from_mapping(ro.idref))
return IdrefPlaceHolder(ro.idref)
def convert_domain_name_to_pattern(domain_name, related_objects):
pattern = [
create_term("domain-name:value", domain_name.value.condition, make_constant(domain_name.value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == "Resolved_To":
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator("=",
"domain-name:resolves_to_refs[*]",
new_pattern))
else:
pattern.append(new_pattern.collapse_reference(
stix2.ObjectPath.make_object_path("domain-name:resolves_to_refs[*]")))
else:
warn("The %s relationship involving %s is not supported in STIX 2.0", 427, ro.relationship,
identifying_info(ro))
return create_boolean_expression("AND", pattern)
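# Illustrative example for convert_domain_name_to_pattern (hypothetical values): a 1.x
# DomainName of "example.com" with a Resolved_To related Address object becomes roughly:
#   domain-name:value = 'example.com' AND domain-name:resolves_to_refs[*].value = '198.51.100.1'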
def convert_mutex_to_pattern(mutex):
if mutex.name:
return create_term("mutex:name", mutex.name.condition, make_constant(mutex.name.value))
else:
return None
def convert_network_connection_to_pattern(conn):
expressions = []
if conn.layer3_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer3_protocol.condition,
make_constant(conn.layer3_protocol.value.lower())))
if conn.layer4_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer4_protocol.condition,
make_constant(conn.layer4_protocol.value.lower())))
if conn.layer7_protocol is not None:
expressions.append(create_term("network-traffic:protocols[*]",
conn.layer7_protocol.condition,
make_constant(conn.layer7_protocol.value.lower())))
if conn.source_socket_address is not None:
if conn.source_socket_address.port is not None:
if conn.source_socket_address.port.port_value is not None:
expressions.append(create_term("network-traffic:src_port",
conn.source_socket_address.port.port_value.condition,
stix2.IntegerConstant(int(conn.source_socket_address.port.port_value))))
if conn.source_socket_address.port.layer4_protocol is not None:
expressions.append(
create_term("network-traffic:protocols[*]",
conn.source_socket_address.port.layer4_protocol.condition,
make_constant(conn.source_socket_address.port.layer4_protocol.value.lower())))
if conn.source_socket_address.ip_address is not None:
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.ip_address.address_value.condition,
make_constant(conn.source_socket_address.ip_address.address_value.value)))
elif conn.source_socket_address.hostname is not None:
if conn.source_socket_address.hostname.is_domain_name and conn.source_socket_address.hostname.hostname_value is not None:
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.hostname.condition,
make_constant(conn.source_socket_address.hostname.hostname_value)))
elif (conn.source_socket_address.hostname.naming_system is not None and
any(x.value == "DNS" for x in conn.source_socket_address.hostname.naming_system)):
expressions.append(
create_term("network-traffic:src_ref.value",
conn.source_socket_address.hostname.condition,
make_constant(conn.source_socket_address.hostname.hostname_value)))
if conn.destination_socket_address is not None:
if conn.destination_socket_address.port is not None:
if conn.destination_socket_address.port.port_value is not None:
expressions.append(
create_term("network-traffic:dst_port",
conn.destination_socket_address.port.port_value.condition,
stix2.IntegerConstant(int(conn.destination_socket_address.port.port_value))))
if conn.destination_socket_address.port.layer4_protocol is not None:
expressions.append(
create_term("network-traffic:protocols[*]",
conn.destination_socket_address.port.layer4_protocol.condition,
make_constant(
conn.destination_socket_address.port.layer4_protocol.value.lower())))
if conn.destination_socket_address.ip_address is not None:
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.ip_address.address_value.condition,
make_constant(conn.destination_socket_address.ip_address.address_value.value)))
elif conn.destination_socket_address.hostname is not None:
hostname = conn.destination_socket_address.hostname
if hostname.is_domain_name and hostname.hostname_value is not None:
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.hostname.condition,
make_constant(conn.destination_socket_address.hostname.hostname_value)))
elif (conn.destination_socket_address.hostname.naming_system is not None and
any(x.value == "DNS" for x in conn.destination_socket_address.hostname.naming_system)):
expressions.append(
create_term("network-traffic:dst_ref.value",
conn.destination_socket_address.hostname.condition,
make_constant(conn.destination_socket_address.hostname.hostname_value)))
if conn.layer7_connections is not None:
if conn.layer7_connections.http_session is not None:
extension_expressions = convert_http_session_to_pattern(conn.layer7_connections.http_session)
if extension_expressions:
expressions.append(extension_expressions)
return create_boolean_expression("AND", expressions)
def convert_http_client_request_to_pattern(http_request):
expressions = []
if http_request.http_request_line is not None:
if http_request.http_request_line.http_method is not None:
term = add_comparison_expression(http_request.http_request_line.http_method,
"network-traffic:extensions.'http-request-ext'.request_method")
if term:
expressions.append(term)
if http_request.http_request_line.version is not None:
term = add_comparison_expression(http_request.http_request_line.version,
"network-traffic:extensions.'http-request-ext'.request_version")
if term:
expressions.append(term)
if http_request.http_request_header is not None:
if http_request.http_request_header.parsed_header is not None:
header = http_request.http_request_header.parsed_header
for prop_spec in _NETWORK_CONNECTION_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(header, prop_1x) and getattr(header, prop_1x):
value = getattr(header, prop_1x)
# handle non-String properties
if isinstance(value, Address):
value = getattr(value, "address_value")
elif isinstance(value, HostField):
value = getattr(value, "domain_name").value
elif isinstance(value, URI):
value = value.value
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
return create_boolean_expression("AND", expressions)
def convert_http_network_connection_extension(http):
if http.http_client_request is not None:
return convert_http_client_request_to_pattern(http.http_client_request)
_NETWORK_CONNECTION_PROPERTIES = [
["accept", "network-traffic:extensions.'http-request-ext'.request_header.Accept"],
["accept_charset", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Charset'"],
["accept_language", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Language'"],
["accept_datetime", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Datetime'"],
["accept_encoding", "network-traffic:extensions.'http-request-ext'.request_header.'Accept-Encoding'"],
["authorization", "network-traffic:extensions.'http-request-ext'.request_header.Authorization"],
["cache_control", "network-traffic:extensions.'http-request-ext'.request_header.'Cache-Control'"],
["connection", "network-traffic:extensions.'http-request-ext'.request_header.Connection"],
["cookie", "network-traffic:extensions.'http-request-ext'.request_header.Cookie"],
["content_length", "network-traffic:extensions.'http-request-ext'.request_header.'Content-Length'"],
["content_md5", "network-traffic:extensions.'http-request-ext'.request_header.'Content-MD5'"],
["content_type", "network-traffic:extensions.'http-request-ext'.request_header.'Content-Type'"],
["date", "network-traffic:extensions.'http-request-ext'.request_header.Date"],
["expect", "network-traffic:extensions.'http-request-ext'.request_header.Expect"],
["from_", "network-traffic:extensions.'http-request-ext'.request_header.From"],
["host", "network-traffic:extensions.'http-request-ext'.request_header.Host"],
["if_match", "network-traffic:extensions.'http-request-ext'.request_header.'If-Match'"],
["if_modified_since", "network-traffic:extensions.'http-request-ext'.request_header.'If-Modified-Since'"],
["if_none_match", "network-traffic:extensions.'http-request-ext'.request_header.'If-None-Match'"],
["if_range", "network-traffic:extensions.'http-request-ext'.request_header.'If-Range'"],
["if_unmodified_since", "network-traffic:extensions.'http-request-ext'.request_header.'If-Unmodified-Since'"],
["max_forwards", "network-traffic:extensions.'http-request-ext'.request_header.'Max-Forwards'"],
["pragma", "network-traffic:extensions.'http-request-ext'.request_header.Pragma"],
["proxy_authorization", "network-traffic:extensions.'http-request-ext'.request_header.'Proxy-Authorization'"],
["range", "network-traffic:extensions.'http-request-ext'.request_header.Range"],
["referer", "network-traffic:extensions.'http-request-ext'.request_header.Referer"],
["te", "network-traffic:extensions.'http-request-ext'.request_header.TE"],
["user_agent", "network-traffic:extensions.'http-request-ext'.request_header.'User-Agent'"],
["via", "network-traffic:extensions.'http-request-ext'.request_header.Via"],
["warning", "network-traffic:extensions.'http-request-ext'.request_header.Warning"],
["dnt", "network-traffic:extensions.'http-request-ext'.request_header.DNT"],
["x_requested_with", "network-traffic:extensions.'http-request-ext'.request_header.'X-Requested-With'"],
["x_forwarded_for", "network-traffic:extensions.'http-request-ext'.request_header.'X-Forwarded-For'"],
["x_att_deviceid", "network-traffic:extensions.'http-request-ext'.request_header.'X-ATT-DeviceId'"],
["x_wap_profile", "network-traffic:extensions.'http-request-ext'.request_header.'X-Wap-Profile'"],
]
def convert_network_packet_to_pattern(packet):
if packet.internet_layer:
internet_layer = packet.internet_layer
if internet_layer.ipv4 or internet_layer.ipv6:
warn("Internet_Layer/IP_Packet content not supported in STIX 2.0", 424)
else:
if internet_layer.icmpv4:
icmp_header = internet_layer.icmpv4.icmpv4_header
elif internet_layer.icmpv6:
icmp_header = internet_layer.icmpv6.icmpv6_header
else:
return None
expressions = []
if icmp_header.type_:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_hex",
icmp_header.type_.condition,
stix2.HexConstant(str(icmp_header.type_))))
if icmp_header.code:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_code",
icmp_header.code.condition,
stix2.HexConstant(str(icmp_header.code))))
            return create_boolean_expression("AND", expressions)
def convert_http_session_to_pattern(session):
if session.http_request_response:
requests, responses = split_into_requests_and_responses(session.http_request_response)
if len(responses) != 0:
warn("HTTPServerResponse type is not supported in STIX 2.0", 429)
if len(requests) >= 1:
expression = convert_http_client_request_to_pattern(requests[0])
if len(requests) > 1:
warn("Only HTTP_Request_Response used for http-request-ext, using first value", 512)
return expression
def convert_socket_options_to_pattern(options):
expressions = []
for prop_name in SOCKET_OPTIONS:
prop = getattr(options, prop_name)
if prop:
expressions.append(create_term("network-traffic:extensions.'socket-ext'.options." + prop_name.upper(),
"Equals",
prop))
return create_boolean_expression("AND", expressions)
_SOCKET_MAP = {
"is_blocking": "network-traffic:extensions.'socket-ext'.is_blocking",
"is_listening": "network-traffic:extensions.'socket-ext'.is_listening",
"type_": "network-traffic:extensions.'socket-ext'.socket_type",
"domain": "network-traffic:extensions.'socket-ext'.socket_type",
"socket_descriptor": "network-traffic:extensions.'socket-ext'.socket_descriptor"
}
def convert_network_socket_to_pattern(socket):
expressions = []
    # _SOCKET_MAP is a dict, so iterate its items to pair each 1.x property with its object path
    for prop_1x, object_path in _SOCKET_MAP.items():
if hasattr(socket, prop_1x) and getattr(socket, prop_1x):
term = add_comparison_expression(getattr(socket, prop_1x), object_path)
if term:
expressions.append(term)
if socket.address_family:
if socket.address_family in ADDRESS_FAMILY_ENUMERATION:
expressions.append(add_comparison_expression(socket.address_family,
"network-traffic:extensions.'socket-ext'.address_family"))
else:
warn("%s in is not a member of the %s enumeration", 627, socket.address_family, "address family")
if socket.options:
expressions.append(convert_socket_options_to_pattern(socket.options))
if socket.local_address:
warn("Network_Socket.local_address content not supported in STIX 2.0", 424)
if socket.remote_address:
warn("Network_Socket.remote_address content not supported in STIX 2.0", 424)
if socket.protocol:
expressions.append(add_comparison_expression(socket.protocol,
"network-traffic:protocols[*]"))
return create_boolean_expression("AND", expressions)
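# Illustrative example for convert_network_socket_to_pattern (hypothetical values): a 1.x
# NetworkSocket with protocol "tcp" and address_family "AF_INET" becomes roughly:
#   network-traffic:extensions.'socket-ext'.address_family = 'AF_INET' AND network-traffic:protocols[*] = 'tcp'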
####################################################################################################################
def convert_observable_composition_to_pattern(obs_comp):
expressions = []
for obs in obs_comp.observables:
term = convert_observable_to_pattern(obs)
if term:
expressions.append(term)
if expressions:
return create_boolean_expression(obs_comp.operator, expressions)
else:
return ""
def convert_object_to_pattern(obj, obs_id):
related_objects = obj.related_objects
prop = obj.properties
expression = None
if prop:
if isinstance(prop, Address):
expression = convert_address_to_pattern(prop)
elif isinstance(prop, URI):
expression = convert_uri_to_pattern(prop)
elif isinstance(prop, EmailMessage):
expression = convert_email_message_to_pattern(prop)
elif isinstance(prop, File):
expression = convert_file_to_pattern(prop)
elif isinstance(prop, WinRegistryKey):
expression = convert_registry_key_to_pattern(prop)
elif isinstance(prop, Process):
expression = convert_process_to_pattern(prop)
elif isinstance(prop, DomainName):
expression = convert_domain_name_to_pattern(prop, related_objects)
elif isinstance(prop, Mutex):
expression = convert_mutex_to_pattern(prop)
elif isinstance(prop, NetworkConnection):
expression = convert_network_connection_to_pattern(prop)
elif isinstance(prop, Account):
expression = convert_account_to_pattern(prop)
elif isinstance(prop, HTTPSession):
expression = convert_http_session_to_pattern(prop)
elif isinstance(prop, NetworkPacket):
expression = convert_network_packet_to_pattern(prop)
elif isinstance(prop, NetworkSocket):
expression = convert_network_socket_to_pattern(prop)
else:
warn("%s found in %s cannot be converted to a pattern, yet.", 808, text_type(obj.properties), obs_id)
expression = UnconvertedTerm(obs_id)
if prop.custom_properties is not None:
object_path_root = convert_cybox_class_name_to_object_path_root_name(prop)
if object_path_root:
if expression:
expression = create_boolean_expression("AND", [expression,
convert_custom_properties(prop.custom_properties,
object_path_root)])
else:
expression = convert_custom_properties(prop.custom_properties, object_path_root)
if not expression:
warn("No pattern term was created from %s", 422, obs_id)
expression = UnconvertedTerm(obs_id)
elif obj.id_:
add_object_id_value(obj.id_, obs_id)
return expression
def match_1x_id_with_20_id(id_1x, id_20):
id_1x_split = id_1x.split("-", 1)
id_20_split = id_20.split("--")
return id_1x_split[1] == id_20_split[1]
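# Illustrative example for match_1x_id_with_20_id (hypothetical ids): "example:Observable-abc123"
# and "observed-data--abc123" both end in "abc123", so they are treated as the same object.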
def find_definition(idref, sdos):
for obs in sdos:
if match_1x_id_with_20_id(idref, obs["id"]):
info("Found definition for %s", 204, idref)
return obs
# warn (idref + " cannot be resolved")
return None
def negate_expression(obs):
return hasattr(obs, "negate") and obs.negate
def convert_observable_to_pattern(obs):
try:
set_dynamic_variable("current_observable", obs)
if negate_expression(obs):
warn("Negation of %s is not handled yet", 810, obs.id_)
return convert_observable_to_pattern_without_negate(obs)
finally:
pop_dynamic_variable("current_observable")
def convert_observable_to_pattern_without_negate(obs):
if obs.observable_composition is not None:
pattern = convert_observable_composition_to_pattern(obs.observable_composition)
if pattern and obs.id_:
add_to_pattern_cache(obs.id_, pattern)
return pattern
elif obs.object_ is not None:
pattern = convert_object_to_pattern(obs.object_, obs.id_)
if pattern:
add_to_pattern_cache(obs.id_, pattern)
if obs.object_.related_objects:
related_patterns = []
for o in obs.object_.related_objects:
# save pattern for later use
if o.id_ and not id_in_pattern_cache(o.id_):
new_pattern = convert_object_to_pattern(o, o.id_)
if new_pattern:
related_patterns.append(new_pattern)
add_to_pattern_cache(o.id_, new_pattern)
if pattern:
related_patterns.append(pattern)
return create_boolean_expression("AND", related_patterns)
else:
return pattern
elif obs.idref is not None:
if id_in_pattern_cache(obs.idref):
return get_pattern_from_cache(obs.idref)
else:
# resolve now if possible, and remove from observed_data
if id_in_observable_mappings(obs.idref):
return convert_observable_to_pattern(get_obs_from_mapping(obs.idref))
return IdrefPlaceHolder(obs.idref)
# patterns can contain idrefs which might need to be resolved, because the order in which ids and idrefs appear is not guaranteed
def interatively_resolve_placeholder_refs():
if pattern_cache_is_empty():
return
done = False
while not done:
# collect all of the fully resolved idrefs
fully_resolved_idrefs = []
for idref, expr in get_items_from_pattern_cache():
if expr and not expr.contains_placeholder():
# no PLACEHOLDER idrefs found in the expr, means this idref is fully resolved
fully_resolved_idrefs.append(idref)
# replace only fully resolved idrefs
change_made = False
for fr_idref in fully_resolved_idrefs:
for idref, expr in get_items_from_pattern_cache():
if expr:
change_made, expr = expr.replace_placeholder_with_idref_pattern(fr_idref)
# a change will be made, which could introduce a new placeholder id into the expr
if change_made:
add_to_pattern_cache(idref, expr) # PATTERN_CACHE[idref] = expr
done = not change_made
def is_placeholder(thing):
    return thing.find("PLACEHOLDER") != -1
def fix_pattern(pattern):
if not pattern_cache_is_empty():
# info(text_type(PATTERN_CACHE))
# info("pattern is: " + pattern)
        if pattern and pattern.contains_placeholder():
for idref in get_ids_from_pattern_cache():
pattern.replace_placeholder_with_idref_pattern(idref)
return pattern
def convert_indicator_to_pattern(ind):
try:
set_dynamic_variable("current_indicator", ind)
if ind.negate:
warn("Negation of %s is not handled yet", 810, ind.id_)
return convert_indicator_to_pattern_without_negate(ind)
finally:
pop_dynamic_variable("current_indicator")
def convert_indicator_to_pattern_without_negate(ind):
if ind.composite_indicator_expression is not None:
pattern = convert_indicator_composition_to_pattern(ind.composite_indicator_expression)
if pattern and ind.id_:
add_to_pattern_cache(ind.id_, pattern)
return pattern
elif ind.observable is not None:
pattern = convert_observable_to_pattern(ind.observable)
if pattern:
add_to_pattern_cache(ind.id_, pattern)
return pattern
elif ind.idref is not None:
if id_in_pattern_cache(ind.idref):
return get_pattern_from_cache(ind.idref)
else:
# resolve now if possible, and remove from observed_data
if id_in_observable_mappings(ind.idref):
return convert_observable_to_pattern(get_obs_from_mapping(ind.idref))
return IdrefPlaceHolder(ind.idref)
def convert_indicator_composition_to_pattern(ind_comp):
expressions = []
for ind in ind_comp.indicators:
term = convert_indicator_to_pattern(ind)
if term:
expressions.append(term)
else:
warn("No term was yielded for %s", 422, ind.id_ or ind.idref)
if expressions:
return create_boolean_expression(ind_comp.operator, expressions)
else:
return ""
def remove_pattern_objects(bundle_instance):
all_new_ids_with_patterns = []
for old_id in get_ids_from_pattern_cache():
new_id = get_id_value(old_id)
if new_id and len(new_id) == 1:
all_new_ids_with_patterns.append(new_id[0])
if not KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS:
remaining_objects = []
for obj in bundle_instance["objects"]:
if obj["type"] != "observed-data" or obj["id"] not in all_new_ids_with_patterns:
remaining_objects.append(obj)
else:
warn("%s is used as a pattern, therefore it is not included as an observed_data instance", 423,
obj["id"])
bundle_instance["objects"] = remaining_objects
if not KEEP_OBSERVABLE_DATA_USED_IN_PATTERNS:
for obj in bundle_instance["objects"]:
if obj["type"] == "report":
remaining_object_refs = []
if "object_refs" in obj:
for ident in obj["object_refs"]:
if not ident.startswith("observed-data") or ident not in all_new_ids_with_patterns:
remaining_object_refs.append(ident)
obj["object_refs"] = remaining_object_refs
# TODO: only remove indicators that were involved ONLY as sub-indicators within composite indicator expressions
# if not KEEP_INDICATORS_USED_IN_COMPOSITE_INDICATOR_EXPRESSION and "indicators" in bundle_instance:
# remaining_indicators = []
# for ind in bundle_instance["indicators"]:
# if ind["id"] not in all_new_ids_with_patterns:
# remaining_indicators.append(ind)
# bundle_instance["indicators"] = remaining_indicators
| bsd-3-clause | 1,290,568,712,240,709,000 | 43.676338 | 142 | 0.613797 | false |
Osthanes/appscan_static_analyzer | appscan_check.py | 1 | 36174 | #!/usr/bin/python
#***************************************************************************
# Copyright 2015 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#***************************************************************************
import json
import logging
import logging.handlers
import os
import os.path
import sys
import time
import timeit
from datetime import datetime
from subprocess import call, Popen, PIPE
import python_utils
APP_SECURITY_SERVICE='Application Security on Cloud'
DEFAULT_SERVICE=APP_SECURITY_SERVICE
DEFAULT_SERVICE_PLAN="free"
DEFAULT_SERVICE_NAME=DEFAULT_SERVICE
DEFAULT_SCANNAME="staticscan"
DEFAULT_OLD_SCANS_TO_KEEP="5"
DEFAULT_OLD_SCANS_TO_KEEP_INT=5
# time to sleep between checks when waiting on pending jobs, in seconds
SLEEP_TIME=15
# check cli args, set globals appropriately
def parse_args ():
parsed_args = {}
parsed_args['loginonly'] = False
parsed_args['forcecleanup'] = False
parsed_args['checkstate'] = False
parsed_args['debug'] = False
parsed_args['help'] = False
for arg in sys.argv:
if arg == "--loginonly":
# only login, no scanning or submission
parsed_args['loginonly'] = True
if arg == "--forcecleanup":
# cleanup/cancel all complete jobs, and delete irx files
parsed_args['forcecleanup'] = True
if arg == "--checkstate":
# just check state of existing jobs, don't scan or submit
# any new ones
parsed_args['checkstate'] = True
if arg == "--debug":
# enable debug mode, can also be done with python_utils.DEBUG env var
parsed_args['debug'] = True
python_utils.DEBUG = "1"
if arg == "--help":
# just print help and return
parsed_args['help'] = True
return parsed_args
# print a quick usage/help statement
def print_help ():
print "usage: appscan_check.py [options]"
print
print "\toptions:"
print "\t --loginonly : get credentials and login to appscan only"
print "\t --forcecleanup : on exit, force removal of pending jobs from this run"
print "\t --checkstate : check state of existing job(s), no new submission"
print "\t --debug : get additional debug output"
print "\t --help : print this help message and exit"
print
# create a template for a current scan. this will be in the format
# "<scanname>-<version>-" where scanname comes from env var
# 'SUBMISSION_NAME', and version comes from env var 'APPLICATION_VERSION'
def get_scanname_template (include_version=True):
# check the env for name of the scan, else use default
if os.environ.get('SUBMISSION_NAME'):
scanname=os.environ.get('SUBMISSION_NAME')
elif os.environ.get('IDS_PROJECT_NAME'):
scanname=os.environ.get('IDS_PROJECT_NAME').replace(" | ", "-")
else:
scanname=DEFAULT_SCANNAME
if include_version:
# if we have an application version, append it to the scanname
if os.environ.get('APPLICATION_VERSION'):
scanname = scanname + "-" + os.environ.get('APPLICATION_VERSION')
scanname = scanname + "-"
return scanname
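# Illustrative example for get_scanname_template (hypothetical values): with SUBMISSION_NAME="myapp"
# and APPLICATION_VERSION="1.2", get_scanname_template() returns "myapp-1.2-", and the scans
# submitted below are then named "myapp-1.2-0", "myapp-1.2-1", ...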
# given userid and password, attempt to authenticate to appscan for
# future calls
def appscan_login (userid, password):
proc = Popen(["appscan.sh login -u " + userid + " -P " + password + ""],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if not "Authenticated successfully." in out:
raise Exception("Unable to login to Static Analysis service")
# callout to appscan to prepare a current irx file, return a set of
# the files created by the prepare
def appscan_prepare ():
# sadly, prepare doesn't tell us what file it created, so find
# out by a list compare before/after
oldIrxFiles = []
for file in os.listdir("."):
if file.endswith(".irx"):
oldIrxFiles.append(file)
# clean up the appscan client log so we can dump it on error if needed
# and only see the error from this call
logfileName = None
appscanDir = os.environ.get('APPSCAN_INSTALL_DIR')
if appscanDir:
logfileName = appscanDir+"/logs/client.log"
if os.path.isfile( logfileName ):
os.remove( logfileName )
proc = Popen(["appscan.sh prepare"],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if not "IRX file generation successful" in out:
if "An IRX file was created, but it may be incomplete" in err:
# some jar/war/ear files were not scannable, but some were.
# attempt the submission
python_utils.LOGGER.warning("Not all files could be scanned, but the scan has been submitted for those which were")
else:
if python_utils.DEBUG:
call(["grep -H \".*\" logs/*.log"], shell=True, cwd=appscanDir)
raise Exception("Unable to prepare code for analysis by Static Analysis service: " +
err)
# what files are there now?
newIrxFiles = []
for file in os.listdir("."):
if file.endswith(".irx"):
newIrxFiles.append(file)
# which files are new?
newIrxFiles = set(newIrxFiles).difference(oldIrxFiles)
logMessage = "Generated scans as file(s):"
for file in newIrxFiles:
logMessage = logMessage + "\n\t" + file
python_utils.LOGGER.info(logMessage)
return newIrxFiles
# submit a created irx file to appscan for analysis
def appscan_submit (filelist):
if filelist==None:
raise Exception("No files to analyze")
scanlist = []
index = 0
for filename in filelist:
submit_scanname = get_scanname_template() + str(index)
proc = Popen(["appscan.sh queue_analysis -f " + filename +
" -n " + submit_scanname],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
transf_found = False
for line in out.splitlines() :
python_utils.LOGGER.debug("Submit response line: " + line)
if "100% transferred" in line:
# done transferring
transf_found = True
elif not transf_found:
# not done transferring yet
continue
elif line:
# done, if line isn't empty, is an id
scanlist.append(line)
python_utils.LOGGER.info("Job for file " + filename + " was submitted as scan " + submit_scanname + " and assigned id " + line)
else:
# empty line, skip it
continue
if err:
python_utils.LOGGER.warning("Submit error response: " + str(err))
index = index + 1
return scanlist, err
# get appscan list of current jobs
def appscan_list ():
proc = Popen(["appscan.sh list"],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
scanlist = []
for line in out.splitlines() :
if "No analysis jobs" in line:
# no jobs, return empty list
python_utils.LOGGER.debug("No analysis jobs found")
return []
elif line:
# done, if line isn't empty, is an id
scanlist.append(line)
else:
# empty line, skip it
continue
python_utils.LOGGER.debug("Analysis jobs found: " + str(scanlist))
return scanlist
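# Illustrative example for appscan_list (hypothetical ids): the returned list looks like
#   ["9b344fc7-bc70-e411-b922-005056924f9b", "1c2d3e4f-..."]
# i.e. one job id string per analysis job currently known to the service.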
# translate a job state to a pretty name
# the CLI now returns the state string directly; this mapping is kept in case it is needed later and as a reference list of possible stages
def get_state_name (state):
return {
0 : "Pending",
1 : "Starting",
2 : "Running",
3 : "FinishedRunning",
4 : "FinishedRunningWithErrors",
5 : "PendingSupport",
6 : "Ready",
7 : "ReadyIncomplete",
8 : "FailedToScan",
9 : "ManuallyStopped",
10 : "None",
11 : "Initiating",
12 : "MissingConfiguration",
13 : "PossibleMissingConfiguration"
}.get(state, "Unknown")
# translate a job state from a name to a number
def get_state_num (state):
val = {
"pending" : 0,
"starting" : 1,
"running" : 2,
"finishedrunning" : 3,
"finishedrunningwitherrors" : 4,
"pendingsupport" : 5,
"ready" : 6,
"readyincomplete" : 7,
"failedtoscan" : 8,
"manuallystopped" : 9,
"none" : 10,
"initiating" : 11,
"missingconfiguration" : 12,
"possiblemissingconfiguration" : 13
}.get(state.lower().strip(), 14)
python_utils.LOGGER.debug("Getting number for state: \""+str(state)+"\" ("+str(val)+")")
return val
# given a state, is the job completed
def get_state_completed (state):
return {
0 : False,
1 : False,
2 : False,
3 : True,
4 : True,
5 : False,
6 : True,
7 : True,
8 : True,
9 : True,
10 : True,
11 : False,
12 : True,
13 : True
}.get(get_state_num(state), True)
# given a state, was it completed successfully
def get_state_successful (state):
return {
0 : False,
1 : False,
2 : False,
3 : True,
4 : False,
5 : False,
6 : True,
7 : False,
8 : False,
9 : False,
10 : False,
11 : False,
12 : False,
13 : False
}.get(get_state_num(state), False)
# get status of a given job
def appscan_status (jobid):
if jobid == None:
raise Exception("No jobid to check status")
proc = Popen(["appscan.sh status -i " + str(jobid)],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
if "request is invalid" in err:
if python_utils.DEBUG:
python_utils.LOGGER.debug("error getting status: " + str(err))
raise Exception("Invalid jobid")
retval = str(out)
return retval
# cancel an appscan job
def appscan_cancel (jobid):
if jobid == None:
return
proc = Popen(["appscan.sh cancel -i " + str(jobid)],
shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
# parse a key=value line, return value
def parse_key_eq_val (line):
if line == None:
return None
eqIndex = line.find("=");
if eqIndex != -1:
return line[eqIndex+1:]
else:
return None
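# For example, parse_key_eq_val("JobStatus=6") returns "6", and
# parse_key_eq_val("no equals sign here") returns None.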
# extended info on a current appscan job. this comes back in a form
# similar to:
#NLowIssues=0
#ReadStatus=2
#NHighIssues=0
#Name=appscan.zip
#ScanEndTime=2014-11-20T13:56:04.497Z
#Progress=0
#RemainingFreeRescanMinutes=0
#ParentJobId=00000000-0000-0000-0000-000000000000
#EnableMailNotifications=false
#JobStatus=6
#NInfoIssues=0
#JobId=9b344fc7-bc70-e411-b922-005056924f9b
#NIssuesFound=0
#CreatedAt=2014-11-20T13:54:49.597Z
#UserMessage=Scan completed successfully. The report is ready.
#NMediumIssues=0
#Result=1
#
# parse it and return useful parts. in particular, returns
# a dict containing fields for "NLowIssues", "ReadStatus", et al
# per the list above
def appscan_info (jobid):
# setup default (empty) return
return_info = {}
return_info['NLowIssues'] = 0
return_info['ReadStatus'] = 0
return_info['NHighIssues'] = 0
return_info['Name'] = ""
return_info['ScanEndTime'] = None
return_info['Progress'] = 0
return_info['RemainingFreeRescanMinutes'] = 0
return_info['ParentJobId'] = ""
return_info['EnableMailNotifications'] = False
return_info['JobStatus'] = 0
return_info['NInfoIssues'] = 0
return_info['JobId'] = ""
return_info['NIssuesFound'] = 0
return_info['CreatedAt'] = None
return_info['UserMessage'] = ""
return_info['NMediumIssues'] = 0
return_info['Result'] = 0
if jobid == None:
return return_info
command = "appscan.sh info -i " + str(jobid)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
for line in out.splitlines() :
if "NLowIssues=" in line:
# number of low severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NLowIssues'] = int(tmpstr)
except ValueError:
return_info['NLowIssues']= 0
elif "NMediumIssues=" in line:
# number of medium severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NMediumIssues'] = int(tmpstr)
except ValueError:
return_info['NMediumIssues'] = 0
elif "NHighIssues=" in line:
# number of high severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NHighIssues'] = int(tmpstr)
except ValueError:
return_info['NHighIssues'] = 0
elif "NInfoIssues=" in line:
# number of info severity issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NInfoIssues'] = int(tmpstr)
except ValueError:
return_info['NInfoIssues'] = 0
elif "NIssuesFound=" in line:
# total number of issues found in the scan
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['NIssuesFound'] = int(tmpstr)
except ValueError:
return_info['NIssuesFound'] = 0
elif "Progress=" in line:
# current scan progress (0-100)
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['Progress'] = int(tmpstr)
except ValueError:
return_info['Progress'] = 0
elif "RemainingFreeRescanMinutes=" in line:
# what the name says
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['RemainingFreeRescanMinutes'] = int(tmpstr)
except ValueError:
return_info['RemainingFreeRescanMinutes'] = 0
elif "JobStatus=" in line:
# current job status
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['JobStatus'] = int(tmpstr)
except ValueError:
return_info['JobStatus'] = 0
elif "ReadStatus=" in line:
# not sure what this is
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['ReadStatus'] = int(tmpstr)
except ValueError:
return_info['ReadStatus'] = 0
elif "Result=" in line:
# final return code
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['Result'] = int(tmpstr)
except ValueError:
return_info['Result'] = 0
elif "ScanEndTime=" in line:
# timestamp when this scan completed
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['ScanEndTime'] = datetime.strptime(tmpstr, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return_info['ScanEndTime'] = None
elif "CreatedAt=" in line:
# timestamp when this job was created
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
try:
return_info['CreatedAt'] = datetime.strptime(tmpstr, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return_info['CreatedAt'] = None
elif "Name=" in line:
# job name
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['Name'] = tmpstr
elif "JobId=" in line:
# job ID
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['JobId'] = tmpstr
elif "ParentJobId=" in line:
# parent job ID
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['ParentJobId'] = tmpstr
elif "UserMessage=" in line:
# user displayable message, current job state
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
return_info['UserMessage'] = tmpstr
elif "EnableMailNotifications=" in line:
# are email notifications setup (doesn't matter, we don't use it)
tmpstr = parse_key_eq_val(line)
if tmpstr != None:
if tmpstr.lower() in ("yes", "true"):
return_info['EnableMailNotifications'] = True
else:
return_info['EnableMailNotifications'] = False
return return_info
# get the result file for a given job
def appscan_get_result (jobid, scan_name):
if jobid == None:
raise Exception("No jobid to get results")
# App name might have a space.
scan_name = scan_name.replace(" ", "-");
# Get the appscan zip file
proc = Popen(["appscan.sh get_result -i " + str(jobid) + " -d appscan-" + str(scan_name) + ".zip -t zip"],
shell=True, stdout=PIPE, stderr=PIPE, cwd=os.environ.get('EXT_DIR'))
out, err = proc.communicate();
print "Out = " + out
print "Err = " + err
# get the result file for a given job
def save_job_result (scan_name, job_result):
# App name might have a space.
scan_name = scan_name.replace(" ", "-");
# Store the job result summary
with open(os.environ.get('EXT_DIR') + "/appscan-" + str(scan_name) + ".json", 'w') as outfile:
json.dump(job_result, outfile, sort_keys = True)
# get the result file for a given job
def upload_results_to_dra ():
proc = Popen(["dra.sh"],
shell=True, stdout=PIPE, stderr=PIPE, cwd=os.environ.get('EXT_DIR'))
out, err = proc.communicate();
print "Out = " + out
print "Err = " + err
# if the job we would run is already up (and either pending or complete),
# we just want to get state (and wait for it if needed), not create a whole
# new submission. for the key, we use the job name, compared to the
# name template as per get_scanname_template()
def check_for_existing_job ( ignore_older_jobs = True):
alljobs = appscan_list()
if alljobs == None:
# no jobs, ours can't be there
return None
# get the name we're looking for
job_name = get_scanname_template( include_version = ignore_older_jobs )
joblist = []
found = False
for jobid in alljobs:
results = appscan_info(jobid)
python_utils.LOGGER.debug("Results for "+jobid+": "+ str(results))
if results["Name"].startswith(job_name):
joblist.append(jobid)
found = True
if found:
return joblist
else:
return None
# we don't want too many old copies of the job hanging around; they
# make a mess and are hard to read. prune old copies here
def cleanup_old_jobs ():
# see how many copies we're going to keep
try:
count_to_keep = int(os.getenv('OLD_SCANS_TO_KEEP', DEFAULT_OLD_SCANS_TO_KEEP))
except ValueError:
count_to_keep = DEFAULT_OLD_SCANS_TO_KEEP_INT
# if the count to keep is 0 or negative, keep all copies
if count_to_keep < 1:
return
joblist = check_for_existing_job( ignore_older_jobs = False )
if joblist == None or len(joblist) <= count_to_keep:
        # related job count is within the number of jobs to keep, do nothing
return
# too many jobs! remove the oldest ones (cancel if necessary)
if python_utils.DEBUG:
python_utils.LOGGER.debug("Found " + str(len(joblist)) + " jobs pending with limit " + str(count_to_keep))
    # make a sorted list of these jobs (yes, this is an O(n**2) algorithm, but
    # this should always be a fairly short list of scans)
s_jobs = []
for job in joblist:
results = appscan_info(job)
# if no results or time, this is not a valid job, skip it
if (results['CreatedAt'] == None):
continue
# put it in the right spot in the list
i = 0
while i < len(s_jobs):
if results['CreatedAt'] > s_jobs[i]['CreatedAt']:
# found right place
if python_utils.DEBUG:
python_utils.LOGGER.debug("Insert job " + str(results['Name']) + " at index " + str(i) + " for timestamp " + str(results['CreatedAt']))
s_jobs.insert(i, results)
break
i += 1
if i==len(s_jobs):
# right place is the end
if python_utils.DEBUG:
python_utils.LOGGER.debug("Append job " + str(results['Name']) + " at index " + str(i) + " for timestamp " + str(results['CreatedAt']))
s_jobs.append(results)
# now cleanup all jobs after the 'n' we're supposed to keep
for index, res in enumerate(s_jobs):
if index<count_to_keep:
if python_utils.DEBUG:
python_utils.LOGGER.debug("keeping: " + str(index) + " \"" + res['Name'] + "\" : " + str(res['JobId']))
else:
if python_utils.DEBUG:
python_utils.LOGGER.debug("cleaning: " + str(index) + " \"" + res['Name'] + "\" : " + str(res['JobId']))
appscan_cancel(res['JobId'])
# and we're done
# wait for a given set of scans to complete and, if successful,
# download the results
def wait_for_scans (joblist):
    # create array of the job results in json format
jobResults = []
# were all jobs completed on return
all_jobs_complete = True
# number of high sev issues in completed jobs
high_issue_count = 0
med_issue_count=0
python_utils.LOGGER.debug("Waiting for joblist: "+str(joblist))
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
for jobid in joblist:
try:
while True:
state = appscan_status(jobid)
python_utils.LOGGER.info("Job " + str(jobid) + " in state " + state)
if get_state_completed(state):
results = appscan_info(jobid)
if get_state_successful(state):
high_issue_count += results["NHighIssues"]
med_issue_count += results["NMediumIssues"]
python_utils.LOGGER.info("Analysis successful (" + results["Name"] + ")")
#print "\tOther Message : " + msg
job_result = { 'job_name': results["Name"],
'job_id': jobid,
'status': "successful",
'high_severity_issues': int(str(results["NHighIssues"])),
'medium_severity_issues': int(str(results["NMediumIssues"])),
'low_severity_issues': int(str(results["NLowIssues"])),
'info_severity_issues': int(str(results["NInfoIssues"])),
'url': dash}
# Search for file name results["Name"] + "*.zip"
if os.environ.get('DRA_IS_PRESENT') == "1":
appscan_get_result(jobid, results["Name"]);
save_job_result(results["Name"], job_result);
#appscan_get_result(jobid)
print python_utils.LABEL_GREEN + python_utils.STARS
print "Analysis successful for job \"" + results["Name"] + "\""
print "\tHigh Severity Issues : " + str(results["NHighIssues"])
print "\tMedium Severity Issues : " + str(results["NMediumIssues"])
print "\tLow Severity Issues : " + str(results["NLowIssues"])
print "\tInfo Severity Issues : " + str(results["NInfoIssues"])
if dash != None:
print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
f = open("result_url","w")
f.write(dash)
f.close()
print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
# append results to the jobResults for the json format
jobResults.append(job_result)
else:
python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")
# append results to the jobResults for the json format
jobResults.append({'job_name': results["Name"],
'job_id': jobid,
'status': "unsuccessful"})
break
else:
time_left = python_utils.get_remaining_wait_time()
if (time_left > SLEEP_TIME):
time.sleep(SLEEP_TIME)
else:
# ran out of time, flag that at least one job didn't complete
all_jobs_complete = False
# get what info we can on this job
results = appscan_info(jobid)
# notify the user
print python_utils.LABEL_RED + python_utils.STARS
print "Analysis incomplete for job \"" + results["Name"] + "\""
print "\t" + str(results["Progress"]) + "% complete"
if dash != None:
print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
f = open("result_url","w")
f.write(dash)
f.close()
print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
print python_utils.STARS + python_utils.LABEL_NO_COLOR
# append results to the jobResults for the json format
jobResults.append({'job_name': results["Name"],
'job_id': jobid,
'status': "incomplete",
'percentage_complete': int(str(results["Progress"]))})
# and continue to get state for other jobs
break
except Exception, e:
# bad id, skip it
if python_utils.DEBUG:
python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))
# generate appscan-result.json file
appscan_result = {'all_jobs_complete': all_jobs_complete,
'high_issue_count': high_issue_count,
'medium_issue_count': med_issue_count,
'job_results': jobResults}
appscan_result_file = './appscan-result.json'
with open(appscan_result_file, 'w') as outfile:
json.dump(appscan_result, outfile, sort_keys = True)
if os.environ.get('DRA_IS_PRESENT') == "1":
upload_results_to_dra()
return all_jobs_complete, high_issue_count, med_issue_count
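# The generated appscan-result.json looks roughly like this (illustrative values only):
#   {"all_jobs_complete": true, "high_issue_count": 0, "medium_issue_count": 2,
#    "job_results": [{"job_name": "myapp-1.2-0", "job_id": "...", "status": "successful",
#                     "high_severity_issues": 0, "medium_severity_issues": 2,
#                     "low_severity_issues": 5, "info_severity_issues": 7, "url": "..."}]}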
# begin main execution sequence
try:
parsed_args = parse_args()
if parsed_args['help']:
print_help()
sys.exit(0)
python_utils.LOGGER = python_utils.setup_logging()
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
command='{path}/utilities/sendMessage.sh -l info -m \"Starting static security scan\"'.format(path=python_utils.EXT_DIR)
if python_utils.DEBUG:
print "running command " + command
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
else:
if python_utils.DEBUG:
print "sendMessage.sh not found, notifications not attempted"
python_utils.WAIT_TIME = python_utils.get_remaining_wait_time(first = True)
python_utils.LOGGER.info("Getting credentials for Static Analysis service")
creds = python_utils.get_credentials_for_non_binding_service(service=APP_SECURITY_SERVICE)
python_utils.LOGGER.info("Connecting to Static Analysis service")
appscan_login(creds['bindingid'],creds['password'])
# allow testing connection without full job scan and submission
if parsed_args['loginonly']:
python_utils.LOGGER.info("LoginOnly set, login complete, exiting")
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(0)
    # track any irx files generated in this run so forced cleanup can remove them
    files_to_submit = []

    # if checkstate, don't really do a scan, just check state of current outstanding ones
    if parsed_args['checkstate']:
# for checkstate, don't wait, just check current
python_utils.WAIT_TIME = 0
# see if we have related jobs
joblist = check_for_existing_job()
if joblist == None:
# no related jobs, get whole list
joblist = appscan_list()
else:
# if the job we would run is already up (and either pending or complete),
# we just want to get state (and wait for it if needed), not create a whole
# new submission
joblist = check_for_existing_job()
if joblist == None:
python_utils.LOGGER.info("Scanning for code submission")
files_to_submit = appscan_prepare()
python_utils.LOGGER.info("Submitting scans for analysis")
joblist, errMsg = appscan_submit(files_to_submit)
if (not joblist) or len(joblist) < len(files_to_submit):
if (not errMsg):
errMsg = "Check status of existing scans."
#Error, we didn't return as many jobs as we should have
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> could not successfully submit scan. {errMsg}\"'.format(path=python_utils.EXT_DIR,url=dash,errMsg=errMsg)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
python_utils.LOGGER.error('ERROR: could not successfully submit scan. {errMsg} {url}'.format(url=dash,errMsg=errMsg))
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(4)
python_utils.LOGGER.info("Waiting for analysis to complete")
else:
python_utils.LOGGER.info("Existing job found, connecting")
# check on pending jobs, waiting if appropriate
all_jobs_complete, high_issue_count, med_issue_count = wait_for_scans(joblist)
# force cleanup of all?
if parsed_args['forcecleanup']:
# cleanup the jobs we launched (since they're complete)
print "Cleaning up"
for job in joblist:
appscan_cancel(job)
# and cleanup the submitted irx files
for file in files_to_submit:
if os.path.isfile(file):
os.remove(file)
if os.path.isfile(file+".log"):
os.remove(file+".log")
else:
# cleanup old copies of this job
cleanup_old_jobs()
# if we didn't successfully complete jobs, return that we timed out
if not all_jobs_complete:
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> did not complete within {wait} minutes. Stage will need to be re-run after the scan completes.\"'.format(path=python_utils.EXT_DIR,url=dash,wait=python_utils.FULL_WAIT_TIME)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(2)
else:
if high_issue_count > 0:
# send slack notification
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> completed with {issues} high issues detected in the application.\"'.format(path=python_utils.EXT_DIR,url=dash, issues=high_issue_count)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(3)
if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
if med_issue_count > 0:
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='SLACK_COLOR=\"warning\" {path}/utilities/sendMessage.sh -l good -m \"<{url}|Static security scan> completed with no major issues.\"'.format(path=python_utils.EXT_DIR,url=dash)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
else:
dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
command='{path}/utilities/sendMessage.sh -l good -m \"<{url}|Static security scan> completed with no major issues.\"'.format(path=python_utils.EXT_DIR,url=dash)
proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate();
python_utils.LOGGER.debug(out)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(0)
except Exception, e:
python_utils.LOGGER.warning("Exception received", exc_info=e)
endtime = timeit.default_timer()
print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
sys.exit(1)
| apache-2.0 | -3,397,173,683,578,174,000 | 38.927152 | 268 | 0.565931 | false |
braams/shtoom | shtoom/ui/tkui/popups.py | 1 | 7401 |
from Tkinter import Toplevel, Tk
if __name__ == "__main__":
_ = lambda x:x
class Popup(Toplevel):
deferred = None
parent = None
def __init__(self, parent, addnl=None):
Toplevel.__init__(self)
self.initial_focus = self
self.parent = parent
self.addnl = addnl
self.body()
self.title('popup window')
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.showWindow()
def body(self):
pass
def cancel(self):
self.hideWindow()
if self.deferred:
d, self.deferred = self.deferred, None
if self.addnl is None:
d.callback(None)
else:
d.callback((None,self.addnl))
self.addnl = None
def getResult(self):
return None
def selected(self, option=None):
if option is None:
option = self.getResult()
self.hideWindow()
if self.deferred:
d, self.deferred = self.deferred, None
if self.addnl is None:
d.callback(option)
else:
d.callback((option,self.addnl))
self.addnl = None
def showWindow(self):
self.transient(self.parent)
self.geometry("+%d+%d" % (self.parent.winfo_rootx()+50,
self.parent.winfo_rooty()+50))
def hideWindow(self):
Toplevel.destroy(self)
class Dialog(Popup):
def __init__(self, parent, deferred, message, buttons, addnl=None):
self.message = message
self.buttons = buttons
self.deferred = deferred
Popup.__init__(self, parent, addnl)
def body(self):
from Tkinter import NW, E, Frame, Label, Button
self.top = Frame(self)
self.top.grid(row=1,column=1,sticky=E)
self.label = Label(self.top, text=self.message, justify='center')
self.label.grid(row=1, column=1, padx=5, pady=5,
columnspan=len(self.buttons),sticky=NW)
for n, b in enumerate(self.buttons):
b = Button(self.top, text=b, command=lambda b=b: self.selected(b))
b.grid(row=2, column=n, sticky=NW, pady=5, padx=5)
if self.initial_focus == self:
self.initial_focus = b
b.focus_set()
class AuthDialog(Popup):
message = _('Enter username and password\nfor "%(method)s" at "%(realm)s"')
def __init__(self, parent, deferred, method, realm, addnl=None):
self.deferred = deferred
self.method = method
self.realm = realm
self._saveOK = False
Popup.__init__(self, parent, addnl)
def _saveBoolean(self, *value):
self._saveOK = not self._saveOK
def getResult(self):
return (self.uentry.get(), self.pentry.get(), self._saveOK)
def body(self):
print "auth body"
from Tkinter import NW, E, W, Frame, Label, Button, Entry, Checkbutton
defargs = { 'padx':5, 'pady':5, 'sticky':W }
self.top = Frame(self)
self.top.grid(row=1,column=1,sticky=NW)
msg = self.message % { 'realm':self.realm, 'method':self.method }
self.label = Label(self.top, text=msg, justify='center')
self.label.grid(row=1, column=1, columnspan=4, **defargs)
self.ulabel = Label(self.top, text=_('User Name')+':', justify='left')
self.ulabel.grid(row=2, column=1, columnspan=2, **defargs)
self.uentry = Entry(self.top)
self.uentry.grid(row=2, column=3, columnspan=2, **defargs)
self.uentry.focus_set()
self.plabel = Label(self.top, text=_('Password')+':', justify='left')
self.plabel.grid(row=3, column=1, columnspan=2, **defargs)
self.pentry = Entry(self.top, show="*")
self.pentry.grid(row=3, column=3, columnspan=2, **defargs)
        self._saveOK = False
self.saveCheck = Checkbutton(self.top, command=self._saveBoolean)
self.saveCheck.grid(row=4, column=1, columnspan=1, **defargs)
self.savelabel = Label(self.top,
text=_('Save this username and password'))
self.savelabel.grid(row=4, column=2, columnspan=3, **defargs)
defargs['sticky'] = W
self.cancelb = Button(self.top, text=_('Cancel'), command=self.cancel)
self.cancelb.grid(row=5, column=3, columnspan=1, **defargs)
self.okb = Button(self.top, text=_('OK'), command=self.selected)
self.okb.grid(row=5, column=4, columnspan=1, **defargs)
class MovingDialog(Dialog):
"A Dialog that slides in on the bottom right"
# XXX Tk doesn't seem to want to allow the geometry to go off-screen :-(
finalOffset = 10
def showWindow(self):
# Make this an override-redirect
self.overrideredirect(1)
self._x, self._y = self.winfo_width(), self.winfo_height()
if self._x == 1 or self._y == 1:
# sometimes we're called before being laid out, argh
self._x = self._y = None
# screen size
self._sx = self.parent.winfo_screenwidth()
self._sy = self.parent.winfo_screenheight()
# final positions
if self._x is not None:
self._fx = self._sx - self._x - self.finalOffset
self._fy = self._sy - self._y - self.finalOffset
self.geometry("+%d+%d" % (self._fx, self._sy))
else:
# Not laid out yet.
self.geometry("+%d+%d" % (self._sx, self._sy))
reactor.callLater(0.01, self._moveWindow)
def _moveWindow(self):
if self._x is None:
x, y = self.winfo_rootx(), self.winfo_rooty()
self._x, self._y = self.winfo_width(), self.winfo_height()
self._fx = self._sx - self._x - self.finalOffset
self._fy = self._sy - self._y - self.finalOffset
print "final",(self._fx, self._fy)
newx = self._sx
newy = self._fy
else:
x, y = self.winfo_rootx(), self.winfo_rooty()
newx, newy = x - 2, y
print "window/geom", (self._x, self._y),(x,y)
if newx < self._fx:
newx = self._fx
self.geometry("+%d+%d" % (newx, newy))
if newx > self._fx:
print "move",(newx, newy), (self._fx, self._fy)
reactor.callLater(0.02, self._moveWindow)
def hideWindow(self):
Toplevel.destroy(self)
if __name__ == "__main__":
from twisted.internet.task import LoopingCall
from twisted.internet import defer
from twisted.internet import tksupport, reactor
def mainWindow():
global main
main = Tk(className='shtoom')
tksupport.install(main)
def optionClicked(option):
print "got option", option
reactor.stop()
def popupWindow():
global main
d = defer.Deferred()
popup = MovingDialog(main, d, 'hello world', ('OK', 'Cancel'))
d.addCallback(optionClicked)
def oops(failure):
print "arg", failure
def popupAuth():
print "popup"
d = defer.Deferred()
popup = AuthDialog(main, d, 'INVITE', 'fwd.pulver.com')
d.addCallback(optionClicked)
d.addErrback(oops)
def ping():
print "ping"
p = LoopingCall(ping)
p.start(0.5)
reactor.callLater(0, mainWindow)
reactor.callLater(1, popupAuth)
reactor.run()
| lgpl-2.1 | 8,494,601,776,254,829,000 | 31.460526 | 79 | 0.563437 | false |
shiaki/iterative-modelling | src/pcs_snap.py | 1 | 3204 | #!/usr/bin/python
work_dir = ''
import numpy as np
from scipy.io import FortranFile as ufmt
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# See GALAXY 14.50 Manual, Sec. 9.2, P54
header_dtype = [('n1', '<i4'), ('n2', '<i4'), ('n3', '<i4'),
('ncoor', '<i4'), ('np', '<i4'), ('time', '<f4'),
('pm', '<f4'), ('pertbn', '<i4')]
def save_snap_galaxy_pcs(filename, snap):
# unpack snapshot
cps = snap['cps']
# Get ptcl number
n1, n2, n3 = 0, 0, 0
if(cps.has_key('C1')): n1 = cps['C1']['N_pcs']
if(cps.has_key('C2')): n2 = cps['C2']['N_pcs']
if(cps.has_key('C3')): n3 = cps['C3']['N_pcs']
N_pcs = n1 + n2 + n3
# Make array
pcs = np.empty(shape = (N_pcs, 6), dtype = 'f4')
if n1 != 0: pcs[:n1] = cps['C1']['pcs']
if n2 != 0: pcs[n1: n1 + n2] = cps['C2']['pcs']
    if n3 != 0: pcs[n1 + n2: n1 + n2 + n3] = cps['C3']['pcs']
# prepare header,
header = np.empty(1, dtype = header_dtype)
header[0]['n1'] = n1
header[0]['n2'] = n2
header[0]['n3'] = n3
header[0]['ncoor'] = 6
header[0]['np'] = 5000
header[0]['time'] = snap['time']
header[0]['pm'] = snap['pm']
header[0]['pertbn'] = 0
# open a file, write the header
pcs_fs = ufmt(filename, 'w')
pcs_fs.write_record(header)
# write pcs array in batches of 5k ptcls
N_put, chunk_size = 0, 5000 * 6
pcs = pcs.reshape((-1,)) # into 1d array
while N_put < N_pcs * 6:
chunk_t = pcs[N_put: N_put + chunk_size]
pcs_fs.write_record(chunk_t)
N_put += chunk_t.size
pcs_fs.close()
return 0
def read_snap_galaxy_pcs(filename):
pcs_ds = ufmt(filename, 'r')
header = pcs_ds.read_record(dtype = header_dtype)[0]
# read header info / GALAXY 14.50 Manual, 9.2
n1, n2, n3 = header['n1'], header['n2'], header['n3']
N_pcs = n1 + n2 + n3
chunk_size = header['ncoor'] * header['np']
# assume 3D problem with equal-mass particles for each component
assert header['ncoor'] == 6
# read ptcls in batches
N_get = 0
pcs = np.empty(N_pcs * 6, dtype = 'f4')
while N_get < N_pcs * 6:
chunk_t = pcs_ds.read_reals(dtype = 'f4')
pcs[N_get: N_get + chunk_size] = chunk_t
N_get += chunk_t.size
pcs = pcs.reshape((-1, 6))
pcs_ds.close()
# Make them into components
snap = {'cps' : {},
'pm' : header['pm'],
'time': header['time']}
    if n1 != 0: # component 1 has ptcls
snap['cps']['C1'] = {'N_pcs': n1,
'pm' : header['pm'],
'pcs' : pcs[:n1]}
if n2 != 0: # component 2 has ptcls
snap['cps']['C2'] = {'N_pcs': n2,
'pm' : header['pm'],
'pcs' : pcs[n1: n1 + n2]}
if n3 != 0: # component 3 has ptcls
snap['cps']['C3'] = {'N_pcs': n3,
'pm' : header['pm'],
'pcs' : pcs[n1 + n2: n1 + n2 + n3]}
return snap
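# Illustrative layout of the snapshot dictionary used above (values, shapes
# and the single component are invented for the example): a 4-particle
# component 'C1' would look like
#   {'time': 0.0, 'pm': 1.0e-5,
#    'cps': {'C1': {'N_pcs': 4, 'pm': 1.0e-5,
#                   'pcs': <(4, 6) float32 array of phase-space coordinates>}}}
# and round-trips through save_snap_galaxy_pcs() / read_snap_galaxy_pcs().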
# diff test
if False:
import os # for diff
dic = read_snap_galaxy_pcs('run999.pcs0')
save_snap_galaxy_pcs('test.pcs0', dic)
df = os.system('diff run999.pcs0 test.pcs0')
if(df): print "diff test failed."
else: print "diff test passed."
| bsd-3-clause | -3,170,242,807,777,087,500 | 25.92437 | 66 | 0.522784 | false |
ParticulateSolutions/django-sofortueberweisung | setup.py | 1 | 2687 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if 'tests' not in dirnames and not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
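# Illustrative example (package layout hypothetical): for a tree such as
#   mypkg/__init__.py, mypkg/sub/__init__.py, mypkg/templates/mail.html
# get_packages('mypkg') returns ['mypkg', 'mypkg/sub'] while
# get_package_data('mypkg') returns {'mypkg': ['templates/mail.html']},
# i.e. non-package files keyed by the root package, ready for package_data.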
REQUIREMENTS = [
'Django>=1.8',
'xmltodict>=0.9.2',
'six>=1.10.0'
]
version = get_version('django_sofortueberweisung')
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-sofortueberweisung',
author='Particulate Solutions GmbH',
author_email='[email protected]',
description=u'Django integration of Sofort.com',
version=version,
url='https://github.com/ParticulateSolutions/django-sofortueberweisung',
packages=get_packages('django_sofortueberweisung'),
package_data=get_package_data('django_sofortueberweisung'),
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'],
install_requires=REQUIREMENTS,
zip_safe=False)
| mit | -1,253,928,873,854,482,700 | 30.768293 | 100 | 0.605136 | false |
olynch/dicebag | dicebag.py | 1 | 3627 | #!/usr/bin/env python3
import random, pickle, sys, cmd
class Dice:
""" Contains x dice with n sides, or a plain modifier """
def __init__(self, dice):
""" Either takes in a string with a modifier, such as +4, or a dice description, such as 2d8 """
if dice[0] in ("+", "-"):
self.mod = int(dice)
self.num, self.sides = None, None
else:
self.num, self.sides = map(int, dice.split("d"))
self.mod = None
def roll(self):
""" rolls the dice, or just returns the modifier """
if self.mod != None:
return self.mod
else:
return sum([random.randrange(1, self.sides + 1) for x in range(self.num)])
def __str__(self):
if self.mod != None:
            if self.mod < 0:
                # str() already includes the minus sign for negative modifiers
                return str(self.mod)
            else:
                return "+" + str(self.mod)
return "+" + str(self.num) + "d" + str(self.sides)
class Roll:
""" Contains a set of dice and modifiers, provides a roll method to roll all its dice """
def __init__(self, desc_str):
desc = desc_str.split(" ")
self.dice_list = list(map(Dice, desc))
def roll(self):
return sum([x.roll() for x in self.dice_list])
def __str__(self):
return "".join(list(map(str, self.dice_list)))
def parse(args):
return args.split(" ")
class DiceInterpreter(cmd.Cmd):
""" The command line interface to the Roll class
Provides a dictionary that users can set and delete keys in, each key is a Roll
that users can roll. Users can also just specify a roll description on the command line, like 2d6 +10
Also provides a facility for saving the dictionary and opening it up again."""
prompt = "dice> "
DICE_PREFIX = ("+", "-", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
def preloop(self):
""" Initializes the rolls dictionary, possibly with a file passed as an argument """
self.rolls = {}
if len(sys.argv) > 1:
self.pickled_rolls = sys.argv[1]
self.rolls = pickle.load(open(self.pickled_rolls, 'rb'))
self.prompt = self.pickled_rolls + "> "
else:
self.pickled_rolls = None
def do_open(self, args):
""" Read a file into the rolls dictionary """
args = parse(args)
self.rolls = pickle.load(open(args[0], 'rb'))
def do_list(self, args):
""" List the contents of the rolls dictionary """
args = parse(args)
print(self.rolls)
def do_new(self, args):
""" Add a new Roll to the dictionary
The first argument is the name of the roll, the rest are the specifications. """
args = parse(args)
self.rolls[args[0]] = Roll(" ".join(args[1:]))
def do_del(self, args):
""" Deletes a roll from the dictionary
The first argument is the name of the roll """
args = parse(args)
del self.rolls[args[0]]
def default(self, line):
self.do_roll(line)
def do_roll(self, args):
""" Roll the specified rolls """
args = parse(args)
acc = 0
acc_str = ""
for dice in args:
if dice in self.rolls.keys():
acc_str += " " + str(self.rolls[dice])
acc += self.rolls[dice].roll()
elif dice[0] in self.DICE_PREFIX:
temp_dice = Dice(dice)
acc_str += " " + str(temp_dice)
acc += temp_dice.roll()
else:
print("A Roll of that name could not be found")
return
print(acc_str)
print(acc)
def do_exit(self, args):
""" Save the rolls dictionary, if desired, and then exit. """
return True
def postloop(self):
if self.pickled_rolls == None:
will_save = input("Do you wish to save (y/n): ")
if will_save != "n":
self.pickled_rolls = input("Where do you wish to save to: ")
pickle.dump(self.rolls, open(str(self.pickled_rolls), 'wb'))
else:
pickle.dump(self.rolls, open(str(self.pickled_rolls), 'wb'))
if __name__ == "__main__":
DiceInterpreter().cmdloop()
| mit | 1,283,960,359,057,703,400 | 29.225 | 102 | 0.632479 | false |
dsoprea/PySecure | pysecure/types.py | 1 | 4059 | import platform
from ctypes import *
from datetime import datetime
from pysecure.constants import TIME_DATETIME_FORMAT
from pysecure.constants.sftp import SSH_FILEXFER_TYPE_REGULAR, \
SSH_FILEXFER_TYPE_DIRECTORY, \
SSH_FILEXFER_TYPE_SYMLINK, \
SSH_FILEXFER_TYPE_SPECIAL, \
SSH_FILEXFER_TYPE_UNKNOWN
c_mode_t = c_int
c_uid_t = c_uint32
c_gid_t = c_uint32
# These are very, very unpredictable. We can only hope that this holds up for
# most systems.
# Returns something like "32bit" or "64bit".
arch_name = platform.architecture()[0]
arch_width = int(arch_name[0:2])
if arch_width == 64:
c_time_t = c_uint64
c_suseconds_t = c_uint64
else:
c_time_t = c_uint32
c_suseconds_t = c_uint32
class _CSftpAttributesStruct(Structure):
_fields_ = [('name', c_char_p),
('longname', c_char_p),
('flags', c_uint32),
('type', c_uint8),
('size', c_uint64),
('uid', c_uint32),
('gid', c_uint32),
('owner', c_char_p),
('group', c_char_p),
('permissions', c_uint32),
('atime64', c_uint64),
('atime', c_uint32),
('atime_nseconds', c_uint32),
('createtime', c_uint64),
('createtime_nseconds', c_uint32),
('mtime64', c_uint64),
('mtime', c_uint32),
('mtime_nseconds', c_uint32),
('acl', c_void_p), # NI: ssh_string
('extended_count', c_uint32),
('extended_type', c_void_p), # NI: ssh_string
('extended_data', c_void_p)] # NI: ssh_string
def __repr__(self):
mtime_phrase = datetime.fromtimestamp(self.mtime).\
strftime(TIME_DATETIME_FORMAT)
return ('<ATTR "%s" S=(%d) T=(%d) MT=[%s]>' %
(self.name, self.size, self.type, mtime_phrase))
@property
def is_regular(self):
return self.type == SSH_FILEXFER_TYPE_REGULAR
@property
def is_directory(self):
return self.type == SSH_FILEXFER_TYPE_DIRECTORY
@property
def is_symlink(self):
return self.type == SSH_FILEXFER_TYPE_SYMLINK
@property
def is_special(self):
return self.type == SSH_FILEXFER_TYPE_SPECIAL
@property
def is_unknown_type(self):
return self.type == SSH_FILEXFER_TYPE_UNKNOWN
@property
def modified_time(self):
# TODO: We're not sure if the mtime64 value is available on a 32-bit platform. We do this to be safe.
return self.mtime64 if self.mtime64 else self.mtime
@property
def modified_time_dt(self):
if self.mtime64:
return datetime.fromtimestamp(self.mtime64)
else:
return datetime.fromtimestamp(self.mtime)
_CSftpAttributes = POINTER(_CSftpAttributesStruct)
class CTimeval(Structure):
# it was easier to set these types based on what libssh assigns to them.
# The traditional definition leaves some platform ambiguity.
_fields_ = [('tv_sec', c_uint32),
('tv_usec', c_uint32)]
c_timeval = CTimeval
class _CSshKeyStruct(Structure):
_fields_ = [('type', c_int),
('flags', c_int),
('type_c', c_char_p),
('ecdsa_nid', c_int),
('dsa', c_void_p),
('rsa', c_void_p),
('ecdsa', c_void_p),
('cert', c_void_p)]
# Fortunately, we should probably be able to avoid most/all of the mechanics
# for the vast number of structs.
c_ssh_session = c_void_p #POINTER(CSshSessionStruct)
c_ssh_channel = c_void_p
c_sftp_session = c_void_p
c_sftp_attributes = _CSftpAttributes
c_sftp_dir = c_void_p
c_sftp_file = c_void_p
c_ssh_key = POINTER(_CSshKeyStruct)
# A simple aliasing assignment doesn't work here.
# c_sftp_statvfs = c_void_p
| gpl-2.0 | -6,243,054,323,641,303,000 | 30.465116 | 101 | 0.55186 | false |
dke-knu/i2am | i2am-app/AlgorithmSelectionEngine/PeriodicClassification/DeepLearning(local).py | 1 | 2820 | """ Learned classification model """
import tensorflow as tf
from PeriodicClassification import ModelConfig as myConfig
from PeriodicClassification import Preprocess as pre
def _model(X, keep_prob):
# input
W1 = tf.Variable(tf.random_normal([myConfig.INPUT_SIZE, myConfig.HIDDEN_SIZE]), name="weight1")
b1 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L1 = tf.matmul(X, W1) + b1
L1 = tf.nn.dropout(L1, keep_prob[0])
"""hidden Layers
dropout:
"""
W2 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight2")
b2 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L2 = tf.nn.softsign(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob[1])
W3 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight3")
b3 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L3 = tf.nn.softsign(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob[1])
W4 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.HIDDEN_SIZE]), name="weight4")
b4 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE]))
L4 = tf.nn.softsign(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob[1])
W5 = tf.Variable(tf.random_normal([myConfig.HIDDEN_SIZE, myConfig.OUTPUT_SIZE]), name="weight5")
b5 = tf.Variable(tf.random_normal([myConfig.OUTPUT_SIZE]))
L5 = tf.nn.softsign(tf.matmul(L4, W5) + b5)
hypothesis = tf.nn.dropout(L5, keep_prob[2])
# weight paramenters and bias
param_list = [W1, W2, W3, W4, W5, b1, b2, b3, b4, b5]
saver = tf.train.Saver(param_list)
return hypothesis, saver
def _classification(hypothesis):
p = tf.nn.softmax(hypothesis)
h_predict = tf.argmax(p, 1)
return h_predict
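# Illustrative note: hypothesis holds the raw network outputs, and
# _classification maps them to class indices. For a single output row such as
# [2.0, 0.1] the softmax probabilities are roughly [0.87, 0.13] and argmax
# gives 0, which _DNN_main below reports as 'Periodic' (index 1 means
# 'Non periodic').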
def _DNN_main(USER_DATA_PATH):
list_time_series = pre._reader(USER_DATA_PATH)
time_series = pre._resize(list_time_series)
print(time_series.shape)
X = tf.placeholder(tf.float32, [None, myConfig.INPUT_SIZE])
keep_prob = tf.placeholder(tf.float32) #0.1, 0.2, 0.3
hypo, model_saver = _model(X=X, keep_prob=keep_prob)
h_predict = _classification(hypothesis=hypo)
"""Initialize"""
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.import_meta_graph(myConfig.SAVED_MODEL_PATH)
saver.restore(sess, tf.train.latest_checkpoint(myConfig.CHECKPOINT_PATH))
t_trained = sess.run([h_predict], feed_dict={X: time_series, keep_prob: [1.0, 1.0, 1.0]})
print(t_trained[0])
if t_trained[0] == 1:
print('Non periodic')
return False
else:
print('Periodic')
return True
# Usage Example
# _DNN_main("user's data path")
_DNN_main("D:/DKE/data/period_classification/시연데이터/ECG_데이터_1.csv") | apache-2.0 | 5,677,919,536,755,330,000 | 33.207317 | 100 | 0.661912 | false |
jgliss/pyplis | pyplis/model_functions.py | 1 | 8950 | # -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License a
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pyplis module containing mathematical model functions."""
from __future__ import (absolute_import, division)
from numpy import exp, sin, cos
from pyplis import logger
import six
# Polynomial fit functions of different order, including versions that go
# through the origin of the coordinate system
# (e.g. used in doascalib.py), dictionary keys are the polynomial order
polys = {1: lambda x, a0, a1: a0 * x + a1,
2: lambda x, a0, a1, a2: a0 * x**2 + a1 * x + a2,
3: lambda x, a0, a1, a2, a3: a0 * x**3 + a1 * x**2 + a2 * x + a3}
polys_through_origin = {1: lambda x, a0: a0 * x,
2: lambda x, a0, a1: a0 * x**2 + a1 * x,
3: lambda x, a0, a1, a2: a0 * x**3 + a1 * x**2 + a2 * x
}
def cfun_kern2015(x, a0, a1):
return a0 * exp(x * a1) - 1
def cfun_kern2015_offs(x, a0, a1, a2):
return a0 * (exp(x * a1) - 1) + a2
class CalibFuns(object):
"""Class containing functions for fit of calibration curve."""
def __init__(self):
self.polys = {0: polys,
1: polys_through_origin}
self.custom_funs = {"kern2015": cfun_kern2015,
"kern2015_offs": cfun_kern2015_offs}
self._custom_funs_info = {"kern2015": ("see Eq. 6 in Kern et al., 2015"
"https://doi.org/10.1016/j."
"jvolgeores.2014.12.004"),
"kern2015_offs": ("Like previous, including "
"an offset term")}
def available_poly_orders(self, through_origin=False):
"""Return the available polynomial orders.
        Parameters
        ----------
        through_origin : bool
            if True, consider only polynomials without an offset term
Returns
-------
list
list containing available polyorders
"""
return list(self.polys[through_origin].keys())
def print_poly_info(self):
"""Print information about available polynomials."""
logger.info("Available polyorders (with offset): %s"
"Available polyorders (without offset): %s"
% (list(self.polys[0].keys()), list(self.polys[1].keys())))
def print_custom_funs_info(self):
"""Print information about available curtom calib functions."""
logger.info("Available polyorders (with offset): %s"
"Available polyorders (without offset): %s"
% (list(self.polys[0].keys()), list(self.polys[1].keys())))
for k, v in six.iteritems(self._custom_funs_info):
logger.info("%s : %s" % (k, v))
def get_custom_fun(self, key="kern2015"):
"""Return an available custom calibration function.
Parameters
----------
key : str
access key of custom function (call :func:`print_custom_funs_info`
for info about available functions)
Returns
-------
the function object
"""
if key not in self.custom_funs.keys():
raise KeyError("No custom calibration function with key %s "
"available" % key)
return self.custom_funs[key]
def get_poly(self, order=1, through_origin=False):
"""Get a polynomial of certain order.
Parameters
----------
order : int
order of polynomial (choose from 1-3)
through_origin : bool
if True, the polynomial will have no offset term
Return
------
function
the polynomial function object (callable)
"""
if order not in self.polys[through_origin].keys():
raise ValueError("Polynomial of order %s is not supported "
"available orders are %s"
% (order,
list(self.polys[through_origin].keys())))
return self.polys[through_origin][order]
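# Minimal usage sketch for CalibFuns; the numeric arguments are arbitrary
# example coefficients, not fitted calibration values.
def _example_calib_funs():
    funs = CalibFuns()
    poly = funs.get_poly(order=2, through_origin=True)  # a0*x**2 + a1*x
    custom = funs.get_custom_fun("kern2015")  # a0*exp(x*a1) - 1
    return poly(0.1, 2.0, 3.0), custom(0.1, 1.0, 5.0)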
def dilutioncorr_model(dist, rad_ambient, i0, ext):
r"""Model function for light dilution correction.
This model is based on the findings of `Campion et al., 2015
<http://www.sciencedirect.com/science/article/pii/S0377027315000189>`_.
:param float dist: distance of dark (black) object in m
:param float rad_ambient: intensity of ambient atmosphere at position of
dark object
:param float i0: initial intensity of dark object before it enters the
scattering medium. It is determined from the illumination intensity
and the albedo of the dark object.
    :param float ext: atmospheric scattering extinction coefficient
:math:`\epsilon` (in Campion et al., 2015 denoted with :math:`\sigma`).
"""
return i0 * exp(-ext * dist) + rad_ambient * (1 - exp(-ext * dist))
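# Hedged numerical sketch of the model above (all numbers invented): with an
# extinction coefficient of 5e-5 m^-1 over 10 km, exp(-ext*dist) is about
# 0.61, so a dark object with i0 = 800 seen against rad_ambient = 1000
# appears at roughly 878.
def _example_dilutioncorr():
    return dilutioncorr_model(dist=1.0e4, rad_ambient=1000.0, i0=800.0,
                              ext=5.0e-5)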
def gaussian_no_offset(x, ampl, mu, sigma):
"""1D gauss with baseline zero.
:param float x: x position of evaluation
:param float ampl: Amplitude of gaussian
    :param float mu: center position
:param float sigma: standard deviation
:returns float: value at position x
"""
# return float(ampl)*exp(-(x - float(mu))**2/(2*float(sigma)**2))
return ampl * exp(-(x - mu)**2 / (2 * sigma**2))
def gaussian(x, ampl, mu, sigma, offset):
"""1D gauss with arbitrary baseline.
:param float x: x position of evaluation
:param float ampl: Amplitude of gaussian
    :param float mu: center position
:param float sigma: standard deviation
:param float offset: baseline of gaussian
:returns float: value at position x
"""
return gaussian_no_offset(x, ampl, mu, sigma) + offset
def multi_gaussian_no_offset(x, *params):
"""Superimposed 1D gauss functions with baseline zero.
:param array x: x array used for evaluation
    :param list *params: List of length L = 3xN where N corresponds to the
number of gaussians e.g.::
[100,10,3,50,15,6]
would correspond to 2 gaussians with the following characteristics:
1. Peak amplitude: 100, Mu: 10, sigma: 3
2. Peak amplitude: 50, Mu: 15, sigma: 6
"""
res = 0
num = int(len(params) / 3)
for k in range(num):
p = params[k * 3:(k + 1) * 3]
res = res + gaussian_no_offset(x, *p)
return res
def multi_gaussian_same_offset(x, offset, *params):
"""Superimposed 1D gauss functions with baseline (offset).
See :func:`multi_gaussian_no_offset` for instructions
"""
return multi_gaussian_no_offset(x, *params) + offset
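# Example matching the docstring of :func:`multi_gaussian_no_offset` (values
# arbitrary): two superimposed peaks with amplitude/mu/sigma of 100/10/3 and
# 50/15/6 on a baseline of 5.
def _example_multi_gaussian(x):
    return multi_gaussian_same_offset(x, 5, 100, 10, 3, 50, 15, 6)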
def supergauss_2d(position, amplitude, xm, ym, sigma, asym, shape, offset):
"""2D super gaussian without tilt.
:param tuple position: position (x, y) of Gauss
:param float amplitude: amplitude of peak
:param float xm: x position of maximum
:param float ym: y position of maximum
    :param float asym: asymmetry in y direction (1 is circle, smaller
        means dilated in y direction)
:param float shape: super gaussian shape parameter (1 is gaussian)
:param float offset: base level of gaussian
"""
x, y = position
u = ((x - xm) / sigma) ** 2 + ((y - ym) * asym / sigma)**2
g = offset + amplitude * exp(-u**shape)
return g.ravel()
def supergauss_2d_tilt(position, amplitude, xm, ym, sigma, asym, shape, offset,
theta):
"""2D super gaussian without tilt.
:param tuple position: position (x, y) of Gauss
:param float amplitude: amplitude of peak
:param float xm: x position of maximum
:param float ym: y position of maximum
    :param float asym: asymmetry in y direction (1 is circle, smaller
        means dilated in y direction)
    :param float shape: super gaussian shape parameter (1 is gaussian)
:param float offset: base level of gaussian
:param float theta: tilt angle (rad) of super gaussian
"""
x, y = position
xprime = (x - xm) * cos(theta) - (y - ym) * sin(theta)
yprime = (x - xm) * sin(theta) + (y - ym) * cos(theta)
u = (xprime / sigma)**2 + (yprime * asym / sigma)**2
g = offset + amplitude * exp(-u**shape)
return g.ravel()
| gpl-3.0 | 7,274,057,895,621,544,000 | 35.234818 | 79 | 0.603128 | false |
gomezgoiri/reusingWebActuatorsFromSemanticSpace | actuation/api/space.py | 1 | 1442 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2013 onwards University of Deusto
All rights reserved.
This software is licensed as described in the file COPYING, which
you should have received as part of this distribution.
This software consists of contributions made by many individuals,
listed below:
@author: Aitor Gómez Goiri <[email protected]>
'''
from abc import ABCMeta, abstractmethod
class Space(object):
__metaclass__ = ABCMeta
@abstractmethod
def write(self, graph):
pass
@abstractmethod
def read_by_wildcard(self, template):
pass
@abstractmethod
def read_by_sparql(self, query):
pass
@abstractmethod
def take_by_wildcard(self, template):
pass
@abstractmethod
def take_by_sparql(self, query):
pass
@abstractmethod
def take_by_uri(self, uri):
pass
@abstractmethod
def subscribe(self, template, callback):
pass
class AbstractCallback(object):
__metaclass__ = ABCMeta
@abstractmethod
def call(self):
pass
class AbstractSubscriptionTemplate(object):
__metaclass__ = ABCMeta
@abstractmethod
def matches(self, graph):
pass
class AbstractSubscriptionObserver(object): # local observer
__metaclass__ = ABCMeta
@abstractmethod
def notify_subscription(self, template):
pass | apache-2.0 | 147,625,966,535,404,900 | 18.226667 | 67 | 0.638446 | false |
UNC-Major-Lab/Fragment-Isotope-Distribution-Paper | scripts/theoretical/mergeHistogram.py | 1 | 1930 | #!/usr/bin/env python
import sys
import os
import re
from collections import defaultdict
from math import floor
from math import isnan
root_dir = sys.argv[1]
prefix = sys.argv[2]
do_iso = sys.argv[3]
if do_iso == 'F':
comp2bin2count = defaultdict(dict)
for f in os.listdir(root_dir):
fp = root_dir+"/"+f
if os.path.isfile(fp) and ".out" in f and f.startswith(prefix):
infile = open(fp)
for line in infile:
if (len(line.strip().split("\t")) == 2):
                    pass  # ignore lines that only have two columns
else:
[comp, bin, count] = line.strip().split("\t")
count = int(count)
if not comp2bin2count[comp].has_key(bin):
comp2bin2count[comp][bin] = 0
comp2bin2count[comp][bin]+=count
infile.close()
for comp in comp2bin2count:
for bin in comp2bin2count[comp]:
print "\t".join([comp, str(bin), str(comp2bin2count[comp][bin])])
else:
comp2iso2bin2count = defaultdict(dict)
for f in os.listdir(root_dir):
fp = root_dir+"/"+f
if os.path.isfile(fp) and ".out" in f and f.startswith(prefix):
infile = open(fp)
for line in infile:
if (len(line.strip().split("\t")) == 2):
                    pass  # ignore lines that only have two columns
else:
[comp, bin, iso, count] = line.strip().split("\t")
count = int(count)
if not comp2iso2bin2count[comp].has_key(iso):
comp2iso2bin2count[comp][iso] = defaultdict(int)
comp2iso2bin2count[comp][iso][bin]+=count
infile.close()
for comp in comp2iso2bin2count:
for iso in comp2iso2bin2count[comp]:
for bin in comp2iso2bin2count[comp][iso]:
print "\t".join([comp, str(bin), iso, str(comp2iso2bin2count[comp][iso][bin])]) | mit | 324,607,062,226,680,960 | 32.877193 | 95 | 0.526425 | false |
NETWAYS/ingraph | ingraph/api.py | 1 | 16388 | # inGraph (https://www.netways.org/projects/ingraph)
# Copyright (C) 2011-2012 NETWAYS GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import xmlrpclib
import cPickle
import ingraph.model as model
class BackendRPCMethods(object):
def __init__(self, engine, queryqueue, logger):
self.hosts = {}
self.services = {}
self.hostservices = {}
self.plots = {}
self.engine = engine
self.queryqueue = queryqueue
self.logger = logger
self.shutdown_server = False
def setupTimeFrame(self, interval, retention_period=None):
tfs = model.TimeFrame.getAll(self.engine)
for tf in tfs:
if (tf.interval < interval and interval % tf.interval != 0) or \
(tf.interval > interval and tf.interval % interval != 0):
raise xmlrpclib.Fault(
1, 'Timeframe interval is invalid. Must be multiple of '
'existing timeframe or evenly divisible by existing '
'larger intervals.')
if tf.interval == interval:
tf.retention_period = retention_period
tf.save(self.engine)
return tf.id
tf = model.TimeFrame(interval, retention_period)
tf.save(self.engine)
return tf.id
def getTimeFrames(self):
tfs = model.TimeFrame.getAll(self.engine)
items = {}
for tf in tfs:
items[str(tf.interval)] = {'id': tf.id,
'interval': tf.interval,
'retention-period': tf.retention_period}
return items
def disableTimeFrame(self, tf_id):
tf = model.TimeFrame.getByID(self.engine, tf_id)
        tf.active = False
tf.save(self.engine)
return True
def _createHost(self, conn, name):
if name in self.hosts:
return self.hosts[name]
obj = model.Host.getByName(conn, name)
if obj == None:
obj = model.Host(name)
obj.save(conn)
self.hosts[name] = obj
return obj
def _createService(self, conn, name):
if name in self.services:
return self.services[name]
obj = model.Service.getByName(conn, name)
if obj == None:
obj = model.Service(name)
obj.save(conn)
self.services[name] = obj
return obj
def _createHostService(self, conn, host, service, parent_hostservice):
hostservice_key = (host, service)
if hostservice_key in self.hostservices:
return self.hostservices[hostservice_key]
objs = model.HostService.getByHostAndService(conn, host, service,
parent_hostservice)
if len(objs) == 0:
obj = model.HostService(host, service, parent_hostservice)
obj.save(conn)
else:
obj = objs[0]
self.hostservices[hostservice_key] = obj
return obj
def _createPlot(self, conn, hostservice, name):
plot_key = (hostservice, name)
if plot_key in self.plots:
return self.plots[plot_key]
objs = model.Plot.getByHostServiceAndName(conn, hostservice, name)
if len(objs) == 0:
obj = model.Plot(hostservice, name)
obj.save(conn)
else:
obj = objs[0]
self.plots[plot_key] = obj
return obj
def insertValueBulk(self, updates_raw):
updates = cPickle.loads(updates_raw)
conn = self.engine.connect()
for update in updates:
(host, parent_service, service, plot, timestamp, unit, value, lower_limit, upper_limit, warn_lower, warn_upper, warn_type,
crit_lower, crit_upper, crit_type, pluginstatus) = update
try:
host_obj = self._createHost(conn, host)
if parent_service != None:
parent_service_obj = self._createService(conn, parent_service)
parent_hostservice_obj = self._createHostService(
conn, host_obj, parent_service_obj, None)
else:
parent_hostservice_obj = None
service_obj = self._createService(conn, service)
hostservice_obj = self._createHostService(conn, host_obj,
service_obj,
parent_hostservice_obj)
plot_obj = self._createPlot(conn, hostservice_obj, plot)
queries = plot_obj.buildUpdateQueries(
conn, timestamp, unit, value, value, value, lower_limit,
upper_limit, warn_lower, warn_upper, warn_type, crit_lower,
crit_upper, crit_type)
for query in queries:
self.queryqueue.put(query)
# if pluginstatus in ['warning', 'critical']:
# status_obj = model.PluginStatus(hostservice_obj, timestamp, pluginstatus)
# status_obj.save(conn)
except Exception, e:
print e
continue
conn.close()
return True
def getHosts(self):
hosts = model.Host.getAll(self.engine)
items = []
for host in hosts:
items.append(host.name)
return items
def getHostsFiltered(self, pattern, limit=None, offset=None):
result = model.Host.getByPattern(self.engine,
pattern.replace('*', '%'),
limit, offset)
items = []
for host in result['hosts']:
items.append(host.name)
return {'total': result['total'], 'hosts': items}
def getServices(self, host_pattern, service_pattern=None, limit=None,
offset=None):
result = model.HostService.getByHostAndServicePattern(
self.engine, host_pattern.replace('*', '%'),
service_pattern.replace('*', '%'), limit, offset)
items = []
for hostservice_obj in result['services']:
if hostservice_obj.parent_hostservice == None:
parentservice = None
else:
parentservice = hostservice_obj.parent_hostservice.service.name
item = { 'service': hostservice_obj.service.name,
'parent_service': parentservice }
items.append(item)
return {'total': result['total'], 'services': items}
def _flattenCharts(self, dps):
charts = []
for plot_obj, plot_charts in dps['charts'].iteritems():
for type, data in plot_charts.iteritems():
label = plot_obj.name + '-' + type
hostservice_obj = plot_obj.hostservice
if hostservice_obj.parent_hostservice != None:
label = hostservice_obj.service.name + '-' + label
if hostservice_obj.service.name != '':
svc_id = ' - ' + hostservice_obj.service.name
else:
svc_id = ''
plot_id = hostservice_obj.host.name + svc_id + ' - ' + plot_obj.name + ' - ' + type
charts.append({'host': hostservice_obj.host.name,
'service': hostservice_obj.service.name,
'plot': plot_obj.name, 'type': type,
'label': label, 'unit': plot_obj.unit,
'start_timestamp': dps['start_timestamp'],
'end_timestamp': dps['end_timestamp'],
'granularity': dps['granularity'],
'data': data,
'plot_id': plot_id})
return charts
def getPlotValues2(self, query, start_timestamp=None, end_timestamp=None,
granularity=None, null_tolerance=0):
conn = self.engine.connect()
st = time.time()
charts = []
comments = []
statusdata = []
result = {'comments': comments, 'charts': charts, 'statusdata': statusdata,
'min_timestamp': model.dbload_min_timestamp,
'max_timestamp': time.time()}
if start_timestamp == '':
start_timestamp = None
if end_timestamp == '':
end_timestamp = None
if granularity == '':
granularity = None
vquery = {}
for spec in query:
host = model.Host.getByName(conn, spec['host'])
parent_hostservice = None
if spec['parent_service']:
parent_service = model.Service.getByName(conn, spec['parent_service'])
parent_hostservice = model.HostService.getByHostAndService(conn, host, parent_service, None)
try:
parent_hostservice = parent_hostservice[0]
except IndexError:
parent_hostservice = None
service = model.Service.getByName(conn, spec['service'], None)
hose = model.HostService.getByHostAndService(conn, host, service, parent_hostservice)
try:
hose = hose[0]
except IndexError:
# Not found
continue
plots = model.Plot.getByHostServiceAndName(conn, hose, spec['plot'])
for plot in plots:
if plot not in vquery:
vquery[plot] = []
if spec['type'] not in vquery[plot]:
vquery[plot].append(spec['type'])
dps = model.DataPoint.getValuesByInterval(conn, vquery,
start_timestamp, end_timestamp,
granularity, null_tolerance)
conn.close()
if 'comments' in dps:
comments.extend(dps['comments'])
if 'statusdata' in dps:
statusdata.extend(dps['statusdata'])
if 'charts' in dps:
charts.extend(self._flattenCharts(dps))
et = time.time()
self.logger.debug("Got filtered plot values in %f seconds" % (et - st))
return result
def _optimizePlot(self, plot):
prev = None
same = False
result = []
for nvpair in plot:
if prev != None and prev[1] == nvpair[1]:
same = True
elif prev == None or same:
same = False
result.append({'x': nvpair[0], 'y': nvpair[1]})
else:
result.append({'y': nvpair[1]})
prev = nvpair
return result
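    # Illustrative behaviour of _optimizePlot (datapoints invented): runs of
    # repeated values are collapsed and only points starting a new value keep
    # their 'x', e.g. [(1, 5), (2, 5), (3, 5), (4, 7)] becomes
    # [{'x': 1, 'y': 5}, {'x': 4, 'y': 7}], while strictly changing values
    # such as [(1, 5), (2, 6)] become [{'x': 1, 'y': 5}, {'y': 6}].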
def getPlotValues3(self, query, start_timestamp=None, end_timestamp=None,
granularity=None, null_tolerance=0):
data = self.getPlotValues2(query, start_timestamp, end_timestamp,
granularity, null_tolerance)
for chart in data['charts']:
chart['data'] = self._optimizePlot(chart['data'])
return data
def shutdown(self):
self.shutdown_server = True
return True
def addOrUpdateComment(self, comment_id, host, parent_service, service,
timestamp, author, text):
host_obj = self._createHost(self.engine, host)
if comment_id == '':
comment_id = None
if parent_service == '':
parent_service = None
if parent_service != None:
parent_service_obj = self._createService(self.engine,
parent_service)
parent_hostservice_obj = self._createHostService(
self.engine, host_obj, parent_service_obj, None)
else:
parent_hostservice_obj = None
service_obj = self._createService(self.engine, service)
hostservice_obj = self._createHostService(self.engine, host_obj,
service_obj,
parent_hostservice_obj)
if comment_id == None:
comment = model.Comment(hostservice_obj, timestamp, author, text)
else:
comment = model.Comment.getByID(self.engine, comment_id)
comment.hostservice = hostservice_obj
comment.timestamp = timestamp
comment.author = author
comment.text = text
comment.save(self.engine)
return comment.id
def addComment(self, host, parent_service, service, timestamp, author,
text):
return self.addOrUpdateComment(None, host, parent_service, service, timestamp,
author, text)
def deleteComment(self, comment_id):
comment = model.Comment.getByID(self.engine, comment_id)
comment.delete(self.engine)
def updateComment(self, comment_id, host, parent_service, service,
timestamp, author, text):
return self.addOrUpdateComment(comment_id, host, parent_service,
service, timestamp, author, text)
def getPlots(self, host_name, service_name, parent_service_name=None):
res = []
host = model.Host.getByName(self.engine, host_name)
if host_name and not host:
return res
service = model.Service.getByName(self.engine, service_name,
parent_service_name)
if service_name and not service:
return res
parent_hose = None
"""
if parent_service_name:
parent_service = model.Service.getByName(
self.engine, parent_service_name, None)
if not parent_service:
return res
parent_hose = model.HostService.getByHostAndService(
self.engine, host, parent_service, None)
try:
parent_hose = parent_hose[0]
except IndexError:
# Not found
pass
"""
hose = model.HostService.getByHostAndService(
self.engine, host, service, parent_hose)
try:
hose = hose[0]
except IndexError:
# Not found
pass
else:
children = model.HostService.getByHostAndService(
self.engine, hose.host, None, hose)
if children:
for child in children:
if child.parent_hostservice != None:
parent_service_name = child.parent_hostservice.service.name
else:
parent_service_name = None
plots = model.Plot.getByHostServiceAndName(
self.engine, child, None)
for plot in plots:
res.append({
'service': child.service.name,
'plot': plot.name,
'parent_service': parent_service_name
})
else:
if hose.parent_hostservice != None:
parent_service_name = hose.parent_hostservice.service.name
else:
parent_service_name = None
plots = model.Plot.getByHostServiceAndName(
self.engine, hose, None)
for plot in plots:
res.append({
'service': hose.service.name,
'plot': plot.name,
'parent_service': parent_service_name
})
return res
| gpl-3.0 | 3,848,192,613,168,572,400 | 34.703704 | 134 | 0.527886 | false |
spark8103/deploy | app/celery_runner.py | 1 | 4976 | import subprocess
from subprocess import Popen, PIPE
from . import celery
@celery.task(bind=True)
def deploy_running_task(self, cmd, type='Deploy'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
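# Illustrative invocation (assumes a configured Celery broker; the command
# string is made up): the web app queues the task asynchronously, e.g.
#   result = deploy_running_task.delay("./deploy.sh myapp", type="Deploy")
#   result.state               # e.g. 'PROGRESS' while the command runs
#   result.info.get('output')  # accumulated stdout captured so far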
@celery.task(bind=True)
def ansible_running_task(self, cmd, type='Ansible'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
@celery.task(bind=True)
def ansible_playbook_task(self, cmd, type='Ansible-Playbook'):
has_error = False
result = None
output = ""
self.update_state(state='PROGRESS',
meta={'output': output,
'description': "",
'returncode': None})
print(str.format("About to execute: {0}", cmd))
proc = Popen([cmd], stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
for line in iter(proc.stdout.readline, ''):
print(str(line))
output = output + line
self.update_state(state='PROGRESS', meta={'output': output, 'description': "", 'returncode': None})
return_code = proc.poll()
if return_code is 0:
meta = {'output': output,
'returncode': proc.returncode,
'description': ""
}
self.update_state(state='FINISHED',
meta=meta)
elif return_code is not 0:
# failure
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
self.update_state(state='FAILED',
meta=meta)
if len(output) is 0:
output = "no output, maybe no matching hosts?"
meta = {'output': output,
'returncode': return_code,
'description': str.format("Celery ran the task, but {0} reported error", type)
}
return meta
@celery.task(name='celery_tasks.cmd')
def schedule_cmd(cmd):
child = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT, shell=True)
out, err = child.communicate()
ret = child.wait()
return {'returncode': ret,
'output': out,
            'error': err
}
| mit | 2,258,274,440,267,406,800 | 35.321168 | 107 | 0.527532 | false |
talumbau/webapp-public | webapp/apps/taxbrain/helpers.py | 1 | 26371 | from collections import namedtuple
import taxcalc
import dropq
import os
import requests
from requests.exceptions import Timeout, RequestException
import json
import pandas as pd
import time
#
# Prepare user params to send to DropQ/Taxcalc
#
NUM_BUDGET_YEARS = int(os.environ.get('NUM_BUDGET_YEARS', 10))
START_YEAR = int(os.environ.get('START_YEAR', 2015))
#Hard fail on lack of dropq workers
dropq_workers = os.environ.get('DROPQ_WORKERS', '')
DROPQ_WORKERS = dropq_workers.split(",")
TAXCALC_COMING_SOON_FIELDS = [
'_Dividend_rt1', '_Dividend_thd1',
'_Dividend_rt2', '_Dividend_thd2',
'_Dividend_rt3', '_Dividend_thd3', '_BE_inc', '_BE_sub',
'_BE_cg_per', '_BE_cg_trn'
]
TAXCALC_COMING_SOON_INDEXED_BY_MARS = [
'_CG_thd1', '_CG_thd2', '_Dividend_thd1','_Dividend_thd2', '_Dividend_thd3'
]
TIMEOUT_IN_SECONDS = 1.0
MAX_ATTEMPTS_SUBMIT_JOB = 20
#
# Display TaxCalc result data
#
TAXCALC_RESULTS_START_YEAR = START_YEAR
TAXCALC_RESULTS_MTABLE_COL_LABELS = taxcalc.TABLE_LABELS
TAXCALC_RESULTS_DFTABLE_COL_LABELS = taxcalc.DIFF_TABLE_LABELS
TAXCALC_RESULTS_MTABLE_COL_FORMATS = [
# divisor, unit, decimals
[ 1000, None, 0], # 'Returns',
[1000000000, 'Dollars', 1], # 'AGI',
[ 1000, None, 0], # 'Standard Deduction Filers',
[1000000000, 'Dollars', 1], # 'Standard Deduction',
[ 1000, None, 0], # 'Itemizers',
[1000000000, 'Dollars', 1], # 'Itemized Deduction',
[1000000000, 'Dollars', 1], # 'Personal Exemption',
[1000000000, 'Dollars', 1], # 'Taxable Income',
[1000000000, 'Dollars', 1], # 'Regular Tax',
[1000000000, 'Dollars', 1], # 'AMTI',
[ 1000, None, 0], # 'AMT Filers',
[1000000000, 'Dollars', 1], # 'AMT',
[1000000000, 'Dollars', 1], # 'Tax before Credits',
[1000000000, 'Dollars', 1], # 'Non-refundable Credits',
[1000000000, 'Dollars', 1], # 'Tax before Refundable Credits',
[1000000000, 'Dollars', 1], # 'Refundable Credits',
[1000000000, 'Dollars', 1], # 'Revenue'
]
TAXCALC_RESULTS_DFTABLE_COL_FORMATS = [
[ 1000, None, 0], # "Inds. w/ Tax Cut",
[ 1000, None, 0], # "Inds. w/ Tax Increase",
[ 1000, None, 0], # "Count",
[ 1, 'Dollars', 0], # "Mean Tax Difference",
[1000000000, 'Dollars', 1], # "Total Tax Difference",
[ 1, '%', 1], # "%age Tax Increase",
[ 1, '%', 1], # "%age Tax Decrease",
[ 1, '%', 1], # "Share of Overall Change"
]
TAXCALC_RESULTS_BIN_ROW_KEYS = dropq.dropq.bin_row_names
TAXCALC_RESULTS_BIN_ROW_KEY_LABELS = {
'less_than_10':'Less than 10',
'ten_twenty':'10-20',
'twenty_thirty':'20-30',
'thirty_forty':'30-40',
'forty_fifty':'40-50',
'fifty_seventyfive':'50-75',
'seventyfive_hundred':'75-100',
'hundred_twohundred':'100-200',
'twohundred_fivehundred':'200-500',
'fivehundred_thousand':'500-1000',
'thousand_up':'1000+',
'all':'All'
}
TAXCALC_RESULTS_DEC_ROW_KEYS = dropq.dropq.decile_row_names
TAXCALC_RESULTS_DEC_ROW_KEY_LABELS = {
'perc0-10':'0-10%',
'perc10-20':'10-20%',
'perc20-30':'20-30%',
'perc30-40':'30-40%',
'perc40-50':'40-50%',
'perc50-60':'50-60%',
'perc60-70':'60-70%',
'perc70-80':'70-80%',
'perc80-90':'80-90%',
'perc90-100':'90-100%',
'all':'All'
}
TAXCALC_RESULTS_TABLE_LABELS = {
'mX_dec': 'Base plan tax vars, weighted avg per AGI decile',
'mY_dec': 'User plan tax vars, weighted avg per AGI decile',
'df_dec': 'Difference between Base and User plans by AGI decile',
'mX_bin': 'Base plan tax vars, weighted avg per income bin',
'mY_bin': 'User plan tax vars, weighted avg per income bin',
'df_bin': 'Difference between Base and User plans by income bin',
'fiscal_tots': 'Total Revenue Change by Calendar Year',
}
def expand_1D(x, num_years):
"""
Expand the given data to account for the given number of budget years.
Expanded entries are None by default
"""
if len(x) >= num_years:
return list(x)
else:
ans = [None] * num_years
ans[:len(x)] = x
return ans
def expand_2D(x, num_years):
"""
Expand the given data to account for the given number of budget years.
For 2D arrays, we expand out the number of rows until we have num_years
number of rows. Added rows have all 'None' entries
"""
if len(x) >= num_years:
return list(x)
else:
ans = []
for i in range(0, num_years):
ans.append([None] * len(x[0]))
for i, arr in enumerate(x):
ans[i] = arr
return ans
def expand_list(x, num_years):
"""
Dispatch to either expand_1D or expand2D depending on the dimension of x
Parameters:
-----------
x : value to expand
num_years: int
Number of budget years to expand
Returns:
--------
expanded list
"""
if isinstance(x[0], list):
return expand_2D(x, num_years)
else:
return expand_1D(x, num_years)
def convert_to_floats(tsi):
"""
A helper function that tax all of the fields of a TaxSaveInputs model
and converts them to floats, or list of floats
"""
def numberfy_one(x):
if isinstance(x, float):
return x
else:
return float(x)
def numberfy(x):
if isinstance(x, list):
return [numberfy_one(i) for i in x]
else:
return numberfy_one(x)
attrs = vars(tsi)
return { k:numberfy(v) for k,v in attrs.items() if v}
def leave_name_in(key, val, dd):
"""
Under certain conditions, we will remove 'key' and its value
from the dictionary we pass to the dropq package. This function
will test those conditions and return a Bool.
Parameters:
-----------
key: a field name to potentially pass to the dropq package
dd: the default dictionary of data in taxcalc Parameters
Returns:
--------
Bool: True if we allow this field to get passed on. False
if it should be removed.
"""
if key in dd:
return True
else:
print "Don't have this pair: ", key, val
underscore_name_in_defaults = "_" + key in dd
is_cpi_name = key.endswith("_cpi")
is_array_name = (key.endswith("_0") or key.endswith("_1") or
key.endswith("_2") or key.endswith("_3"))
if (underscore_name_in_defaults or is_cpi_name or is_array_name):
return True
else:
return False
def package_up_vars(user_values):
dd = taxcalc.parameters.default_data(start_year=START_YEAR)
for k, v in user_values.items():
if not leave_name_in(k, v, dd):
print "Removing ", k, v
del user_values[k]
name_stems = {}
ans = {}
#Find the 'broken out' array values, these have special treatment
for k, v in user_values.items():
if (k.endswith("_0") or k.endswith("_1") or k.endswith("_2")
or k.endswith("_3")):
vals = name_stems.setdefault(k[:-2], [])
vals.append(k)
#For each array value, expand as necessary based on default data
#then add user values. It is acceptable to leave 'blanks' as None.
#This is handled on the taxcalc side
for k, vals in name_stems.items():
if k in dd:
default_data = dd[k]
param = k
else:
#add a leading underscore
default_data = dd["_" + k]
param = "_" + k
# get max number of years to advance
_max = 0
for name in vals:
num_years = len(user_values[name])
if num_years > _max:
_max = num_years
expnded = expand_list(default_data, _max)
#Now copy necessary data to expanded array
for name in vals:
idx = int(name[-1]) # either 0, 1, 2, 3
user_arr = user_values[name]
for new_arr, user_val in zip(expnded, user_arr):
new_arr[idx] = int(user_val)
del user_values[name]
ans[param] = expnded
#Process remaining values set by user
for k, v in user_values.items():
if k in dd:
default_data = dd[k]
param = k
elif k.endswith("_cpi"):
if k[:-4] in dd:
ans[k] = v
else:
ans['_' + k] = v
continue
else:
#add a leading underscore
default_data = dd["_" + k]
param = "_" + k
num_years = len(v)
expnded = expand_list(default_data, num_years)
for i, new_val in enumerate(v):
expnded[i] = new_val
ans[param] = expnded
return ans
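# Sketch of the transformation above (parameter names invented): form values
# such as {'SomeParam': [4000.0], 'SomeSwitch_0': [1.0]} come back keyed by
# the underscored taxcalc names and expanded against the defaults, e.g.
# {'_SomeParam': [4000.0], '_SomeSwitch': [[1, d1, d2, d3]]} where d1..d3 are
# the untouched default column values.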
#
# Gather data to assist in displaying TaxCalc param form
#
class TaxCalcField(object):
"""
An atomic unit of data for a TaxCalcParam, which can be stored as a field
Used for both CSV float fields (value column data) and boolean fields (cpi)
"""
def __init__(self, id, label, values, param):
self.id = id
self.label = label
self.values = values
self.param = param
self.values_by_year = {}
for i, value in enumerate(values):
year = param.start_year + i
self.values_by_year[year] = value
self.default_value = self.values_by_year[START_YEAR]
class TaxCalcParam(object):
"""
A collection of TaxCalcFields that represents all configurable details
for one of TaxCalc's Parameters
"""
def __init__(self, param_id, attributes):
self.__load_from_json(param_id, attributes)
def __load_from_json(self, param_id, attributes):
values_by_year = attributes['value']
col_labels = attributes['col_label']
self.tc_id = param_id
self.nice_id = param_id[1:] if param_id[0] == '_' else param_id
self.name = attributes['long_name']
self.info = " ".join([
attributes['description'],
attributes.get('irs_ref') or "", # sometimes this is blank
attributes.get('notes') or "" # sometimes this is blank
]).strip()
# Pretend the start year is 2015 (instead of 2013),
# until values for that year are provided by taxcalc
#self.start_year = int(attributes['start_year'])
self.start_year = START_YEAR
self.coming_soon = (self.tc_id in TAXCALC_COMING_SOON_FIELDS)
# normalize single-year default lists [] to [[]]
if not isinstance(values_by_year[0], list):
values_by_year = [values_by_year]
# organize defaults by column [[A1,B1],[A2,B2]] to [[A1,A2],[B1,B2]]
values_by_col = [list(x) for x in zip(*values_by_year)]
#
# normalize and format column labels
#
if self.tc_id in TAXCALC_COMING_SOON_INDEXED_BY_MARS:
col_labels = ["Single", "Married filing Jointly",
"Married filing Separately", "Head of Household"]
values_by_col = ['0','0','0','0']
elif isinstance(col_labels, list):
if col_labels == ["0kids", "1kid", "2kids", "3+kids"]:
col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
elif col_labels == ["single", "joint", "separate", "head of household",
"widow", "separate"] or col_labels == \
["single", "joint", "separate", "head of household",
"widow", "separate","dependent"]:
col_labels = ["Single", "Married filing Jointly",
"Married filing Separately", "Head of Household"]
else:
if col_labels == "NA" or col_labels == "":
col_labels = [""]
elif col_labels == "0kids 1kid 2kids 3+kids":
col_labels = ["0 Kids", "1 Kid", "2 Kids", "3+ Kids"]
# create col params
self.col_fields = []
if len(col_labels) == 1:
self.col_fields.append(TaxCalcField(
self.nice_id,
col_labels[0],
values_by_col[0],
self
))
else:
for col, label in enumerate(col_labels):
self.col_fields.append(TaxCalcField(
self.nice_id + "_{0}".format(col),
label,
values_by_col[col],
self
))
# we assume we can CPI inflate if first value isn't a ratio
first_value = self.col_fields[0].values[0]
self.inflatable = first_value > 1 and self.tc_id != '_ACTC_ChildNum'
if self.inflatable:
self.cpi_field = TaxCalcField(self.nice_id + "_cpi", "CPI", [True], self)
# Create a list of default parameters
TAXCALC_DEFAULT_PARAMS_JSON = taxcalc.parameters.default_data(metadata=True, start_year=2015)
default_taxcalc_params = {}
for k,v in TAXCALC_DEFAULT_PARAMS_JSON.iteritems():
param = TaxCalcParam(k,v)
default_taxcalc_params[param.nice_id] = param
#Behavior Effects not in params.json yet. Add in the appropriate info so that
#the params dictionary has the right info
# value, col_label, long_name, description, irs_ref, notes
be_params = []
be_inc_param = {'value':[0], 'col_label':['label'], 'long_name':'Income Effect',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_sub_param = {'value':[0], 'col_label':['label'], 'long_name':'Substitution Effect',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_per_param = {'value':[0], 'col_label':['label'], 'long_name':'Persistent',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_cg_trn_param= {'value':[0], 'col_label':['label'], 'long_name':'Transitory',
'description': 'Behavior Effects', 'irs_ref':'', 'notes':''}
be_params.append(('_BE_inc', be_inc_param))
be_params.append(('_BE_sub', be_sub_param))
be_params.append(('_BE_cg_per', be_cg_per_param))
be_params.append(('_BE_cg_trn', be_cg_trn_param))
for k,v in be_params:
param = TaxCalcParam(k,v)
default_taxcalc_params[param.nice_id] = param
TAXCALC_DEFAULT_PARAMS = default_taxcalc_params
# Debug TaxParams
"""
for k, param in TAXCALC_DEFAULT_PARAMS.iteritems():
print(' -- ' + k + ' -- ')
print('TC id: ' + param.tc_id)
print('Nice id: ' + param.nice_id)
print('name: ' + param.name)
print('info: ' + param.info + '\n')
if param.inflatable:
field = param.cpi_field
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
for field in param.col_fields:
print(field.id + ' - ' + field.label + ' - ' + str(field.values))
print('\n')
"""
def taxcalc_results_to_tables(results):
"""
    Take various results from dropq, e.g. mY_dec, mX_bin, df_dec, etc.,
    and return organized and labeled table results for display.
"""
num_years = len(results['fiscal_tots'])
years = list(range(TAXCALC_RESULTS_START_YEAR,
TAXCALC_RESULTS_START_YEAR + num_years))
tables = {}
for table_id in results:
# Debug inputs
"""
print('\n ----- inputs ------- ')
print('looking at {0}'.format(table_id))
if table_id == 'fiscal_tots':
print('{0}'.format(results[table_id]))
else:
print('{0}'.format(results[table_id].keys()))
print(' ----- inputs ------- \n')
"""
if table_id in ['mX_dec', 'mY_dec']:
row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id in ['mX_bin', 'mY_bin']:
row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_MTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_MTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'df_dec':
row_keys = TAXCALC_RESULTS_DEC_ROW_KEYS
row_labels = TAXCALC_RESULTS_DEC_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'df_bin':
row_keys = TAXCALC_RESULTS_BIN_ROW_KEYS
row_labels = TAXCALC_RESULTS_BIN_ROW_KEY_LABELS
col_labels = TAXCALC_RESULTS_DFTABLE_COL_LABELS
col_formats = TAXCALC_RESULTS_DFTABLE_COL_FORMATS
table_data = results[table_id]
multi_year_cells = True
elif table_id == 'fiscal_tots':
# todo - move these into the above TC result param constants
row_keys = ['totals']
row_labels = {'totals': 'Total Revenue'}
col_labels = years
col_formats = [ [1000000000, 'Dollars', 1] for y in years]
table_data = {'totals': results[table_id]}
multi_year_cells = False
table = {
'col_labels': col_labels,
'cols': [],
'label': TAXCALC_RESULTS_TABLE_LABELS[table_id],
'rows': [],
'multi_valued': multi_year_cells
}
for col_key, label in enumerate(col_labels):
table['cols'].append({
'label': label,
'divisor': col_formats[col_key][0],
'units': col_formats[col_key][1],
'decimals': col_formats[col_key][2],
})
col_count = len(col_labels)
for row_key in row_keys:
row = {
'label': row_labels[row_key],
'cells': []
}
for col_key in range(0, col_count):
cell = {
'year_values': {},
'format': {
'divisor': table['cols'][col_key]['divisor'],
'decimals': table['cols'][col_key]['decimals'],
}
}
if multi_year_cells:
for yi, year in enumerate(years):
value = table_data["{0}_{1}".format(row_key, yi)][col_key]
if value[-1] == "%":
value = value[:-1]
cell['year_values'][year] = value
cell['first_value'] = cell['year_values'][TAXCALC_RESULTS_START_YEAR]
else:
value = table_data[row_key][col_key]
if value[-1] == "%":
value = value[:-1]
cell['value'] = value
row['cells'].append(cell)
table['rows'].append(row)
tables[table_id] = table
# Debug results
"""
print('\n ----- result ------- ')
print('{0}'.format(table))
print(' ----- result ------- \n')
"""
tables['result_years'] = years
return tables
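# Usage notes (descriptive only, not part of the original module): the mapping
# returned above is keyed by the dropq table ids plus 'result_years', e.g.
#   tables = taxcalc_results_to_tables(results)
#   tables['result_years']                                # [2015, 2016, ...]
#   tables['fiscal_tots']['rows'][0]['cells'][0]['value'] # first-year total revenue
# The 'fiscal_tots' table is single-valued, so its cells carry 'value' rather
# than 'year_values'.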
def format_csv(tax_results, url_id):
"""
Takes a dictionary with the tax_results, having these keys:
[u'mY_bin', u'mX_bin', u'mY_dec', u'mX_dec', u'df_dec', u'df_bin',
u'fiscal_tots']
    And then returns a list of lists of strings for CSV output. The format
of the lines is as follows:
#URL: http://www.ospc.org/taxbrain/ID/csv/
#fiscal tots data
YEAR_0, ... YEAR_K
val, val, ... val
#mX_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mY_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#df_dec
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mX_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#mY_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
#df_bin
YEAR_0
col_0, col_1, ..., col_n
val, val, ..., val
YEAR_1
col_0, col_1, ..., col_n
val, val, ..., val
...
"""
res = []
#URL
res.append(["#URL: http://www.ospc.org/taxbrain/" + str(url_id) + "/"])
#FISCAL TOTS
res.append(["#fiscal totals data"])
ft = tax_results.get('fiscal_tots', [])
yrs = [START_YEAR + i for i in range(0, len(ft))]
if yrs:
res.append(yrs)
if ft:
res.append(ft)
#MX_DEC
res.append(["#mX_dec"])
mxd = tax_results.get('mX_dec', {})
if mxd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(mxd[row+"_" + str(count)])
#MY_DEC
res.append(["#mY_dec"])
myd = tax_results.get('mY_dec', {})
if myd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(myd[row+"_" + str(count)])
#DF_DEC
res.append(["#df_dec"])
dfd = tax_results.get('df_dec', {})
if dfd:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_DFTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_DEC_ROW_KEYS:
res.append(dfd[row+"_" + str(count)])
#MX_BIN
res.append(["#mX_bin"])
mxb = tax_results.get('mX_bin', {})
if mxb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(mxb[row+"_" + str(count)])
#MY_BIN
res.append(["#mY_bin"])
myb = tax_results.get('mY_bin', {})
if myb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_MTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(myb[row+"_" + str(count)])
#DF_BIN
res.append(["#df_bin"])
dfb = tax_results.get('df_bin', {})
if dfb:
for count, yr in enumerate(yrs):
res.append([yr])
res.append(TAXCALC_RESULTS_DFTABLE_COL_LABELS)
for row in TAXCALC_RESULTS_BIN_ROW_KEYS:
res.append(dfb[row+"_" + str(count)])
return res
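# Hedged helper (not in the original module): the nested lists produced by
# format_csv can be streamed to disk with the stdlib csv writer. The file path
# is caller-supplied; binary mode is used because this module targets Python 2.
def _write_csv_sketch(tax_results, url_id, path):
    import csv
    with open(path, 'wb') as handle:
        csv.writer(handle).writerows(format_csv(tax_results, url_id))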
def submit_dropq_calculation(mods):
print "mods is ", mods
user_mods = package_up_vars(mods)
if not bool(user_mods):
return False
print "user_mods is ", user_mods
print "submit work"
user_mods={START_YEAR:user_mods}
years = list(range(0,NUM_BUDGET_YEARS))
hostnames = DROPQ_WORKERS
num_hosts = len(hostnames)
data = {}
data['user_mods'] = json.dumps(user_mods)
job_ids = []
hostname_idx = 0
for y in years:
year_submitted = False
attempts = 0
while not year_submitted:
data['year'] = str(y)
theurl = "http://{hn}/dropq_start_job".format(hn=hostnames[hostname_idx])
try:
response = requests.post(theurl, data=data, timeout=TIMEOUT_IN_SECONDS)
if response.status_code == 200:
print "submitted: ", str(y), hostnames[hostname_idx]
year_submitted = True
job_ids.append((response.text, hostnames[hostname_idx]))
hostname_idx = (hostname_idx + 1) % num_hosts
else:
print "FAILED: ", str(y), hostnames[hostname_idx]
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
except Timeout:
print "Couldn't submit to: ", hostnames[hostname_idx]
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
except RequestException as re:
print "Something unexpected happened: ", re
hostname_idx = (hostname_idx + 1) % num_hosts
attempts += 1
if attempts > MAX_ATTEMPTS_SUBMIT_JOB:
print "Exceeded max attempts. Bailing out."
raise IOError()
return job_ids
def dropq_results_ready(job_ids):
jobs_done = [False] * len(job_ids)
for idx, id_hostname in enumerate(job_ids):
id_, hostname = id_hostname
result_url = "http://{hn}/dropq_query_result".format(hn=hostname)
job_response = requests.get(result_url, params={'job_id':id_})
if job_response.status_code == 200: # Valid response
rep = job_response.text
if rep == 'YES':
jobs_done[idx] = True
print "got one!: ", id_
return all(jobs_done)
def dropq_get_results(job_ids):
ans = []
for idx, id_hostname in enumerate(job_ids):
id_, hostname = id_hostname
result_url = "http://{hn}/dropq_get_result".format(hn=hostname)
job_response = requests.get(result_url, params={'job_id':id_})
if job_response.status_code == 200: # Valid response
ans.append(job_response.json())
mY_dec = {}
mX_dec = {}
df_dec = {}
mY_bin = {}
mX_bin = {}
df_bin = {}
fiscal_tots = []
for result in ans:
mY_dec.update(result['mY_dec'])
mX_dec.update(result['mX_dec'])
df_dec.update(result['df_dec'])
mY_bin.update(result['mY_bin'])
mX_bin.update(result['mX_bin'])
df_bin.update(result['df_bin'])
fiscal_tots.append(result['fiscal_tots'])
results = {'mY_dec': mY_dec, 'mX_dec': mX_dec, 'df_dec': df_dec,
'mY_bin': mY_bin, 'mX_bin': mX_bin, 'df_bin': df_bin,
'fiscal_tots': fiscal_tots}
return results
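def _run_dropq_sketch(mods, poll_seconds=5):
    """Hedged end-to-end sketch (not part of the original module).

    Chains the helpers above: submit the per-year jobs, poll until every worker
    reports YES, then collect and merge the results. The polling interval is an
    assumption.
    """
    import time
    job_ids = submit_dropq_calculation(mods)
    if not job_ids:
        return None
    while not dropq_results_ready(job_ids):
        time.sleep(poll_seconds)
    return dropq_get_results(job_ids)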
| mit | -2,011,549,738,239,617,500 | 31.881546 | 93 | 0.540101 | false |
lucc/alot | tests/utils/argparse_test.py | 1 | 5935 | # encoding=utf-8
# Copyright © 2017 Dylan Baker
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests for alot.utils.argparse"""
import argparse
import contextlib
import os
import shutil
import tempfile
import unittest
import mock
from alot.utils import argparse as cargparse
# Good descriptive test names often don't fit PEP8, which is meant to cover
# functions meant to be called by humans.
# pylint: disable=invalid-name
# When using mock asserts it's possible that many methods will not use self,
# that's fine
# pylint: disable=no-self-use
class TestValidatedStore(unittest.TestCase):
"""Tests for the ValidatedStore action class."""
def _argparse(self, args):
"""Create an argparse instance with a validator."""
def validator(args):
if args == 'fail':
raise cargparse.ValidationFailed
parser = argparse.ArgumentParser()
parser.add_argument(
'foo',
action=cargparse.ValidatedStoreAction,
validator=validator)
with mock.patch('sys.stderr', mock.Mock()):
return parser.parse_args(args)
def test_validates(self):
        # Argparse will raise a SystemExit (calls sys.exit) rather than letting
# the exception cause the program to close.
with self.assertRaises(SystemExit):
self._argparse(['fail'])
@contextlib.contextmanager
def temporary_directory(suffix='', prefix='', dir=None): # pylint: disable=redefined-builtin
    A minimal stand-in for Python 3's tempfile.TemporaryDirectory.
Python3 provides a class that can be used as a context manager, which
creates a temporary directory and removes it when the context manager
    exits. This function emulates enough of the TemporaryDirectory interface
    for this module to use, and is designed as a drop-in replacement that can
    be swapped out after the python3 port.
The only user visible difference is that this does not implement the
cleanup method that TemporaryDirectory does.
"""
directory = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
yield directory
shutil.rmtree(directory)
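# Usage sketch (mirrors tempfile.TemporaryDirectory; the file name below is
# arbitrary):
#
#   with temporary_directory(prefix='alot-test-') as d:
#       open(os.path.join(d, 'scratch.txt'), 'w').close()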
class TestRequireFile(unittest.TestCase):
"""Tests for the require_file validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
cargparse.require_file(f.name)
def test_char_special(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_file(path)
class TestRequireDir(unittest.TestCase):
"""Tests for the require_dir validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
cargparse.require_dir(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(f.name)
def test_char_special(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
with self.assertRaises(cargparse.ValidationFailed):
cargparse.require_dir(path)
class TestOptionalFileLike(unittest.TestCase):
"""Tests for the optional_file_like validator."""
def test_doesnt_exist(self):
with temporary_directory() as d:
cargparse.optional_file_like(os.path.join(d, 'doesnt-exist'))
def test_dir(self):
with temporary_directory() as d:
with self.assertRaises(cargparse.ValidationFailed):
cargparse.optional_file_like(d)
def test_file(self):
with tempfile.NamedTemporaryFile() as f:
cargparse.optional_file_like(f.name)
def test_char_special(self):
cargparse.optional_file_like('/dev/null')
def test_fifo(self):
with temporary_directory() as d:
path = os.path.join(d, 'fifo')
os.mkfifo(path)
cargparse.optional_file_like(path)
class TestIntOrPlusOrMinus(unittest.TestCase):
"""Tests for the is_int_or_pm validator."""
def test_int(self):
self.assertTrue(cargparse.is_int_or_pm('5'))
def test_pm(self):
self.assertTrue(cargparse.is_int_or_pm('+'))
self.assertTrue(cargparse.is_int_or_pm('-'))
def test_rubbish(self):
with self.assertRaises(cargparse.ValidationFailed):
cargparse.is_int_or_pm('XX')
| gpl-3.0 | -1,888,860,210,976,214,300 | 32.337079 | 93 | 0.667678 | false |
monkut/deso | deso/deso/layers/raster/management/commands/fill_raster_layer_cache.py | 1 | 2025 | """
Make requests for tiles at given zoom levels to fill the tilecache.
"""
from django.core.management.base import BaseCommand
from django.conf import settings
from ...models import RasterAggregatedLayer
WGS84_SRID = settings.WGS84_SRID
def request_layer_tiles(layer_url, layer, zoom):
"""
Request tiles for given layer
    :param layer_url: Absolute URL with a layer_id placeholder (for example: http://HOST:PORT/pathtolayer/{layer_id}/)
:param layer: RasterAggregatedLayer object
:param zoom: Zoom level
:return: tile count
"""
count = 0
    # The tile-request loop is not implemented here; return the running count
    # so the documented ":return: tile count" contract holds.
    return count
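# The helper below is a hedged sketch (not in the original command) of one way
# request_layer_tiles could be completed; the XYZ tile parameters and the use
# of the `requests` library are assumptions.
def _request_layer_tiles_sketch(layer_url, layer_id, zoom, tiles):
    import requests  # local import so the sketch does not affect module import
    url = layer_url.format(layer_id=layer_id)
    count = 0
    for x, y in tiles:  # (x, y) tile indices at the given zoom level
        response = requests.get(url, params={"z": zoom, "x": x, "y": y})
        if response.ok:
            count += 1
    return count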
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument("-l", "--layers",
type=int,
nargs="+",
required=True,
default=None,
help="RasterAggregatedLayer Id(s) of layers to cache")
parser.add_argument("-z", "--zooms",
type=int,
nargs="+",
default=[14,],
help="Zoom Level(s) to cache [DEFAULT=14]")
DEFAULT_RASTER_LAYERS_URL = "http://{}:{}/raster/layer/{{layer_id}}/".format(settings.HOST,
settings.PORT)
parser.add_argument("-u", "--url",
default=DEFAULT_RASTER_LAYERS_URL,
help="Raster Layers URL to send requests to [DEFAULT='{}']".format(DEFAULT_RASTER_LAYERS_URL))
def handle(self, *args, **options):
layer_ids = sorted(options["layers"])
for layer_id in layer_ids:
try:
layer = RasterAggregatedLayer.objects.get(id=layer_id)
except RasterAggregatedLayer.DoesNotExist:
                self.stderr.write("Given RasterAggregatedLayer({}) Does Not Exist -- SKIPPING!".format(layer_id))
                continue  # without this, the lookup below would run with no valid layer
center = layer.get_center()
| mit | 2,064,351,220,925,089,800 | 37.207547 | 122 | 0.536296 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/uploadCustomCertificate.py | 1 | 1685 | """Uploads a custom certificate for the console proxy VMs to use for SSL. Can be used to upload a single certificate signed by a known CA. Can also be used, through multiple calls, to upload a chain of certificates from CA to the custom certificate itself."""
from baseCmd import *
from baseResponse import *
class uploadCustomCertificateCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""The certificate to be uploaded."""
"""Required"""
self.certificate = None
self.typeInfo['certificate'] = 'string'
"""DNS domain suffix that the certificate is granted for."""
"""Required"""
self.domainsuffix = None
self.typeInfo['domainsuffix'] = 'string'
"""An integer providing the location in a chain that the certificate will hold. Usually, this can be left empty. When creating a chain, the top level certificate should have an ID of 1, with each step in the chain incrementing by one. Example, CA with id = 1, Intermediate CA with id = 2, Site certificate with ID = 3"""
self.id = None
self.typeInfo['id'] = 'integer'
"""A name / alias for the certificate."""
self.name = None
self.typeInfo['name'] = 'string'
"""The private key for the attached certificate."""
self.privatekey = None
self.typeInfo['privatekey'] = 'string'
self.required = ["certificate", "domainsuffix", ]
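# Hedged usage sketch (not part of the generated binding): Marvin test code
# usually fills in a command object and hands it to the API client; the
# `apiclient` call pattern below is assumed, not taken from this file.
def _upload_root_certificate_sketch(apiclient, pem_text, domain_suffix):
    cmd = uploadCustomCertificateCmd()
    cmd.certificate = pem_text
    cmd.domainsuffix = domain_suffix
    cmd.id = 1  # top of the chain, per the id docstring above
    return apiclient.uploadCustomCertificate(cmd)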
class uploadCustomCertificateResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""message of the certificate upload operation"""
self.message = None
self.typeInfo['message'] = 'string'
| apache-2.0 | 2,818,282,309,625,276,000 | 43.342105 | 328 | 0.651632 | false |
eunchong/build | scripts/slave/recipe_modules/chromium_tests/chromium_fyi.py | 1 | 63755 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import steps
RESULTS_URL = 'https://chromeperf.appspot.com'
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-fyi-archive',
},
'builders': {
'Win Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'builder',
'compile_targets': [
'chromium_builder_tests',
],
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
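    # The remaining entries follow the same general shape as 'Win Builder'
    # above: chromium_config/gclient_config name recipe configurations,
    # chromium_config_kwargs carry BUILD_CONFIG and TARGET_BITS into the build,
    # bot_type marks an entry as a 'builder', 'tester' or combined
    # 'builder_tester', and tester entries point at their builder via
    # parent_buildername.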
'Chromium Mac 10.10 MacViews': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_apply_config': ['chromium_mac_mac_views'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'mac',
},
},
'Chromium Mac 10.11': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'testing': {
'platform': 'mac',
},
},
'Chromium Mac 10.11 Force Mac Toolchain': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'chromium_apply_config': ['force_mac_toolchain'],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
],
'testing': {
'platform': 'mac',
},
},
'Linux ARM': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_ARCH': 'arm',
'TARGET_BITS': 32,
},
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
'archive_build': True,
'gs_bucket': 'chromium-fyi-archive',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'use_isolate': True,
'enable_swarming': True,
'swarming_dimensions': {
'cpu': 'armv7l-32',
'os': 'Ubuntu-14.04',
},
},
'Linux Trusty': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'all',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Linux Trusty (dbg)': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'all',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Linux V8 API Stability': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'gclient_apply_config': ['v8_canary', 'with_branch_heads'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'all',
],
'testing': {
'platform': 'linux',
},
},
'MD Top Chrome ChromeOS non-material': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'chromeos',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'MD Top Chrome ChromeOS material-hybrid': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'chromeos',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'MD Top Chrome Win material': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'win',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'win',
},
},
'MD Top Chrome Linux material': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'linux',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Print Preview Linux': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'GYP_DEFINES': {
'component': 'shared_library',
},
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'linux',
'TARGET_BITS': 64,
},
'tests': [
steps.PrintPreviewTests(),
],
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
},
'Print Preview Mac': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'GYP_DEFINES': {
'component': 'shared_library',
},
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'mac',
'TARGET_BITS': 64,
},
'tests': [
steps.PrintPreviewTests(),
],
'bot_type': 'builder_tester',
'testing': {
'platform': 'mac',
},
},
'Print Preview Win': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'GYP_DEFINES': {
'component': 'shared_library',
},
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'win',
'TARGET_BITS': 32,
},
'tests': [
steps.PrintPreviewTests(),
],
'bot_type': 'builder_tester',
'testing': {
'platform': 'win',
},
},
'CFI Linux': {
'chromium_config': 'chromium_cfi',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
},
'CFI Linux ToT': {
'chromium_config': 'chromium_cfi',
'chromium_apply_config': ['clang_tot'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'builder_tester',
'testing': {
'platform': 'linux',
},
},
'CFI Linux CF': {
'chromium_config': 'chromium_cfi',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'testing': {
'platform': 'linux',
},
},
'LTO Linux Perf': {
'chromium_config': 'chromium_official',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['chrome_internal'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'compile_targets': [ 'chromium_builder_perf' ],
'testing': {
'platform': 'linux',
},
},
'Mac OpenSSL': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'GYP_DEFINES': {
'use_openssl': '1',
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'mac',
},
},
'Site Isolation Linux': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'GYP_DEFINES': {
'dcheck_always_on': '1',
},
'bot_type': 'builder_tester',
'compile_targets': [
'content_unittests',
'content_browsertests',
],
'tests': [
steps.BlinkTest(extra_args=[
'--additional-driver-flag',
'--site-per-process',
'--additional-expectations',
'src/third_party/WebKit/LayoutTests/FlagExpectations/site-per-process',
'--options',
'http/tests',
]),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Site Isolation Win': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'win',
'TARGET_BITS': 64,
},
'GYP_DEFINES': {
'dcheck_always_on': '1',
},
'bot_type': 'builder_tester',
'compile_targets': [
'content_unittests',
'content_browsertests',
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'win',
},
},
'Browser Side Navigation Linux': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'content_unittests',
'content_browsertests',
],
'tests': [
steps.BlinkTest(["--additional-driver-flag=--enable-browser-side-navigation"]),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'ChromePracticeFullTester': {
'chromium_config': 'chromium',
'chromium_apply_config': ['ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'chromium_swarm_tests',
],
'tests': [
steps.GTestTest('base_unittests'),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
# The official builders specify the test_spec here as opposed to
# reading it from a file.
'test_spec': {
},
'testing': {
'platform': 'linux',
},
},
'ChromiumPractice': {
'chromium_config': 'chromium',
'gclient_config': 'blink_merged',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'testing': {
'platform': 'linux',
},
},
'ChromiumPracticeTester': {
'chromium_config': 'chromium',
'gclient_config': 'blink_merged',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'tests': [
steps.BlinkTest(),
],
'bot_type': 'tester',
'parent_buildername': 'ChromiumPractice',
'testing': {
'platform': 'linux',
},
},
'ChromiumPracticeFullTester': {
'chromium_config': 'chromium',
'chromium_apply_config': ['ninja_confirm_noop'],
'gclient_config': 'blink_merged',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'blink_tests',
'chromium_swarm_tests',
],
'tests': [
steps.BlinkTest(),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'WinClang': {
'chromium_config': 'chromium_win_clang',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
# Recipes builds Debug builds with component=shared_library by default.
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang': {
'chromium_config': 'chromium_win_clang_official',
'gclient_config': 'chromium',
'gclient_apply_config': ['chrome_internal'],
'chromium_apply_config': ['mb'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClang(dbg)': {
'chromium_config': 'chromium_win_clang',
'gclient_config': 'chromium',
'chromium_apply_config': ['mb'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
# Recipes builds Debug builds with component=shared_library by default.
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang(dbg) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang(dbg)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClang(shared)': {
'chromium_config': 'chromium_win_clang',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library' },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang(shared) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang(shared)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClang64': {
'chromium_config': 'chromium_win_clang_official',
'gclient_config': 'chromium',
'gclient_apply_config': ['chrome_internal'],
'chromium_apply_config': ['mb'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang64 tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang64',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClang64(dbg)': {
'chromium_config': 'chromium_win_clang',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
# Recipes builds Debug builds with component=shared_library by default.
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang64(dbg) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang64(dbg)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClang64(dll)': {
'chromium_config': 'chromium_win_clang',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library' },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClang64(dll) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClang64(dll)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClangLLD': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library', 'use_lld': 1 },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClangLLD tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClangLLD',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClngLLDdbg': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library', 'use_lld': 1 },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClngLLDdbg tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClngLLDdbg',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClangLLD64': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library', 'use_lld': 1 },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClangLLD64 tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClangLLD64',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinClngLLD64dbg': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library', 'use_lld': 1 },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'CrWinClngLLD64dbg tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinClngLLD64dbg',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinAsan': {
'chromium_config': 'chromium_win_clang_asan_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
'compile_targets': [ 'chromium_builder_asan' ],
# add_tests_as_compile_targets not needed for the asan bot, it doesn't
# build everything.
},
'CrWinAsan tester': {
'chromium_config': 'chromium_win_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinAsan',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinAsan(dll)': {
'chromium_config': 'chromium_win_clang_asan_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'GYP_DEFINES': { 'component': 'shared_library' },
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
'compile_targets': [ 'chromium_builder_asan' ],
# add_tests_as_compile_targets not needed for the asan bot, it doesn't
# build everything.
},
'CrWinAsan(dll) tester': {
'chromium_config': 'chromium_win_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinAsan(dll)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinAsanCov': {
'chromium_config': 'chromium_win_clang_asan_tot_coverage',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'builder',
'testing': {
'platform': 'win',
},
'use_isolate': True,
'enable_swarming': True,
'compile_targets': [ 'chromium_builder_asan' ],
# add_tests_as_compile_targets not needed for the asan bot, it doesn't
# build everything.
},
'CrWinAsanCov tester': {
'chromium_config': 'chromium_win_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [
steps.generate_gtest,
],
'bot_type': 'tester',
'parent_buildername': 'CrWinAsanCov',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
},
'CrWinGoma': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWinGoma(dll)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'shared_library'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWin7Goma': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWin7Goma(dll)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'shared_library'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWin7Goma(dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'GYP_DEFINES': {
'win_z7': '1'
},
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWin7Goma(clbr)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'clobber', 'shared_library'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'CrWinClangGoma': {
'chromium_config': 'chromium_win_clang',
'chromium_apply_config': ['goma_canary', 'clobber', 'shared_library'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'win'
}
},
'Chromium Linux Goma Canary': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'linux'
}
},
'Chromium Linux Goma Canary (clobber)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'clobber'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'linux'
}
},
'Chromium Linux32 Goma Canary (clobber)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'clobber'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'linux'
}
},
'Chromium Linux Precise Goma LinkTest': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'goma_linktest'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'linux'
}
},
'Chromium Mac 10.9 Goma Canary': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'mac'
}
},
'Chromium Mac 10.9 Goma Canary (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'mac'
}
},
'Chromium Mac 10.9 Goma Canary (clobber)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'clobber'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'mac'
}
},
'Chromium Mac 10.9 Goma Canary (dbg)(clobber)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['goma_canary', 'clobber'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [ 'chromium_builder_tests' ],
'goma_canary': True,
'tests': steps.GOMA_TESTS,
'testing': {
'platform': 'mac'
}
},
'ClangToTLinux': {
'chromium_config': 'clang_tot_linux',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'GYP_DEFINES': {
'component': 'shared_library',
# Enable debug info, as on official builders, to catch issues with
# optimized debug info.
'linux_dump_symbols': '1',
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': {
'platform': 'linux',
},
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTLinux')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTLinux tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'tester',
'parent_buildername': 'ClangToTLinux',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
},
'ClangToTLinux (dbg)': {
'chromium_config': 'clang_tot_linux',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'linux', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTLinux (dbg)')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTLinuxASan': {
'chromium_config': 'clang_tot_linux_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'chromium_apply_config': ['lsan'],
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'linux', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTLinuxASan')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTLinuxASan tester': {
'chromium_config': 'chromium_linux_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'chromium_apply_config': ['lsan'],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'tester',
'parent_buildername': 'ClangToTLinuxASan',
'testing': { 'platform': 'linux', },
'enable_swarming': True,
},
'ClangToTLinuxUBSanVptr': {
'chromium_config': 'clang_tot_linux_ubsan_vptr',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'linux', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTLinuxUBSanVptr')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTLinuxUBSanVptr tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'tester',
'parent_buildername': 'ClangToTLinuxUBSanVptr',
'testing': { 'platform': 'linux', },
'enable_swarming': True,
},
'ClangToTAndroidASan': {
'chromium_config': 'clang_tot_android_asan',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'android',
'TARGET_ARCH': 'arm',
'TARGET_BITS': 32,
},
'GYP_DEFINES': {
'component': 'shared_library',
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'android_config': 'clang_asan_tot_release_builder',
'testing': { 'platform': 'linux', },
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTAndroidASan tester': {
'chromium_config': 'clang_tot_android_asan',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'ClangToTAndroidASan',
'android_config': 'clang_asan_tot_release_builder',
'root_devices': True,
'tests': [
steps.AndroidJunitTest('base_junit_tests'),
steps.GTestTest(
'components_browsertests',
android_isolate_path='components/components_browsertests.isolate',
android_tool='asan'),
steps.GTestTest('gfx_unittests', android_tool='asan'),
],
'testing': {
'platform': 'linux',
},
},
'ClangToTMac': {
'chromium_config': 'clang_tot_mac',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'GYP_DEFINES': {
'component': 'shared_library',
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'mac', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTMac')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTMac tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'tester',
'parent_buildername': 'ClangToTMac',
'testing': {
'platform': 'mac',
},
'enable_swarming': True,
},
'ClangToTMac (dbg)': {
'chromium_config': 'clang_tot_mac',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'mac', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTMac (dbg)')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTMacASan': {
'chromium_config': 'clang_tot_mac_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'mac', },
'tests': {
steps.SizesStep(RESULTS_URL, 'ClangToTMacASan')
},
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTMacASan tester': {
'chromium_config': 'chromium_mac_asan',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'bot_type': 'tester',
'parent_buildername': 'ClangToTMacASan',
'testing': { 'platform': 'mac', },
'enable_swarming': True,
},
'ClangToTWin': {
'chromium_config': 'chromium_win_clang_official_tot',
'gclient_config': 'chromium',
'gclient_apply_config': ['chrome_internal'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'ClangToTWin(dbg)': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin(dbg)') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin(dbg) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin(dbg)',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'ClangToTWin(dll)': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library' },
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin(dll)') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin(dll) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin(dll)',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'ClangToTWin64': {
'chromium_config': 'chromium_win_clang_official_tot',
'gclient_config': 'chromium',
'gclient_apply_config': ['chrome_internal'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin64') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin64 tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin64',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'ClangToTWin64(dbg)': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin64(dbg)') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin64(dbg) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin64(dbg)',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'ClangToTWin64(dll)': {
'chromium_config': 'chromium_win_clang_tot',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'compile_targets': [
'all',
],
'GYP_DEFINES': { 'component': 'shared_library' },
'bot_type': 'builder',
'testing': { 'platform': 'win', },
'tests': { steps.SizesStep(RESULTS_URL, 'ClangToTWin64(dll)') },
'use_isolate': True,
'enable_swarming': True,
# Workaround so that recipes doesn't add random build targets to our
# compile line. We want to build everything.
'add_tests_as_compile_targets': False,
},
'ClangToTWin64(dll) tester': {
'chromium_config': 'chromium_no_goma',
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [steps.generate_gtest],
'bot_type': 'tester',
'parent_buildername': 'ClangToTWin64(dll)',
'testing': { 'platform': 'win' },
'enable_swarming': True,
},
'Win Builder (ANGLE)': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'gclient_apply_config': ['angle_top_of_tree'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'builder',
'compile_targets': [
'chromium_builder_tests',
],
'testing': {
'platform': 'win',
},
'patch_root': 'src/third_party/angle',
'enable_swarming': True,
'use_isolate': True,
},
'Win7 Tests (ANGLE)': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'gclient_apply_config': ['angle_top_of_tree'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
},
'bot_type': 'tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'parent_buildername': 'Win Builder (ANGLE)',
'testing': {
'platform': 'win',
},
'enable_swarming': True,
'use_isolate': True,
},
'Headless Linux (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
'TARGET_PLATFORM': 'linux',
},
'bot_type': 'builder_tester',
'test_generators': [
steps.generate_gtest,
],
'testing': {
'platform': 'linux',
},
},
'Android Builder (dbg)': {
'chromium_config': 'android',
'chromium_apply_config': ['chrome_with_codecs', 'mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'android_config': 'main_builder',
'bot_type': 'builder',
'compile_targets': [
'chromedriver_webview_shell_apk',
],
'testing': {
'platform': 'linux',
},
},
'Android Tests (trial)(dbg)': {
'chromium_config': 'android',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'Android Builder (dbg)',
'android_config': 'non_device_wipe_provisioning',
'root_devices': True,
'tests': [
steps.GTestTest('gfx_unittests'),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Android Tests (amp split)': {
'chromium_config': 'android',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'amp_config': 'commit_queue_pool',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'Android Builder (dbg)',
'android_config': 'main_builder',
'root_devices': True,
'enable_swarming': False,
'tests': [
steps.AndroidInstrumentationTest('AndroidWebViewTest'),
steps.AndroidInstrumentationTest('ContentShellTest'),
steps.AndroidInstrumentationTest('ChromePublicTest'),
steps.AndroidInstrumentationTest('ChromeSyncShellTest'),
steps.AMPGTestTest('android_webview_unittests',
device_name=['Nexus 5'], device_os=['4.4.2']),
steps.AMPGTestTest('base_unittests',
device_name=['Nexus 5'], device_os=['4.4.2'],
android_isolate_path='base/base_unittests.isolate'),
steps.GTestTest(
'breakpad_unittests',
override_compile_targets=['breakpad_unittests_deps'],
android_isolate_path='breakpad/breakpad_unittests.isolate'),
steps.GTestTest('cc_unittests'),
steps.AMPGTestTest('components_unittests',
device_name=['Nexus 5'], device_os=['4.4.2'],
android_isolate_path='components/components_unittests.isolate'),
steps.GTestTest('content_browsertests'),
steps.GTestTest('content_unittests'),
steps.AMPGTestTest('events_unittests',
device_name=['Nexus 5'], device_os=['4.4.2']),
steps.AMPGTestTest('gl_tests',
device_name=['Nexus 5'], device_os=['4.4.2']),
steps.GTestTest('gpu_unittests'),
steps.AMPGTestTest('ipc_tests', device_name=['Nexus 5'],
device_os=['4.4.2']),
steps.GTestTest('media_unittests'),
steps.GTestTest('net_unittests',
android_isolate_path='net/net_unittests.isolate',
android_shard_timeout=300),
steps.GTestTest(
'sandbox_linux_unittests',
override_compile_targets=['sandbox_linux_unittests_deps']),
steps.AMPGTestTest('sql_unittests',
device_name=['Nexus 5'], device_os=['4.4.2'],
android_isolate_path='sql/sql_unittests.isolate'),
steps.AMPGTestTest('sync_unit_tests',
device_name=['Nexus 5'], device_os=['4.4.2'],
android_isolate_path='sync/sync_unit_tests.isolate'),
steps.AMPGTestTest('ui_android_unittests',
device_name=['Nexus 5'], device_os=['4.4.2']),
steps.GTestTest('ui_base_unittests'),
steps.AMPGTestTest('ui_touch_selection_unittests',
device_name=['Nexus 5'], device_os=['4.4.2']),
steps.GTestTest('unit_tests'),
steps.AndroidJunitTest('junit_unit_tests'),
steps.AndroidJunitTest('chrome_junit_tests'),
steps.AndroidJunitTest('content_junit_tests'),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Android Tests (amp instrumentation test split)': {
'chromium_config': 'android',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'amp_config': 'main_pool',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'Android Builder (dbg)',
'android_config': 'main_builder',
'root_devices': True,
'enable_swarming': False,
'tests': [
steps.AMPInstrumentationTest(
test_apk='AndroidWebViewTest',
apk_under_test='AndroidWebView',
android_isolate_path=
'android_webview/android_webview_test_apk.isolate',
compile_target='android_webview_test_apk',
device_name=['Nexus 5'], device_os=['4.4.2'],
fallback_to_local=True),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Android Remoting Tests': {
'chromium_config': 'android',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'compile_targets': [
'remoting_apk',
],
'parent_buildername': 'Android Builder (dbg)',
'bot_type': 'tester',
'android_config': 'main_builder',
'root_devices': True,
'enable_swarming': False,
'tests': [
steps.GTestTest('remoting_unittests'),
steps.AndroidInstrumentationTest('ChromotingTest'),
],
'testing': {
'platform': 'linux',
},
},
'Android Asan Builder Tests (dbg)': {
'chromium_config': 'android',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'builder_tester',
'android_config': 'clang_tests',
'root_devices': True,
'tests': [
steps.AndroidInstrumentationTest('AndroidWebViewTest', tool='asan'),
steps.AndroidInstrumentationTest('ChromePublicTest', tool='asan'),
steps.AndroidInstrumentationTest('ContentShellTest', tool='asan'),
steps.AndroidInstrumentationTest('ChromeSyncShellTest', tool='asan'),
],
'test_generators': [
steps.generate_gtest,
steps.generate_instrumentation_test,
steps.generate_isolated_script,
steps.generate_script,
],
'testing': {
'platform': 'linux',
},
},
'Chromium Win 10': {
'chromium_config': 'chromium',
'gclient_config': 'chromium',
'GYP_DEFINES': {
'dcheck_always_on': '1',
},
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'win',
},
},
'Android Coverage (dbg)': {
'chromium_config': 'android',
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'tester',
'parent_buildername': 'Android Builder (dbg)',
'android_config': 'incremental_coverage_builder_tests',
'root_devices': True,
'tests': [
steps.AndroidInstrumentationTest('AndroidWebViewTest'),
steps.AndroidInstrumentationTest('ChromePublicTest'),
steps.AndroidInstrumentationTest('ContentShellTest'),
steps.AndroidInstrumentationTest('ChromeSyncShellTest'),
steps.IncrementalCoverageTest(),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'testing': {
'platform': 'linux',
},
},
'Android Cloud Tests': {
'chromium_config': 'android',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'gclient_apply_config': ['android'],
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 32,
'TARGET_PLATFORM': 'android',
},
'bot_type': 'builder_tester',
'android_config': 'gce_builder',
'tests': [
steps.AndroidJunitTest('base_junit_tests'),
steps.AndroidJunitTest('chrome_junit_tests'),
steps.AndroidJunitTest('components_junit_tests'),
steps.AndroidJunitTest('content_junit_tests'),
steps.AndroidJunitTest('junit_unit_tests'),
steps.AndroidJunitTest('net_junit_tests'),
],
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
steps.generate_instrumentation_test,
],
'testing': {
'platform': 'linux',
},
},
'Win SyzyAsan (rel)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['syzyasan_compile_only', 'shared_library'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'win',
'TARGET_BITS': 32,
},
'compile_targets': [
'chrome',
],
'enable_swarming': True,
'testing': {
'platform': 'win',
},
},
},
}
| bsd-3-clause | -3,632,314,178,595,956,000 | 28.792056 | 87 | 0.536115 | false |
intelligent-agent/redeem | tests/gcode/test_M114.py | 1 | 1069 | 
from __future__ import absolute_import
import mock
from random import random
from .MockPrinter import MockPrinter
from redeem.Gcode import Gcode
class M114_Tests(MockPrinter):
def test_gcodes_M114(self):
A = round(random() * 200, 1)
B = round(random() * 200, 1)
C = round(random() * 200, 1)
X = round(random() * 200, 1)
Y = round(random() * 200, 1)
Z = round(random() * 200, 1)
E = round(random() * 200, 1)
H = round(random() * 200, 1)
self.printer.path_planner.get_current_pos = mock.Mock(return_value={
'A': A,
'C': C,
'B': B,
'E': E,
'H': H,
'Y': Y,
'X': X,
'Z': Z
})
g = Gcode({"message": "M114"})
self.printer.processor.gcodes[g.gcode].execute(g)
self.printer.path_planner.get_current_pos.assert_called_with(
ideal=True, mm=True) # kinda redundant, but hey.
self.assertEqual(
g.answer,
"ok C: X:{:.1f} Y:{:.1f} Z:{:.1f} E:{:.1f} A:{:.1f} B:{:.1f} C:{:.1f} H:{:.1f}".format(
X, Y, Z, E, A, B, C, H))
| gpl-3.0 | 9,131,512,291,480,535,000 | 27.891892 | 95 | 0.533209 | false |
sunnychaudhari/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/ajax_views.py | 1 | 226833 |
''' -- imports from python libraries -- '''
# import os -- Keep such imports here
import datetime
import csv
import time
import ast
import json
import math
import multiprocessing
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.http import StreamingHttpResponse
from django.http import Http404
from django.core.paginator import Paginator
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from mongokit import paginator
from django.contrib.sites.models import Site
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS
from gnowsys_ndf.settings import STATIC_ROOT, STATIC_URL
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.models import *
from gnowsys_ndf.ndf.org2any import org2html
from gnowsys_ndf.ndf.views.file import *
from gnowsys_ndf.ndf.views.methods import check_existing_group, get_drawers, get_node_common_fields, get_node_metadata, get_execution_time
from gnowsys_ndf.ndf.views.methods import get_widget_built_up_data, parse_template_data
from gnowsys_ndf.ndf.views.methods import create_grelation, create_gattribute, create_task
from gnowsys_ndf.ndf.templatetags.ndf_tags import get_profile_pic, edit_drawer_widget, get_contents
from gnowsys_ndf.mobwrite.models import ViewObj
from gnowsys_ndf.notification import models as notification
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
topic_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Topic'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
class Encoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, ObjectId):
            return str(obj)
        else:
            return obj
# This function is used to check (while creating a new group) whether the group already exists.
# It is called in the lost-focus event of the group_name text box, to check the existence of the group, in order to avoid duplicate group names.
@get_execution_time
def checkgroup(request, group_name):
titl = request.GET.get("gname", "")
retfl = check_existing_group(titl)
if retfl:
return HttpResponse("success")
else:
return HttpResponse("failure")
@get_execution_time
def terms_list(request, group_id):
if request.is_ajax() and request.method == "POST":
        # page number that was clicked in the pagination
page_no = request.POST.get("page_no", '')
terms = []
gapp_GST = node_collection.one({'_type': 'MetaType', 'name': 'GAPP'})
term_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Term', 'member_of':ObjectId(gapp_GST._id) })
# To list all term instances
terms_list = node_collection.find({
'_type': 'GSystem', 'member_of': ObjectId(term_GST._id),
'group_set': ObjectId(group_id)
}).sort('name', 1)
paged_terms = paginator.Paginator(terms_list, page_no, 25)
        # Since "paged_terms" returns a dict, we append the dict items to a list to forward it to the template
for each in paged_terms.items:
terms.append(each)
return render_to_response(
'ndf/terms_list.html',
{
'group_id': group_id, 'groupid': group_id, "paged_terms": terms,
'page_info': paged_terms
},
context_instance=RequestContext(request)
)
# This ajax view renders the output as "node view" by clicking on collections
@get_execution_time
def collection_nav(request, group_id):
'''
    This ajax function returns the node on the main template, when clicked on in the collection hierarchy
'''
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
curr_node_id = request.POST.get("curr_node", '')
node_type = request.POST.get("nod_type", '')
breadcrumbs_list = []
curr_node_obj = node_collection.one({'_id': ObjectId(curr_node_id)})
if node_type == "Topic":
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
for e in curr_node_obj.prior_node:
prior = node_collection.one({'_id': ObjectId(e)})
if curr_node_obj._id in prior.collection_set and theme_item_GST._id in prior.member_of:
breadcrumbs_list.append((str(prior._id), prior.name))
topic = ""
node_obj = node_collection.one({'_id': ObjectId(node_id)})
nav_list = request.POST.getlist("nav[]", '')
n_list = request.POST.get("nav", '')
# This "n_list" is for manipulating breadcrumbs events and its navigation
if n_list:
            # Convert the incoming list from the template into a python list
            n_list = n_list.replace("&#39;", "'")
n_list = ast.literal_eval(n_list)
            # For removing elements from the breadcrumbs list based on which node is clicked
for e in reversed(n_list):
if e != unicode(node_obj._id):
n_list.remove(e)
else:
break
nav_list = n_list
# Firstly original node should go into breadcrumbs list
breadcrumbs_list.append( (str(curr_node_obj._id), curr_node_obj.name) )
if nav_list:
            # create breadcrumbs list from the navigation list sent from the template.
for each in nav_list:
obj = node_collection.one({'_id': ObjectId(each) })
breadcrumbs_list.append( (str(obj._id), obj.name) )
b_list = []
for each in breadcrumbs_list:
b_list.append(each[0])
if str(node_obj._id) not in b_list:
# Add the tuple if clicked node is not there in breadcrumbs list
breadcrumbs_list.append( (str(node_obj._id), node_obj.name) )
else:
            # Remove breadcrumbs until the clicked node is reached (removal starts in reverse order)
for e in reversed(breadcrumbs_list):
if node_id in e:
break
else:
breadcrumbs_list.remove(e)
# print "breadcrumbs_list: ",breadcrumbs_list,"\n"
return render_to_response('ndf/node_ajax_view.html',
{ 'node': node_obj,
'original_node':curr_node_obj,
'group_id': group_id,
'groupid':group_id,
'breadcrumbs_list':breadcrumbs_list,
'app_id': node_id, 'topic':topic, 'nav_list':nav_list
},
context_instance = RequestContext(request)
)
# This view handles the collection list of resource and its breadcrumbs
@get_execution_time
def collection_view(request, group_id):
'''
This ajax function returns breadcrumbs_list for clicked node in collection hierarchy
'''
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
# breadcrumbs_list = request.POST.get("breadcrumbs_list", '')
node_obj = node_collection.one({'_id': ObjectId(node_id)})
# breadcrumbs_list = breadcrumbs_list.replace("'","'")
# breadcrumbs_list = ast.literal_eval(breadcrumbs_list)
# b_list = []
# for each in breadcrumbs_list:
# b_list.append(each[0])
# if str(node_obj._id) not in b_list:
# # Add the tuple if clicked node is not there in breadcrumbs list
# breadcrumbs_list.append( (str(node_obj._id), node_obj.name) )
# else:
# # To remove breadcrumbs untill clicked node have not reached(Removal starts in reverse order)
# for e in reversed(breadcrumbs_list):
# if node_id in e:
# break
# else:
# breadcrumbs_list.remove(e)
return render_to_response('ndf/collection_ajax_view.html',
{
'node': node_obj, 'group_id': group_id, 'groupid': group_id
},context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def shelf(request, group_id):
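    '''
    Ajax view for managing a user's shelves: it creates a new Shelf GSystem
    (linked to the author via the has_shelf relation), adds or removes an item
    from a shelf's collection_set, or deletes a shelf, and then re-renders the
    shelf widget.
    '''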
if request.is_ajax() and request.method == "POST":
shelf = request.POST.get("shelf_name", '')
shelf_add = request.POST.get("shelf_add", '')
shelf_remove = request.POST.get("shelf_remove", '')
shelf_item_remove = request.POST.get("shelf_item_remove", '')
shelf_available = ""
shelf_item_available = ""
shelf_gst = node_collection.one({'_type': u'GSystemType', 'name': u'Shelf'})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
has_shelf_RT = node_collection.one({'_type': u'RelationType', 'name': u'has_shelf'})
if shelf:
shelf_gs = node_collection.one({'name': unicode(shelf), 'member_of': [ObjectId(shelf_gst._id)] })
if shelf_gs is None:
shelf_gs = node_collection.collection.GSystem()
shelf_gs.name = unicode(shelf)
shelf_gs.created_by = int(request.user.id)
shelf_gs.member_of.append(shelf_gst._id)
shelf_gs.save()
shelf_R = triple_collection.collection.GRelation()
shelf_R.subject = ObjectId(auth._id)
shelf_R.relation_type = has_shelf_RT
shelf_R.right_subject = ObjectId(shelf_gs._id)
shelf_R.save()
else:
if shelf_add:
shelf_item = ObjectId(shelf_add)
if shelf_item in shelf_gs.collection_set:
shelf_Item = node_collection.one({'_id': ObjectId(shelf_item)}).name
shelf_item_available = shelf_Item
return HttpResponse("failure")
else:
node_collection.collection.update({'_id': shelf_gs._id}, {'$push': {'collection_set': ObjectId(shelf_item) }}, upsert=False, multi=False)
shelf_gs.reload()
elif shelf_item_remove:
shelf_item = node_collection.one({'name': unicode(shelf_item_remove)})._id
node_collection.collection.update({'_id': shelf_gs._id}, {'$pull': {'collection_set': ObjectId(shelf_item) }}, upsert=False, multi=False)
shelf_gs.reload()
else:
shelf_available = shelf
elif shelf_remove:
shelf_gs = node_collection.one({'name': unicode(shelf_remove), 'member_of': [ObjectId(shelf_gst._id)] })
shelf_rel = triple_collection.one({'_type': 'GRelation', 'subject': ObjectId(auth._id),'right_subject': ObjectId(shelf_gs._id) })
shelf_rel.delete()
shelf_gs.delete()
else:
shelf_gs = None
shelves = []
shelf_list = {}
if auth:
shelf = triple_collection.find({'_type': 'GRelation', 'subject': ObjectId(auth._id), 'relation_type.$id': has_shelf_RT._id})
if shelf:
for each in shelf:
shelf_name = node_collection.one({'_id': ObjectId(each.right_subject)})
shelves.append(shelf_name)
shelf_list[shelf_name.name] = []
for ID in shelf_name.collection_set:
shelf_item = node_collection.one({'_id': ObjectId(ID)})
shelf_list[shelf_name.name].append(shelf_item.name)
else:
shelves = []
return render_to_response('ndf/shelf.html',
{ 'shelf_obj': shelf_gs,'shelf_list': shelf_list,'shelves': shelves,
'groupid':group_id
},
context_instance = RequestContext(request)
)
@get_execution_time
def drawer_widget(request, group_id):
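    '''
    Renders the drawer widget for a node: builds the paginated lists of
    available and already-related resources for the requested field
    (prior_node, teaches, assesses or collection) using get_drawers().
    '''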
drawer = None
drawers = None
drawer1 = None
drawer2 = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist = []
node = None
node_id = request.POST.get("node_id", '')
field = request.POST.get("field", '')
app = request.POST.get("app", '')
page_no = request.POST.get("page_no", '')
if node_id:
node = node_collection.one({'_id': ObjectId(node_id)})
if field == "prior_node":
app = None
nlist = node.prior_node
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "teaches":
app = None
relationtype = node_collection.one({"_type": "RelationType", "name": "teaches"})
list_grelations = triple_collection.find({"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "assesses":
app = field
relationtype = node_collection.one({"_type": "RelationType", "name": "assesses"})
list_grelations = triple_collection.find({"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
elif field == "collection":
if app == "Quiz":
app = "QuizItem"
elif app == "Theme":
app = "Theme"
elif app == "Theme Item":
app == "theme_item"
elif app == "Topic":
app = "Topic"
elif app == "Module":
app = "Module"
else:
app = None
nlist = node.collection_set
drawer, paged_resources = get_drawers(group_id, node._id, nlist, page_no, app)
else:
if field == "collection" and app == "Quiz":
app = "QuizItem"
elif field == "collection" and app == "Theme":
app = "Theme"
elif field == "collection" and app == "Theme Item":
app = "theme_item"
elif field == "collection" and app == "Course":
app = "Module"
else:
app = None
nlist = []
drawer, paged_resources = get_drawers(group_id, None, nlist, page_no, app)
drawers = drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
return render_to_response('ndf/drawer_widget.html',
{ 'widget_for': field,'drawer1': drawer1, 'drawer2': drawer2,'node_id': node_id,
'group_id': group_id,'groupid': group_id,"page_info": paged_resources
},
context_instance = RequestContext(request)
)
@get_execution_time
def select_drawer(request, group_id):
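    '''
    Ajax view that rebuilds the drawer widget when a resource-type filter
    (e.g. Page, File, Quiz, Pandora video) is chosen, so that only members of
    the selected GSystemType appear in the drawer.
    '''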
if request.is_ajax() and request.method == "POST":
drawer = None
drawers = None
drawer1 = None
drawer2 = None
selection_flag = True
node = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist=[]
check = ""
checked = ""
relationtype = ""
node_id = request.POST.get("node_id", '')
page_no = request.POST.get("page_no", '')
field = request.POST.get("field", '')
checked = request.POST.get("homo_collection", '')
node_type = request.POST.get("node_type", '')
if node_id:
node_id = ObjectId(node_id)
node = node_collection.one({'_id': ObjectId(node_id)})
if node_type:
if len(node.member_of) > 1:
n_type = node_collection.one({'_id': ObjectId(node.member_of[1])})
else:
n_type = node_collection.one({'_id': ObjectId(node.member_of[0])})
checked = n_type.name
if checked:
if checked == "QuizObj" :
quiz = node_collection.one({'_type': 'GSystemType', 'name': "Quiz" })
quizitem = node_collection.one({'_type': 'GSystemType', 'name': "QuizItem"})
elif checked == "Pandora Video":
check = node_collection.one({'_type': 'GSystemType', 'name': 'Pandora_video'})
else:
check = node_collection.one({'_type': 'GSystemType', 'name': unicode(checked)})
if node_id:
if field:
if field == "teaches":
relationtype = node_collection.one({"_type": "RelationType", "name":"teaches"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id,
"relation_type": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "assesses":
relationtype = node_collection.one({"_type": "RelationType", "name":"assesses"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id,
"relation_type": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "prior_node":
nlist = node.prior_node
elif field == "collection":
nlist = node.collection_set
else:
node_id = None
if node_id:
if node.collection_set:
if checked:
for k in node.collection_set:
obj = node_collection.one({'_id': ObjectId(k)})
if check:
if check._id in obj.member_of:
nlist.append(k)
else:
if quiz._id in obj.member_of or quizitem._id in obj.member_of:
nlist.append(k)
else:
nlist = node.collection_set
if field == "assesses":
checked = field
checked = None
drawer, paged_resources = get_drawers(group_id, node_id, nlist, page_no, checked)#get_drawers(group_id, node_id, nlist, checked)
if field == "course_units":
nlist.append("course_units")
selection_flag = False
drawers = get_drawers(group_id, node_id, nlist, checked)
drawers = drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
if not field:
field = "collection"
return render_to_response("ndf/drawer_widget.html",
{"widget_for": field, "page_info": paged_resources,
"drawer1": drawer1, 'selection': selection_flag, 'node_id':node_id,
"drawer2": drawer2, "checked": checked,
"groupid": group_id
},
context_instance=RequestContext(request)
)
@get_execution_time
def search_drawer(request, group_id):
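    '''
    Ajax view that filters the drawer widget by a search string, matching Page,
    File and Quiz nodes of the group whose names contain the query
    (case-insensitive).
    '''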
if request.is_ajax() and request.method == "POST":
search_name = request.POST.get("search_name", '')
node_id = request.POST.get("node_id", '')
selection = request.POST.get("selection", '')
field = request.POST.get("field", '')
search_drawer = None
drawers = None
drawer1 = None
drawer2 = None
dict_drawer = {}
dict1 = {}
dict2 = []
nlist=[]
node = None
page_no = 1
Page = node_collection.one({'_type': 'GSystemType', 'name': 'Page'})
File = node_collection.one({'_type': 'GSystemType', 'name': 'File'})
Quiz = node_collection.one({'_type': "GSystemType", 'name': "Quiz"})
if node_id:
node = node_collection.one({'_id': ObjectId(node_id)})
node_type = node_collection.one({'_id': ObjectId(node.member_of[0])})
if field:
if field == "teaches":
relationtype = node_collection.one({"_type": "RelationType", "name": "teaches"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "assesses":
relationtype = node_collection.one({"_type": "RelationType", "name": "assesses"})
list_grelations = triple_collection.find({
"_type": "GRelation", "subject": node._id, "relation_type.$id": relationtype._id
})
for relation in list_grelations:
nlist.append(ObjectId(relation.right_subject))
elif field == "prior_node":
nlist = node.prior_node
elif field == "collection":
nlist = node.collection_set
node.reload()
search_drawer = node_collection.find({'_type': {'$in' : [u"GSystem", u"File"]},
'member_of':{'$in':[Page._id,File._id,Quiz._id]},
'$and': [
{'name': {'$regex': str(search_name), '$options': "i"}},
{'group_set': {'$all': [ObjectId(group_id)]} }
]
})
else:
search_drawer = node_collection.find({'_type': {'$in' : [u"GSystem", u"File"]},
'member_of':{'$in':[Page._id,File._id,Quiz._id]},
'$and': [
{'name': {'$regex': str(search_name), '$options': "i"}},
{'group_set': {'$all': [ObjectId(group_id)]} }
]
})
if node_id:
for each in search_drawer:
if each._id != node._id:
if each._id not in nlist:
dict1[each._id] = each
for oid in nlist:
obj = node_collection.one({'_id': oid })
dict2.append(obj)
dict_drawer['1'] = dict1
dict_drawer['2'] = dict2
else:
if (node is None) and (not nlist):
for each in search_drawer:
dict_drawer[each._id] = each
drawers = dict_drawer
if not node_id:
drawer1 = drawers
else:
drawer1 = drawers['1']
drawer2 = drawers['2']
return render_to_response("ndf/drawer_widget.html",
{"widget_for": field,
"drawer1": drawer1, 'selection': selection,
"drawer2": drawer2, 'search_name': search_name,
"groupid": group_id, 'node_id': node_id
},
context_instance=RequestContext(request)
)
@get_execution_time
def get_topic_contents(request, group_id):
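    '''Ajax view returning, as JSON, the contents of a topic for the selected resource type.'''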
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
selected = request.POST.get("selected", '')
choice = request.POST.get("choice", '')
# node = node_collection.one({'_id': ObjectId(node_id) })
contents = get_contents(node_id, selected, choice)
return HttpResponse(json.dumps(contents))
#### Below part is for manipulating theme topic hierarchy ####
@get_execution_time
def get_collection_list(collection_list, node):
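    '''
    Recursively extends collection_list with the theme_item/Topic children of
    the given node, producing the nested dict structure used by the theme tree view.
    '''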
inner_list = []
error_list = []
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
if col_obj:
if theme_item_GST._id in col_obj.member_of or topic_GST._id in col_obj.member_of:
for cl in collection_list:
if cl['id'] == node.pk:
node_type = node_collection.one({'_id': ObjectId(col_obj.member_of[0])}).name
inner_sub_dict = {'name': col_obj.name, 'id': col_obj.pk , 'node_type': node_type}
inner_sub_list = [inner_sub_dict]
inner_sub_list = get_collection_list(inner_sub_list, col_obj)
if inner_sub_list:
inner_list.append(inner_sub_list[0])
else:
inner_list.append(inner_sub_dict)
cl.update({'children': inner_list })
else:
error_message = "\n TreeHierarchyError: Node with given ObjectId ("+ str(each) +") not found!!!\n"
print "\n " + error_message
return collection_list
else:
return collection_list
@get_execution_time
def get_tree_hierarchy(request, group_id, node_id):
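    '''
    Returns the theme-topic hierarchy below the given theme node as JSON,
    wrapped in a single root node when the "collapsible" GET parameter is set.
    '''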
node = node_collection.one({'_id':ObjectId(node_id)})
Collapsible = request.GET.get("collapsible", "");
data = ""
collection_list = []
themes_list = []
theme_node = node_collection.one({'_id': ObjectId(node._id) })
# print "\ntheme_node: ",theme_node.name,"\n"
if theme_node.collection_set:
for e in theme_node.collection_set:
objs = node_collection.one({'_id': ObjectId(e) })
for l in objs.collection_set:
themes_list.append(l)
for each in theme_node.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj._id not in themes_list:
if theme_item_GST._id in obj.member_of or topic_GST._id in obj.member_of:
node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
collection_list.append({'name': obj.name, 'id': obj.pk, 'node_type': node_type})
collection_list = get_collection_list(collection_list, obj)
if Collapsible:
data = { "name": theme_node.name, "children": collection_list }
else:
data = collection_list
return HttpResponse(json.dumps(data))
# ###End of manipulating theme topic hierarchy####
##### Below part is for manipulating nodes collections #####
@get_execution_time
def get_inner_collection(collection_list, node):
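    '''
    Recursively extends collection_list with all children found in the given
    node's collection_set, regardless of their GSystemType.
    '''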
inner_list = []
error_list = []
if node.collection_set:
for each in node.collection_set:
col_obj = node_collection.one({'_id': ObjectId(each)})
if col_obj:
for cl in collection_list:
if cl['id'] == node.pk:
node_type = node_collection.one({'_id': ObjectId(col_obj.member_of[0])}).name
inner_sub_dict = {'name': col_obj.name, 'id': col_obj.pk,'node_type': node_type}
inner_sub_list = [inner_sub_dict]
inner_sub_list = get_inner_collection(inner_sub_list, col_obj)
if inner_sub_list:
inner_list.append(inner_sub_list[0])
else:
inner_list.append(inner_sub_dict)
cl.update({'children': inner_list })
else:
error_message = "\n TreeHierarchyError: Node with given ObjectId ("+ str(each) +") not found!!!\n"
print "\n " + error_message
return collection_list
else:
return collection_list
@get_execution_time
def get_collection(request, group_id, node_id):
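    '''Returns the collection_set hierarchy of the given node as JSON.'''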
node = node_collection.one({'_id':ObjectId(node_id)})
# print "\nnode: ",node.name,"\n"
collection_list = []
if node:
if node.collection_set:
for each in node.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj:
node_type = node_collection.one({'_id': ObjectId(obj.member_of[0])}).name
collection_list.append({'name': obj.name, 'id': obj.pk,'node_type': node_type})
collection_list = get_inner_collection(collection_list, obj)
data = collection_list
return HttpResponse(json.dumps(data))
# ###End of manipulating nodes collection####
@get_execution_time
def add_sub_themes(request, group_id):
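    '''
    Ajax view that creates a new theme_item (sub-theme) and appends it to the
    context node's collection_set, unless a sub-theme with the same name is
    already present in the submitted list.
    '''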
if request.is_ajax() and request.method == "POST":
context_node_id = request.POST.get("context_node", '')
sub_theme_name = request.POST.get("sub_theme_name", '')
themes_list = request.POST.get("nodes_list", '')
        themes_list = themes_list.replace("&quot;", "'")
themes_list = ast.literal_eval(themes_list)
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
context_node = node_collection.one({'_id': ObjectId(context_node_id) })
# Save the sub-theme first
if sub_theme_name:
if not sub_theme_name.upper() in (theme_name.upper() for theme_name in themes_list):
node = node_collection.collection.GSystem()
# get_node_common_fields(request, node, group_id, theme_GST)
node.save(is_changed=get_node_common_fields(request, node, group_id, theme_item_GST))
node.reload()
# Add this sub-theme into context nodes collection_set
node_collection.collection.update({'_id': context_node._id}, {'$push': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
context_node.reload()
return HttpResponse("success")
return HttpResponse("failure")
return HttpResponse("None")
@get_execution_time
def add_theme_item(request, group_id):
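    '''
    Ajax view that creates a new theme_item and appends it to the context
    theme's collection_set, failing if an item with the same name already exists there.
    '''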
if request.is_ajax() and request.method == "POST":
context_theme_id = request.POST.get("context_theme", '')
name =request.POST.get('name','')
context_theme = node_collection.one({'_id': ObjectId(context_theme_id) })
list_theme_items = []
if name and context_theme:
for each in context_theme.collection_set:
obj = node_collection.one({'_id': ObjectId(each) })
if obj.name == name:
return HttpResponse("failure")
theme_item_node = node_collection.collection.GSystem()
theme_item_node.save(is_changed=get_node_common_fields(request, theme_item_node, group_id, theme_item_GST))
theme_item_node.reload()
# Add this theme item into context theme's collection_set
node_collection.collection.update({'_id': context_theme._id}, {'$push': {'collection_set': ObjectId(theme_item_node._id) }}, upsert=False, multi=False)
context_theme.reload()
return HttpResponse("success")
@get_execution_time
def add_topics(request, group_id):
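    '''
    Ajax view that creates a new Topic and appends it to the context node's
    collection_set, unless a topic with the same name is already listed.
    '''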
if request.is_ajax() and request.method == "POST":
# print "\n Inside add_topics ajax view\n"
context_node_id = request.POST.get("context_node", '')
add_topic_name = request.POST.get("add_topic_name", '')
topics_list = request.POST.get("nodes_list", '')
        topics_list = topics_list.replace("&quot;", "'")
topics_list = ast.literal_eval(topics_list)
topic_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Topic'})
context_node = node_collection.one({'_id': ObjectId(context_node_id) })
# Save the topic first
if add_topic_name:
# print "\ntopic name: ", add_topic_name
if not add_topic_name.upper() in (topic_name.upper() for topic_name in topics_list):
node = node_collection.collection.GSystem()
# get_node_common_fields(request, node, group_id, topic_GST)
node.save(is_changed=get_node_common_fields(request, node, group_id, topic_GST))
node.reload()
# Add this topic into context nodes collection_set
node_collection.collection.update({'_id': context_node._id}, {'$push': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
context_node.reload()
return HttpResponse("success")
return HttpResponse("failure")
return HttpResponse("None")
@get_execution_time
def add_page(request, group_id):
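    '''
    Ajax view that creates a new Page and appends it to the context node's
    collection_set; returns a JSON success flag (False for a duplicate name).
    '''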
if request.is_ajax() and request.method == "POST":
context_node_id = request.POST.get("context_node", '')
css_node_id = request.POST.get("css_node", '')
unit_name = request.POST.get("unit_name", '')
context_name = request.POST.get("context_name", '')
gst_page = node_collection.one({'_type': "GSystemType", 'name': "Page"})
name = request.POST.get('name', '')
collection_list = []
context_node = None
response_dict = {"success": False}
context_node = node_collection.one({'_id': ObjectId(context_node_id)})
for each in context_node.collection_set:
obj = node_collection.one({'_id': ObjectId(each), 'group_set': ObjectId(group_id)})
collection_list.append(obj.name)
if name not in collection_list:
page_node = node_collection.collection.GSystem()
page_node.save(is_changed=get_node_common_fields(request, page_node, group_id, gst_page))
context_node.collection_set.append(page_node._id)
context_node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
response_dict["success"] = None
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def add_file(request, group_id):
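    '''
    Handles file uploads for a Topic or Course unit: new files are stored via
    submitDoc(), files already present in GridFS (matched by md5) are reused
    where possible, and each file is published and appended to the context
    node's collection_set before redirecting back to the calling page.
    '''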
# this is context node getting from the url get request
context_node_id = request.GET.get('context_node', '')
context_node = node_collection.one({'_id': ObjectId(context_node_id)})
if request.method == "POST":
context_name = request.POST.get("context_name", "")
css_node_id = request.POST.get("css_node_id", "")
course_node = request.POST.get("course_node", "")
unit_name = request.POST.get("unit_name_file", "")
app_id = request.POST.get("app_id", "")
app_set_id = request.POST.get("app_set_id", "")
if context_name is "Topic":
url_name = "/" + group_id + "/topic_details/" + context_node_id + ""
else:
# i.e if context_name is "Course"
url_name = "/" + group_id + "/course/add_units/?css_node_id=" + \
css_node_id + "&unit_node_id=" + context_node_id + "&course_node="+ course_node
if app_id and app_set_id:
url_name += "&app_id=" + app_id + "&app_set_id=" + app_set_id + ""
if context_node_id:
# set the unit node name
node_collection.collection.update({'_id': ObjectId(context_node_id)}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
new_list = []
# For checking the node is already available in gridfs or not
for index, each in enumerate(request.FILES.getlist("doc[]", "")):
fileobj = node_collection.collection.File()
filemd5 = hashlib.md5(each.read()).hexdigest()
if not fileobj.fs.files.exists({"md5": filemd5}):
# If not available append to the list for making the collection for topic below
new_list.append(each)
else:
if context_name == "Course":
# If file exists, PUBLISH it and add to collection set
cur_oid = gridfs_collection.find_one({"md5": filemd5}, {'docid': 1, '_id': 0})
old_file_node = node_collection.find_one({'_id': ObjectId(str(cur_oid["docid"]))})
if old_file_node._id not in context_node.collection_set:
context_node.collection_set.append(old_file_node._id)
old_file_node.status = u"PUBLISHED"
old_file_node.prior_node.append(context_node._id)
old_file_node.save()
context_node.save()
else:
                    # If available, then return to the topic page
return HttpResponseRedirect(url_name)
        # After building new_list[], now save the files
submitDoc(request, group_id)
# After file gets saved , that file's id should be saved in collection_set of context topic node
for k in new_list:
cur_oid = gridfs_collection.find_one({"md5": filemd5}, {'docid': 1, '_id': 0})
file_obj = node_collection.find_one({'_id': ObjectId(str(cur_oid["docid"]))})
file_obj.prior_node.append(context_node._id)
file_obj.status = u"PUBLISHED"
file_obj.save()
context_node.collection_set.append(file_obj._id)
file_obj.save()
context_node.save()
return HttpResponseRedirect(url_name)
def collection_of_node(node=None, group_id=None):
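    '''
    Recursively deletes the given node and everything in its collection,
    pulling each deleted node out of any theme_item collection_set that still
    references it.
    '''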
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if node.collection_set:
for each in node.collection_set:
each_node = node_collection.one({'_id': ObjectId(each)})
if each_node.collection_set:
collection_of_node(each_node, group_id)
else:
                # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if each_node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(each_node._id) }}, upsert=False, multi=False)
# print "\n node ", each_node.name ,"has been deleted \n"
each_node.delete()
        # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
else:
        # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
return True
@get_execution_time
def theme_node_collection(node=None, group_id=None):
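    '''
    Variant of collection_of_node used when deleting from the Theme level; the
    referencing collection_sets are looked up against Theme/theme_item members.
    '''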
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if node.collection_set:
for each in node.collection_set:
each_node = node_collection.one({'_id': ObjectId(each)})
if each_node.collection_set:
collection_of_node(each_node, group_id)
else:
                # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if each_node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(each_node._id) }}, upsert=False, multi=False)
# print "\n node ", each_node.name ,"has been deleted \n"
each_node.delete()
        # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
else:
        # After deleting the theme instance, it should also be removed from the collection_set
cur = node_collection.find({'member_of': {'$all': [theme_GST._id,theme_item_GST._id]},'group_set':{'$all': [ObjectId(group_id)]}})
for e in cur:
if node._id in e.collection_set:
node_collection.collection.update({'_id': e._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
# print "\n node ", node.name ,"has been deleted \n"
node.delete()
return True
@get_execution_time
def delete_themes(request, group_id):
    '''Delete theme objects.'''
send_dict = []
deleteobjects = ""
deleteobj = ""
if request.is_ajax() and request.method =="POST":
context_node_id=request.POST.get('context_theme','')
if context_node_id:
context_theme_node = node_collection.one({'_id': ObjectId(context_node_id)})
confirm = request.POST.get("confirm","")
deleteobj = request.POST.get('deleteobj',"")
theme_GST = node_collection.one({'_type': 'GSystemType', 'name': 'Theme'})
theme_item_GST = node_collection.one({'_type': 'GSystemType', 'name': 'theme_item'})
if deleteobj:
obj = node_collection.one({'_id': ObjectId(deleteobj) })
obj.delete()
node = node_collection.one({'member_of': {'$in':[theme_GST._id, theme_item_GST._id]}, 'collection_set': ObjectId(deleteobj) })
node_collection.collection.update({'_id': node._id}, {'$pull': {'collection_set': ObjectId(deleteobj) }}, upsert=False, multi=False)
else:
deleteobjects = request.POST['deleteobjects']
if deleteobjects:
for each in deleteobjects.split(","):
node = node_collection.one({ '_id': ObjectId(each)})
# print "\n confirmed objects: ", node.name
if confirm:
if context_node_id:
collection_of_node(node, group_id)
if node._id in context_theme_node.collection_set:
node_collection.collection.update({'_id': context_theme_node._id}, {'$pull': {'collection_set': ObjectId(node._id) }}, upsert=False, multi=False)
else:
theme_node_collection(node, group_id)
else:
send_dict.append({"title":node.name})
return StreamingHttpResponse(json.dumps(send_dict).encode('utf-8'),content_type="text/json", status=200)
@login_required
@get_execution_time
def change_group_settings(request,group_id):
'''
changing group's object data
'''
if request.is_ajax() and request.method == "POST":
try:
edit_policy = request.POST['edit_policy']
group_type = request.POST['group_type']
subscription_policy = request.POST['subscription_policy']
visibility_policy = request.POST['visibility_policy']
disclosure_policy = request.POST['disclosure_policy']
encryption_policy = request.POST['encryption_policy']
# group_id = request.POST['group_id']
group_node = node_collection.one({"_id": ObjectId(group_id)})
if group_node:
group_node.edit_policy = edit_policy
group_node.group_type = group_type
group_node.subscription_policy = subscription_policy
group_node.visibility_policy = visibility_policy
group_node.disclosure_policy = disclosure_policy
group_node.encryption_policy = encryption_policy
group_node.modified_by = int(request.user.id)
group_node.save()
return HttpResponse("changed successfully")
except:
return HttpResponse("failed")
return HttpResponse("failed")
list_of_collection = []
hm_obj = HistoryManager()
@get_execution_time
def get_module_set_list(node):
'''
Returns the list of collection inside the collections with hierarchy as they are in collection
'''
list = []
for each in node.collection_set:
each = node_collection.one({'_id': each})
dict = {}
dict['id'] = unicode(each._id)
dict['version_no'] = hm_obj.get_current_version(each)
if each._id not in list_of_collection:
list_of_collection.append(each._id)
        if each.collection_set:  # checking that the same collection cannot be called again
            dict['collection'] = get_module_set_list(each)  # calling the same function recursively
list.append(dict)
return list
@login_required
@get_execution_time
def make_module_set(request, group_id):
'''
    This method will create a module of the collection and store ObjectIds with version numbers
'''
if request.is_ajax():
try:
GST_MODULE = node_collection.one({"_type": "GSystemType", 'name': GAPPS[8]})
_id = request.GET.get("_id","")
if _id:
node = node_collection.one({'_id':ObjectId(_id)})
if node:
list_of_collection.append(node._id)
dict = {}
dict['id'] = unicode(node._id)
dict['version_no'] = hm_obj.get_current_version(node)
if node.collection_set:
                        dict['collection'] = get_module_set_list(node)  # gives the list of collections with proper hierarchy as they are
                    # creating a new GSystem object and assigning data from the collection object
gsystem_obj = node_collection.collection.GSystem()
gsystem_obj.name = unicode(node.name)
gsystem_obj.content = unicode(node.content)
gsystem_obj.member_of.append(GST_MODULE._id)
gsystem_obj.group_set.append(ObjectId(group_id))
# if usrname not in gsystem_obj.group_set:
# gsystem_obj.group_set.append(int(usrname))
user_id = int(request.user.id)
gsystem_obj.created_by = user_id
gsystem_obj.modified_by = user_id
if user_id not in gsystem_obj.contributors:
gsystem_obj.contributors.append(user_id)
gsystem_obj.module_set.append(dict)
module_set_md5 = hashlib.md5(str(gsystem_obj.module_set)).hexdigest() #get module_set's md5
                    check = check_module_exits(module_set_md5)  # checking whether the module already exists or not
if(check == 'True'):
return HttpResponse("This module already Exists")
else:
gsystem_obj.save()
create_relation_of_module(node._id, gsystem_obj._id)
create_version_of_module(gsystem_obj._id,node._id)
check1 = sotore_md5_module_set(gsystem_obj._id, module_set_md5)
if (check1 == 'True'):
                            return HttpResponse("module successfully created")
else:
gsystem_obj.delete()
                            return HttpResponse("Error occurred while storing md5 of object in attribute")
else:
return HttpResponse("Object not present corresponds to this id")
else:
return HttpResponse("Not a valid id passed")
except Exception as e:
print "Error:",e
return HttpResponse(e)
@get_execution_time
def sotore_md5_module_set(object_id, module_set_md5):
'''
    This method will store the md5 of the module_set of a particular GSystem into an Attribute
'''
    node_at = node_collection.one({'$and':[{'_type': 'AttributeType'},{'name': 'module_set_md5'}]})  # retrieving attribute type
if node_at is not None:
try:
            attr_obj = triple_collection.collection.GAttribute()  # create an instance of the attribute class
attr_obj.attribute_type = node_at
attr_obj.subject = object_id
attr_obj.object_value = unicode(module_set_md5)
attr_obj.save()
except Exception as e:
print "Exception:",e
return 'False'
return 'True'
else:
        print "Run 'python manage.py filldb' command to create AttributeType 'module_set_md5' "
return 'False'
# under construction
@get_execution_time
def create_version_of_module(subject_id, node_id):
'''
    This method will create the version attribute of the module, using the AttributeType "version"
'''
rt_has_module = node_collection.one({'_type':'RelationType', 'name':'has_module'})
relation = triple_collection.find({'_type': 'GRelation', 'subject': node_id, 'relation_type.$id':rt_has_module._id})
at_version = node_collection.one({'_type':'AttributeType', 'name':'version'})
attr_versions = []
if relation.count() > 0:
for each in relation:
module_id = triple_collection.one({'_id': each['_id']})
if module_id:
attr = triple_collection.one({
'_type': 'GAttribute', 'subject': ObjectId(module_id.right_subject),
'attribute_type.$id': at_version._id
})
if attr:
attr_versions.append(attr.object_value)
if attr_versions:
attr_versions.sort()
attr_ver = float(attr_versions[-1])
attr = triple_collection.collection.GAttribute()
attr.attribute_type = at_version
attr.subject = subject_id
attr.object_value = round((attr_ver+0.1),1)
attr.save()
else:
attr = triple_collection.collection.GAttribute()
attr.attribute_type = at_version
attr.subject = ObjectId(subject_id)
attr.object_value = 1
attr.save()
@get_execution_time
def create_relation_of_module(subject_id, right_subject_id):
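    '''Creates a has_module GRelation from the collection node to the newly created module.'''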
rt_has_module = node_collection.one({'_type': 'RelationType', 'name': 'has_module'})
if rt_has_module and subject_id and right_subject_id:
relation = triple_collection.collection.GRelation() #instance of GRelation class
relation.relation_type = rt_has_module
relation.right_subject = right_subject_id
relation.subject = subject_id
relation.save()
@get_execution_time
def check_module_exits(module_set_md5):
'''
    This method will check whether the module already exists
'''
node_at = node_collection.one({'$and':[{'_type': 'AttributeType'},{'name': 'module_set_md5'}]})
attribute = triple_collection.one({'_type':'GAttribute', 'attribute_type.$id': node_at._id, 'object_value': module_set_md5})
if attribute is not None:
return 'True'
else:
return 'False'
@get_execution_time
def walk(node):
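    '''
    Walks a module_set structure and returns a nested list of dicts (label, id,
    version_no, children) built from the corresponding versioned documents.
    '''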
hm = HistoryManager()
list = []
for each in node:
dict = {}
node = node_collection.one({'_id':ObjectId(each['id'])})
n = hm.get_version_document(node,each['version_no'])
dict['label'] = n.name
dict['id'] = each['id']
dict['version_no'] = each['version_no']
if "collection" in each.keys():
dict['children'] = walk(each['collection'])
list.append(dict)
return list
@get_execution_time
def get_module_json(request, group_id):
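    '''Returns the module_set tree of the given module as JSON for the tree widget.'''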
_id = request.GET.get("_id", "")
node = node_collection.one({'_id': ObjectId(_id)})
data = walk(node.module_set)
return HttpResponse(json.dumps(data))
# ------------- For generating graph json data ------------
@get_execution_time
def graph_nodes(request, group_id):
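    '''
    Builds the node metadata and relation JSON fragments used by the graph
    (concept map) view for the requested node and its neighbourhood.
    '''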
page_node = node_collection.one({'_id': ObjectId(request.GET.get("id"))})
page_node.get_neighbourhood(page_node.member_of)
# print page_node.keys()
coll_relation = {'relation_name': 'has_collection', 'inverse_name': 'member_of_collection'}
prior_relation = {'relation_name': 'prerequisite', 'inverse_name': 'is_required_for'}
def _get_node_info(node_id):
node = node_collection.one( {'_id':node_id} )
# mime_type = "true" if node.structure.has_key('mime_type') else 'false'
return node.name
# def _get_username(id_int):
# return User.objects.get(id=id_int).username
# def _get_node_url(node_id):
# node_url = '/' + str(group_id)
# node = node_collection.one({'_id':node_id})
# if len(node.member_of) > 1:
# if node.mime_type == 'image/jpeg':
# node_url += '/image/image_detail/' + str(node_id)
# elif node.mime_type == 'video':
# node_url += '/video/video_detail/' + str(node_id)
# elif len(node.member_of) == 1:
# gapp_name = (node_collection.one({'_id':node.member_of[0]}).name).lower()
# if gapp_name == 'forum':
# node_url += '/forum/show/' + str(node_id)
# elif gapp_name == 'file':
# node_url += '/image/image_detail/' + str(node_id)
# elif gapp_name == 'page':
# node_url += '/page/details/' + str(node_id)
# elif gapp_name == 'quiz' or 'quizitem':
# node_url += '/quiz/details/' + str(node_id)
# return node_url
# page_node_id = str(id(page_node._id))
node_metadata ='{"screen_name":"' + page_node.name + '", "title":"' + page_node.name + '", "_id":"'+ str(page_node._id) +'", "refType":"GSystem"}, '
node_relations = ''
exception_items = [
"name", "content", "_id", "login_required", "attribute_set",
"member_of", "status", "comment_enabled", "start_publication",
"_type", "contributors", "created_by", "modified_by", "last_update", "url", "featured",
"created_at", "group_set", "type_of", "content_org", "author_set",
"fs_file_ids", "file_size", "mime_type", "location", "language",
"property_order", "rating", "apps_list", "annotations", "instance of"
]
# username = User.objects.get(id=page_node.created_by).username
i = 1
for key, value in page_node.items():
if (key in exception_items) or (not value):
pass
elif isinstance(value, list):
if len(value):
# node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(i) +'_r"}, '
node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(abs(hash(key+str(page_node._id)))) +'_r"}, '
node_relations += '{"type":"'+ key +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
# key_id = str(i)
key_id = str(abs(hash(key+str(page_node._id))))
# i += 1
# if key in ("modified_by", "author_set"):
# for each in value:
# node_metadata += '{"screen_name":"' + _get_username(each) + '", "_id":"'+ str(i) +'_n"},'
# node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(i) +'_n"},'
# i += 1
# else:
for each in value:
# print "\n====", key, "------", type(each)
if isinstance(each, ObjectId):
node_name = _get_node_info(each)
if key == "collection_set":
inverse = coll_relation['inverse_name']
elif key == "prior_node":
inverse = prior_relation['inverse_name']
else:
inverse = ""
node_metadata += '{"screen_name":"' + node_name + '", "title":"' + page_node.name + '", "_id":"'+ str(each) +'", "refType":"Relation", "inverse":"' + inverse + '", "flag":"1"},'
# node_metadata += '{"screen_name":"' + node_name + '", "_id":"'+ str(each) +'", "refType":"relation"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(each) +'"},'
i += 1
# if "each" is Object of GSystem
elif isinstance(each, GSystem):
node_metadata += '{"screen_name":"' + each.name + '", "title":"' + page_node.name + '", "_id":"'+ str(each._id) + '", "refType":"Relation"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(each._id) +'"},'
else:
node_metadata += '{"screen_name":"' + unicode(each) + '", "_id":"'+ unicode(each) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ unicode(each) +'_n"},'
i += 1
else:
# possibly gives GAttribute
node_metadata +='{"screen_name":"' + key + '", "_id":"'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
node_relations += '{"type":"'+ key +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(key+str(page_node._id)))) +'_r"},'
# key_id = str(i)
key_id = str(abs(hash(key+str(page_node._id))))
if isinstance( value, list):
for each in value:
node_metadata += '{"screen_name":"' + each + '", "_id":"'+ str(i) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ key_id +'_r", "to": "'+ str(i) +'_n"},'
i += 1
else:
node_metadata += '{"screen_name":"' + str(value) + '", "_id":"'+ str(i) +'_n"},'
node_relations += '{"type":"'+ key +'", "from":"'+ str(abs(hash(key+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
i += 1
# End of if - else
# End of for loop
# # getting all the relations of current node
# node_rel = page_node.get_possible_relations(page_node.member_of)
# # print "\n\n", node_rel
# for keyy, vall in node_rel.iteritems():
# if vall['subject_or_right_subject_list']:
# for eachnode in vall['subject_or_right_subject_list']:
# if keyy == "event_organised_by":
# pass
# # node_metadata +='{"screen_name":"' + keyy + '", "_id":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# # node_relations += '{"type":"'+ keyy +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# # node_metadata += '{"screen_name":"' + str(vall) + '", "_id":"'+ str(i) +'_n"},'
# # node_relations += '{"type":"'+ keyy +'", "from":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
# else:
# node_metadata +='{"screen_name":"' + keyy + '", "_id":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# node_relations += '{"type":"'+ keyy +'", "from":"'+ str(page_node._id) +'", "to": "'+ str(abs(hash(keyy+str(page_node._id)))) +'_r"},'
# vall = vall.altnames if ( len(vall['altnames'])) else _get_node_info(vall['subject_or_right_subject_list'][0])
# node_metadata += '{"screen_name":"' + str(vall) + '", "_id":"'+ str(i) +'_n"},'
# node_relations += '{"type":"'+ keyy +'", "f**rom":"'+ str(abs(hash(keyy+str(page_node._id)))) +'_r", "to": "'+ str(i) +'_n"},'
# print "\nkey : ", key, "=====", val
node_metadata = node_metadata[:-1]
node_relations = node_relations[:-1]
node_graph_data = '{ "node_metadata": [' + node_metadata + '], "relations": [' + node_relations + '] }'
# print node_graph_data
return StreamingHttpResponse(node_graph_data)
# ------ End of processing for graph ------
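# Sketch of the JSON envelope graph_nodes() streams back, assuming a node with
# a list-valued "tags" field (ids and values hypothetical):
#
#   {"node_metadata": [
#        {"screen_name": "Page A", "title": "Page A", "_id": "54ab...", "refType": "GSystem"},
#        {"screen_name": "tags", "_id": "123456_r"},
#        {"screen_name": "science", "_id": "science_n"}],
#    "relations": [
#        {"type": "tags", "from": "54ab...", "to": "123456_r"},
#        {"type": "tags", "from": "123456_r", "to": "science_n"}]}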
@get_execution_time
def get_data_for_switch_groups(request,group_id):
coll_obj_list = []
node_id = request.GET.get("object_id", "")
st = node_collection.find({"_type": "Group"})
node = node_collection.one({"_id": ObjectId(node_id)})
for each in node.group_set:
coll_obj_list.append(node_collection.one({'_id': each}))
data_list = set_drawer_widget(st, coll_obj_list)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def get_data_for_drawer(request, group_id):
'''
designer module's drawer widget function
'''
coll_obj_list = []
node_id = request.GET.get("id","")
st = node_collection.find({"_type":"GSystemType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.collection_set:
coll_obj_list.append(node_collection.one({'_id':each}))
data_list=set_drawer_widget(st,coll_obj_list)
return HttpResponse(json.dumps(data_list))
# This method is not in use
@get_execution_time
def get_data_for_user_drawer(request, group_id,):
# This method will return data for user widget
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1 = []
drawer2 = []
data_list = []
all_batch_user = []
users = []
st_batch_id = request.GET.get('st_batch_id','')
node_id = request.GET.get('_id','')
if st_batch_id:
batch_coll = node_collection.find({'member_of': {'$all': [ObjectId(st_batch_id)]}, 'group_set': {'$all': [ObjectId(group_id)]}})
group = node_collection.one({'_id':ObjectId(group_id)})
if batch_coll:
for each in batch_coll:
users = users+each.author_set
else:
users = []
user_list = list(set(group.author_set) - set(users))
for each in user_list:
user= User.objects.get(id=each)
dic = {}
dic['id'] = user.id
dic['name'] = user.username
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
if node_id:
for each in node_collection.one({'_id':ObjectId(node_id)}).author_set:
user= User.objects.get(id=each)
dic = {}
dic['id'] = user.id
dic['name'] = user.username
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
else:
return HttpResponse("GSystemType for batch required")
@get_execution_time
def set_drawer_widget_for_users(st, coll_obj_list):
'''
NOTE: this method is used only for user drawers (Django User objects)
'''
draw2={}
draw1={}
data_list=[]
d1=[]
d2=[]
for each in st:
dic = {}
dic['id'] = str(each.id)
dic['name'] = each.email # username
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in coll_obj_list:
dic = {}
dic['id'] = str(each.id)
dic['name'] = each.email # username
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return data_list
@get_execution_time
def get_data_for_batch_drawer(request, group_id):
'''
This method will return data for batch drawer widget
'''
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1 = []
drawer2 = []
data_list = []
st = node_collection.one({'_type':'GSystemType','name':'Student'})
node_id = request.GET.get('_id','')
batch_coll = node_collection.find({"_type": "GSystem", 'member_of':st._id, 'group_set': {'$all': [ObjectId(group_id)]}})
if node_id:
rt_has_batch_member = node_collection.one({'_type':'RelationType','name':'has_batch_member'})
relation_coll = triple_collection.find({'_type':'GRelation', 'right_subject':ObjectId(node_id), 'relation_type.$id':rt_has_batch_member._id})
for each in relation_coll:
dic = {}
n = node_collection.one({'_id': ObjectId(each.subject)})  # fetch the batch node that is the subject of the relation
drawer2.append(n)
for each in batch_coll:
drawer1.append(each)
drawer_set1 = set(drawer1) - set(drawer2)
drawer_set2 = drawer2
for each in drawer_set1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = each.name
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer_set2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = each.name
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def set_drawer_widget(st, coll_obj_list):
'''
this method will set data for drawer widget
'''
stobjs=[]
coll_objs=[]
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
drawer1=[]
drawer2=[]
for each in st:
stobjs.append(each['_id'])
for each in coll_obj_list:
coll_objs.append(each['_id'])
drawer1_set = set(stobjs) - set(coll_objs)
lstset=[]
for each in drawer1_set:
obj=node_collection.one({'_id':each})
lstset.append(obj)
drawer1=lstset
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each['_id'])
dic['name'] = each['name']
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each['_id'])
dic['name'] = each['name']
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return data_list
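# Sketch of the structure set_drawer_widget() returns and the drawer views
# above serialise to JSON (ids/names hypothetical):
#
#   [
#       {'drawer1': [{'id': '54ab...', 'name': 'Available item'}, ...]},   # not yet selected
#       {'drawer2': [{'id': '54ac...', 'name': 'Selected item'}, ...]},    # already in the collection
#   ]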
@get_execution_time
def get_data_for_event_task(request, group_id):
#date creation for task type is date month and year
day_list=[]
append = day_list.append
event_count={}
list31=[1,3,5,7,8,10,12]
list30=[4,6,9,11]
currentYear = datetime.datetime.now().year
#create the date range used for querying the data
#The Task attribute_type start_time's object_value holds only the date,
#in month/date/year format,
#whereas events are queried from nodes which store the date-time in unix format
no = request.GET.get('no','')
month = request.GET.get('start','')[5:7]
year = request.GET.get('start','')[0:4]
start = datetime.datetime(int(currentYear), int(month), 1)
task_start = str(int(month))+"/"+"01"+"/"+str(int(year))
if int(month) in list31:
end=datetime.datetime(int(currentYear),int(month), 31)
task_end=str(int(month))+"/"+"31"+"/"+str(int(year))
elif int(month) in list30:
end=datetime.datetime(int(currentYear),int(month), 30)
task_end=str(int(month))+"/"+"30"+"/"+str(int(year))
else:  # February; leap years are not handled, 28 days assumed
end=datetime.datetime(int(currentYear),int(month), 28)
task_end=str(int(month))+"/"+"28"+"/"+str(int(year))
#day_list of events
if no == '1' or no == '2':
#search for events only when the above condition holds, so that this block
#doesn't get executed when we are looking for other data
event = node_collection.one({'_type': "GSystemType", 'name': "Event"})
obj = node_collection.find({'type_of': event._id},{'_id':1})
all_list = [ each_gst._id for each_gst in obj ]
if no == '1':
nodes = node_collection.find({'_type':'GSystem','member_of':{'$in':all_list},'attribute_set.start_time':{'$gte':start,'$lt': end},'group_set':ObjectId(group_id)})
for i in nodes:
attr_value={}
update = attr_value.update
event_url="/"+str(group_id)+"/event/"+str(i.member_of[0]) +"/"+str(i._id)
update({'url':event_url})
update({'id':i._id})
update({'title':i.name})
date=i.attribute_set[0]['start_time']
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
for j in i.attribute_set:
if unicode('event_status') in j.keys():
if j['event_status'] == 'Scheduled':
#Default Color Blue would be applied
pass
if j['event_status'] == 'Rescheduled':
update({'backgroundColor':'#ffd700'})
if j['event_status'] == 'Completed':
update({'backgroundColor':'green'})
if j['event_status'] == 'Incomplete':
update({'backgroundColor':'red'})
append(dict(attr_value))
if no == '2':
#All the Rescheduled ones
nodes = node_collection.find({'_type':'GSystem','member_of':{'$in':list(all_list)},'attribute_set.event_edit_reschedule.reschedule_dates':{ '$elemMatch':{'$gt':start}},'group_set':ObjectId(group_id)},{'attribute_set.event_edit_reschedule.reschedule_dates':1,"name":1})
for k in nodes:
for a in k.attribute_set:
if unicode('event_edit_reschedule') in a:
for v in a['event_edit_reschedule']['reschedule_dates']:
attr_value={}
update = attr_value.update
event_url=" "
update({'url':event_url})
update({'id':k._id})
update({'title':k.name})
date = v
try:
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
update({'backgroundColor':'#7e7e7e'})
append(dict(attr_value))
except:
pass
date=""
user_assigned=[]
user_append = user_assigned.append
#day_list of task
if no == '3':
groupname=node_collection.find_one({"_id":ObjectId(group_id)})
attributetype_assignee = node_collection.find_one({"_type":'AttributeType', 'name':'Assignee'})
attributetype_key1 = node_collection.find_one({"_type":'AttributeType', 'name':'start_time'})
#check whether the group is an author group or the common group
if groupname._type == "Group":
GST_TASK = node_collection.one({'_type': "GSystemType", 'name': 'Task'})
task_nodes = node_collection.find({"_type": "GSystem", 'member_of':GST_TASK._id, 'group_set': ObjectId(group_id)})
if groupname._type == "Author":
task_nodes = triple_collection.find({"_type":"GAttribute", "attribute_type.$id":attributetype_assignee._id, "object_value":request.user.id}).sort('last_update',-1)
for attr in task_nodes:
if groupname._type == "Group":
task_node = node_collection.one({'_id':attr._id})
if groupname._type == "Author":
task_node = node_collection.one({'_id':attr.subject})
if task_node:
attr1=triple_collection.find_one({
"_type":"GAttribute", "subject":task_node._id, "attribute_type.$id":attributetype_key1._id,
'object_value':{'$gte':task_start,'$lte':task_end}
})
attr_value={}
update = attr_value.update
task_url="/" + groupname.name +"/" + "task"+"/" + str(task_node._id)
update({'id':task_node._id})
update({'title':task_node.name})
if attr1:
date = attr1.object_value
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
update({'start':formated_date})
else:
date=task_node.created_at
formated_date=date.strftime("%Y-%m-%dT%H:%M:%S")
attr_value.update({'start':formated_date})
update({'url':task_url})
append(attr_value)
return HttpResponse(json.dumps(day_list,cls=NodeJSONEncoder))
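# Sketch of one calendar entry emitted by get_data_for_event_task() for no == '1'
# (a calendar-widget style event dict, e.g. as consumed by FullCalendar -- an
# assumption about the front-end; values hypothetical):
#
#   {'id': ObjectId('54ab...'), 'title': u'Guest Lecture',
#    'start': '2014-08-21T10:00:00',
#    'url': '/<group_id>/event/<event_type_id>/<event_id>',
#    'backgroundColor': 'green'}     # colour is only set for some event_status values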
@get_execution_time
def get_data_for_drawer_of_attributetype_set(request, group_id):
'''
this method will fetch data for designer module's drawer widget
'''
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
node_id = request.GET.get("id","")
coll_obj_list = []
st = node_collection.find({"_type":"AttributeType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.attribute_type_set:
coll_obj_list.append(each)
drawer1 = list(set(st) - set(coll_obj_list))
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@get_execution_time
def get_data_for_drawer_of_relationtype_set(request, group_id):
'''
this method will fetch data for designer module's drawer widget
'''
data_list = []
d1 = []
d2 = []
draw1 = {}
draw2 = {}
node_id = request.GET.get("id","")
coll_obj_list = []
st = node_collection.find({"_type":"RelationType"})
node = node_collection.one({"_id":ObjectId(node_id)})
for each in node.relation_type_set:
coll_obj_list.append(each)
drawer1 = list(set(st) - set(coll_obj_list))
drawer2 = coll_obj_list
for each in drawer1:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d1.append(dic)
draw1['drawer1'] = d1
data_list.append(draw1)
for each in drawer2:
dic = {}
dic['id'] = str(each._id)
dic['name'] = str(each.name)
d2.append(dic)
draw2['drawer2'] = d2
data_list.append(draw2)
return HttpResponse(json.dumps(data_list))
@login_required
@get_execution_time
def deletion_instances(request, group_id):
"""
Deletes the given node(s) and associated GAttribute(s) & GRelation(s)
or provides all information before deleting for confirmation.
"""
send_dict = []
if request.is_ajax() and request.method =="POST":
deleteobjects = request.POST['deleteobjects']
confirm = request.POST.get("confirm", "")
for each in deleteobjects.split(","):
delete_list = []
node = node_collection.one({'_id': ObjectId(each)})
left_relations = triple_collection.find({"_type": "GRelation", "subject": node._id})
right_relations = triple_collection.find({"_type": "GRelation", "right_subject": node._id})
attributes = triple_collection.find({"_type": "GAttribute", "subject": node._id})
# When confirm holds "yes" value, all given node(s) is/are deleted.
# Otherwise, required information is provided for confirmation before deletion.
if confirm:
# Deleting GRelation(s) where given node is used as "subject"
for each_left_gr in left_relations:
# Special case
if each_left_gr.relation_type.name == "has_login":
auth_node = node_collection.one(
{'_id': each_left_gr.right_subject},
{'created_by': 1}
)
if auth_node:
node_collection.collection.update(
{'_type': "Group", '$or': [{'group_admin': auth_node.created_by}, {'author_set': auth_node.created_by}]},
{'$pull': {'group_admin': auth_node.created_by, 'author_set': auth_node.created_by}},
upsert=False, multi=True
)
# If given node is used in relationship with any other node (as subject)
# Then given node's ObjectId must be removed from "relation_set" field
# of other node, referred under key as inverse-name of the RelationType
node_collection.collection.update(
{'_id': each_left_gr.right_subject, 'relation_set.'+each_left_gr.relation_type.inverse_name: {'$exists': True}},
{'$pull': {'relation_set.$.'+each_left_gr.relation_type.inverse_name: node._id}},
upsert=False, multi=False
)
each_left_gr.delete()
# Deleting GRelation(s) where given node is used as "right_subject"
for each_right_gr in right_relations:
# If given node is used in relationship with any other node (as subject)
# Then given node's ObjectId must be removed from "relation_set" field
# of other node, referred under key as name of the RelationType
node_collection.collection.update({'_id': each_right_gr.subject, 'relation_set.'+each_right_gr.relation_type.name: {'$exists': True}},
{'$pull': {'relation_set.$.'+each_right_gr.relation_type.name: node._id}},
upsert=False, multi=False
)
each_right_gr.delete()
# Deleting GAttribute(s)
for each_ga in attributes:
each_ga.delete()
# Finally deleting given node
node.delete()
else:
if left_relations :
list_rel = []
for each in left_relations:
rname = node_collection.find_one({"_id":each.right_subject})
if not rname:
continue
rname = rname.name
alt_names = each.relation_type.name
if each.relation_type.altnames:
if ";" in each.relation_type.altnames:
alt_names = each.relation_type.altnames.split(";")[0]
list_rel.append(alt_names + " (Relation): " + rname)
delete_list.append({'left_relations': list_rel})
if right_relations :
list_rel = []
for each in right_relations:
lname = node_collection.find_one({"_id":each.subject})
if not lname:
continue
lname = lname.name
alt_names = each.relation_type.name
if each.relation_type.altnames:
if ";" in each.relation_type.altnames:
alt_names = each.relation_type.altnames.split(";")[1]
list_rel.append(alt_names + " (Inverse-Relation): " + lname)
delete_list.append({'right_relations': list_rel})
if attributes:
list_att = []
for each in attributes:
alt_names = each.attribute_type.name
if each.attribute_type.altnames:
alt_names = each.attribute_type.altnames
list_att.append(alt_names + " (Attribute): " + str(each.object_value))
delete_list.append({'attributes': list_att})
send_dict.append({"title": node.name, "content": delete_list})
if confirm:
return StreamingHttpResponse(str(len(deleteobjects.split(",")))+" objects deleted")
return StreamingHttpResponse(json.dumps(send_dict).encode('utf-8'),content_type="text/json", status=200)
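# Sketch of the confirmation payload deletion_instances() returns when
# "confirm" is empty (node, relation and attribute names hypothetical):
#
#   [{"title": "Node A",
#     "content": [{"left_relations": ["has part (Relation): Node B"]},
#                 {"right_relations": ["part of (Inverse-Relation): Node C"]},
#                 {"attributes": ["Tags (Attribute): science"]}]}]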
@get_execution_time
def get_visited_location(request, group_id):
usrid = request.user.id
visited_location = ""
if(usrid):
usrid = int(request.user.id)
usrname = unicode(request.user.username)
author = node_collection.one({'_type': "GSystemType", 'name': "Author"})
user_group_location = node_collection.one({'_type': "Author", 'member_of': author._id, 'created_by': usrid, 'name': usrname})
if user_group_location:
visited_location = user_group_location.visited_location
return StreamingHttpResponse(json.dumps(visited_location))
@login_required
@get_execution_time
def get_online_editing_user(request, group_id):
'''
get user who is currently online and editing the node
'''
if request.is_ajax() and request.method == "POST":
editorid = request.POST.get('editorid', "")
viewobj = ViewObj.objects.filter(filename=editorid)
userslist = []
if viewobj:
for each in viewobj:
if not each.username == request.user.username:
blankdict = {}
blankdict['username']=each.username
get_profile = get_profile_pic(each.username)
if get_profile :
blankdict['pro_img'] = "/"+str(group_id)+"/image/thumbnail/"+str(get_profile._id)
else :
blankdict['pro_img'] = "no";
userslist.append(blankdict)
if len(userslist) == 0:
userslist.append("No users")
else :
userslist.append("No users")
return StreamingHttpResponse(json.dumps(userslist).encode('utf-8'),content_type="text/json")
@get_execution_time
def view_articles(request, group_id):
if request.is_ajax():
# extracting all the bibtex entries from database
GST_one=node_collection.one({'_type':'AttributeType','name':'Citation'})
list_item=['article','book','booklet','conference','inbook','incollection','inproceedings','manual','masterthesis','misc','phdthesis','proceedings','techreport','unpublished_entry']
response_dict=[]
for each in list_item:
dict2={}
ref=node_collection.one({'_type':'GSystemType','name':each})
ref_entry=node_collection.find({"_type": "GSystem", 'member_of':{'$all':[ref._id]},'group_set':{'$all':[ObjectId(group_id)]},'status':u'PUBLISHED'})
list_entry=[]
for every in ref_entry:
id=every._id
gst_attribute=triple_collection.one({"_type": "GAttribute", 'subject': ObjectId(every._id), 'attribute_type.$id':ObjectId(GST_one._id)})
cite=gst_attribute.object_value
dict1 = {'name': every.name, 'cite': cite}
list_entry.append(dict1)
dict2[each]=list_entry
response_dict.append(dict2)
return StreamingHttpResponse(json.dumps(response_dict))
@get_execution_time
def get_author_set_users(request, group_id):
'''
This ajax function will give all users present in node's author_set field
'''
user_list = []
can_remove = False
if request.is_ajax():
_id = request.GET.get('_id',"")
node = node_collection.one({'_id':ObjectId(_id)})
course_name = ""
rt_has_course = node_collection.one({'_type':'RelationType', 'name':'has_course'})
if rt_has_course and node._id:
course = triple_collection.one({"_type": "GRelation", 'right_subject':node._id, 'relation_type.$id':rt_has_course._id})
if course:
course_name = node_collection.one({'_id':ObjectId(course.subject)}).name
if node.created_by == request.user.id:
can_remove = True
if node.author_set:
for each in node.author_set:
user_list.append(User.objects.get(id = each))
return render_to_response("ndf/refresh_subscribed_users.html",
{"user_list":user_list,'can_remove':can_remove,'node_id':node._id,'course_name':course_name},
context_instance=RequestContext(request)
)
else:
return StreamingHttpResponse("Empty")
else:
return StreamingHttpResponse("Invalid ajax call")
@login_required
@get_execution_time
def remove_user_from_author_set(request, group_id):
'''
This ajax function removes the user from the node's author_set
'''
user_list = []
can_remove = False
if request.is_ajax():
_id = request.GET.get('_id',"")
user_id = int(request.GET.get('user_id',""))
node = node_collection.one({'_id':ObjectId(_id)})
if node.created_by == request.user.id:
node.author_set.remove(user_id)
can_remove = True
node.save()
if node.author_set:
for each in node.author_set:
user_list.append(User.objects.get(id = each))
return render_to_response("ndf/refresh_subscribed_users.html",
{"user_list":user_list,'can_remove':can_remove,'node_id':node._id},
context_instance=RequestContext(request)
)
else:
return StreamingHttpResponse("You are not authorised to remove user")
else:
return StreamingHttpResponse("Invalid Ajax call")
@get_execution_time
def get_filterd_user_list(request, group_id):
'''
This function will return (all users) - (users already subscribed to the particular group)
'''
user_list = []
if request.is_ajax():
_id = request.GET.get('_id',"")
node = node_collection.one({'_id':ObjectId(_id)})
all_users_list = [each.username for each in User.objects.all()]
if node._type == 'Group':
for each in node.author_set:
user_list.append(User.objects.get(id = each).username)
filtered_users = list(set(all_users_list) - set(user_list))
return HttpResponse(json.dumps(filtered_users))
@get_execution_time
def search_tasks(request, group_id):
'''
This function will return all tasks whose name matches the given search term
'''
user_list = []
app_id = node_collection.find_one({'_type':"GSystemType", "name":"Task"})
if request.is_ajax():
term = request.GET.get('term',"")
task_nodes = node_collection.find({
'member_of': {'$all': [app_id._id]},
'name': {'$regex': term, '$options': 'i'},
'group_set': {'$all': [ObjectId(group_id)]},
'status': {'$nin': ['HIDDEN']}
}).sort('last_update', -1)
for each in task_nodes :
user_list.append({"label":each.name,"value":each.name,"id":str(each._id)})
return HttpResponse(json.dumps(user_list))
else:
raise Http404
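# Sketch of one entry search_tasks() returns -- the label/value/id shape
# typically consumed by a jQuery-UI style autocomplete (an assumption about
# the front-end; values hypothetical):
#
#   {"label": "Fix syllabus page", "value": "Fix syllabus page", "id": "54ab..."}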
@get_execution_time
def get_group_member_user(request, group_id):
"""Returns member(s) of the group excluding (group-admin(s)) in form of
dictionary that consists of key-value pair:
key: Primary key from Django's User table
value: User-name of that User record
"""
user_list = {}
group = node_collection.find_one({'_id': ObjectId(group_id)})
if request.is_ajax():
if group.author_set:
for each in group.author_set:
user_list[each] = User.objects.get(id=each).username
return HttpResponse(json.dumps(user_list))
else:
raise Http404
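# Sketch of get_group_member_user() output: Django user ids (stringified by
# json.dumps) mapped to usernames (values hypothetical):
#
#   {"12": "teacher1", "15": "student42"}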
@get_execution_time
def annotationlibInSelText(request, group_id):
"""
This view parses the annotations field of the currently selected node and checks whether an entry corresponding to this selectedText already exists.
If it does, the comment is appended to that entry; otherwise a new entry is created.
Arguments:
group_id - ObjectId of the currently selected group
obj_id - ObjectId of the currently selected node_id
comment - The comment added by user
selectedText - text for which comment was added
Returns:
The updated annotations field
"""
obj_id = str(request.POST["node_id"])
sg_obj = node_collection.one({"_id":ObjectId(obj_id)})
comment = request.POST ["comment"]
comment = json.loads(comment)
comment_modified = {
'authorAvatarUrl' : comment['authorAvatarUrl'],
'authorName' : comment['authorName'],
'comment' : comment['comment']
}
selectedText = request.POST['selectedText']
# check if annotations for this text already exist!
flag = False
for entry in sg_obj.annotations:
if (entry['selectedText'].lower() == selectedText.lower()):
entry['comments'].append(comment_modified)
flag = True
break
if(not(flag)):
comment_list = []
comment_list.append(comment_modified)
ann = {
'selectedText' : selectedText,
'sectionId' : str(comment['sectionId']),
'comments' : comment_list
}
sg_obj.annotations.append(ann)
sg_obj.save()
return HttpResponse(json.dumps(sg_obj.annotations))
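# Sketch of one entry in a node's "annotations" list after
# annotationlibInSelText() has run (values hypothetical):
#
#   {'selectedText': u'photosynthesis',
#    'sectionId': u'para-3',
#    'comments': [{'authorAvatarUrl': u'/avatar/u1.png',
#                  'authorName': u'user1',
#                  'comment': u'Please cite a source here.'}]}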
@get_execution_time
def delComment(request, group_id):
'''
Delete comment from thread
'''
return HttpResponse("comment deleted")
# Views related to MIS -------------------------------------------------------------
@get_execution_time
def get_students(request, group_id):
"""
This view returns a list of students, along with the required data based on the selection criteria,
for student_data_review.html
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "POST":
groupid = request.POST.get("groupid", None)
app_id = request.POST.get("app_id", None)
app_set_id = request.POST.get("app_set_id", None)
stud_reg_year = str(request.POST.get("reg_year", None))
university_id = request.POST.get("student_belongs_to_university",None)
college_id = request.POST.get("student_belongs_to_college",None)
person_gst = node_collection.one({'_type': "GSystemType", 'name': "Student"}, {'name': 1, 'type_of': 1})
widget_for = []
query = {}
person_gs = node_collection.collection.GSystem()
person_gs.member_of.append(person_gst._id)
person_gs.get_neighbourhood(person_gs.member_of)
# university_gst = node_collection.one({'_type': "GSystemType", 'name': "University"})
mis_admin = node_collection.one({"_type": "Group", "name": "MIS_admin"}, {"_id": 1})
# univ_cur = node_collection.find({"member_of":university_gst._id,'group_set':mis_admin._id},{'name':1,"_id":1})
# rel_univ = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_university"}, {'_id': 1})
# rel_colg = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_college"}, {'_id': 1})
attr_deg_yr = node_collection.one({'_type': "AttributeType", 'name': "degree_year"}, {'_id': 1})
widget_for = ["name",
# rel_univ._id,
# rel_colg._id,
attr_deg_yr._id
]
# 'status'
# ]
widget_for = get_widget_built_up_data(widget_for, person_gs)
# Fetch field(s) from POST object
# if request.POST.has_key("student_belongs_to_university"):
# university_id = query_data = request.POST.get("student_belongs_to_university", "")
for each in widget_for:
field_name = each["name"]
if each["_type"] == "BaseField":
if field_name in request.POST:
query_data = request.POST.get(field_name, "")
query_data = parse_template_data(each["data_type"], query_data)
if field_name == "name":
query.update({field_name: {'$regex': query_data, '$options': "i"}})
else:
query.update({field_name: query_data})
elif each["_type"] == "AttributeType":
if field_name in request.POST:
query_data = request.POST.get(field_name, "")
query_data = parse_template_data(each["data_type"], query_data)
query.update({"attribute_set."+field_name: query_data})
# elif each["_type"] == "RelationType":
# if request.POST.has_key(field_name):
# print field_name,"\n\n"
# query_data = request.POST.get(field_name, "")
# query_data = parse_template_data(each["data_type"], query_data, field_instance=each)
# print query_data,"\n\n"
# if field_name == "student_belongs_to_university":
# university_id = query_data
# else:
# query.update({"relation_set."+field_name: query_data})
student = node_collection.one({'_type': "GSystemType", 'name': "Student"}, {'_id': 1})
query["member_of"] = student._id
date_lte = datetime.datetime.strptime("31/12/" + stud_reg_year, "%d/%m/%Y")
date_gte = datetime.datetime.strptime("1/1/" + stud_reg_year, "%d/%m/%Y")
query["attribute_set.registration_date"] = {'$gte': date_gte, '$lte': date_lte}
college_groupid = None
if college_id:
# Get selected college's groupid, where given college should belongs to MIS_admin group
college_groupid = node_collection.one({'_id': ObjectId(college_id), 'group_set': mis_admin._id, 'relation_set.has_group': {'$exists': True}},
{'relation_set.has_group': 1, 'name': 1}
)
response_dict["college"] = college_groupid.name
if college_groupid:
for each in college_groupid.relation_set:
if "has_group" in each.keys():
college_groupid = each["has_group"][0]
break
else:
college_groupid = None
groupid = ObjectId(groupid)
group_set_to_check = []
if groupid == mis_admin._id:
# It means group is either a college group or MIS_admin group
# In either case append MIS_admin group's ObjectId
# and if college_groupid exists, append it's ObjectId too!
if college_groupid:
group_set_to_check.append(college_groupid)
else:
group_set_to_check.append(mis_admin._id)
else:
# Otherwise, append given group's ObjectId
group_set_to_check.append(groupid)
if university_id:
university_id = ObjectId(university_id)
university = node_collection.one({'_id': university_id}, {'name': 1})
if university:
response_dict["university"] = university.name
query.update({'relation_set.student_belongs_to_university': university_id})
query.update({'group_set': {'$in': group_set_to_check}})
query.update({'status': u"PUBLISHED"})
rec = node_collection.collection.aggregate([{'$match': query},
{'$project': {'_id': 0,
'stud_id': '$_id',
'Enrollment Code': '$attribute_set.enrollment_code',
'Name': '$name',
# 'First Name': '$attribute_set.first_name',
# 'Middle Name': '$attribute_set.middle_name',
# 'Last Name': '$attribute_set.last_name',
'Reg# Date': '$attribute_set.registration_date',
'Gender': '$attribute_set.gender',
'Birth Date': '$attribute_set.dob',
'Religion': '$attribute_set.religion',
'Email ID': '$attribute_set.email_id',
'Languages Known': '$attribute_set.languages_known',
'Caste': '$relation_set.student_of_caste_category',
'Contact Number (Mobile)': '$attribute_set.mobile_number',
'Alternate Number / Landline': '$attribute_set.alternate_number',
'House / Street': '$attribute_set.house_street',
'Village': '$attribute_set.village',
'Taluka': '$attribute_set.taluka',
'Town / City': '$attribute_set.town_city',
'District': '$relation_set.person_belongs_to_district',
'State': '$relation_set.person_belongs_to_state',
'Pin Code': '$attribute_set.pin_code',
'Year of Passing 12th Standard': '$attribute_set.12_passing_year',
'Degree Name / Highest Degree': '$attribute_set.degree_name',
'Year of Study': '$attribute_set.degree_year',
'Stream / Degree Specialization': '$attribute_set.degree_specialization',
'College Enrolment Number / Roll No': '$attribute_set.college_enroll_num',
'College ( Graduation )': '$relation_set.student_belongs_to_college',
'Are you registered for NSS?': '$attribute_set.is_nss_registered'
}},
{'$sort': {'Name': 1}}
])
json_data = []
filename = ""
column_header = []
if len(rec["result"]):
for each_dict in rec["result"]:
new_dict = {}
for each_key in each_dict:
if each_dict[each_key]:
if type(each_dict[each_key]) == list:
data = each_dict[each_key][0]
else:
data = each_dict[each_key]
if type(data) == list:
# Perform parsing
if type(data) == list:
# Perform parsing
if type(data[0]) in [unicode, basestring, int]:
new_dict[each_key] = ', '.join(str(d) for d in data)
elif type(data[0]) in [ObjectId]:
# new_dict[each_key] = str(data)
d_list = []
for oid in data:
d = node_collection.one({'_id': oid}, {'name': 1})
d_list.append(str(d.name))
new_dict[each_key] = ', '.join(str(n) for n in d_list)
elif type(data) == datetime.datetime:
new_dict[each_key] = data.strftime("%d/%m/%Y")
elif type(data) == long:
new_dict[each_key] = str(data)
elif type(data) == bool:
if data:
new_dict[each_key] = "Yes"
else:
new_dict[each_key] = "No"
else:
new_dict[each_key] = str(data)
else:
# Perform parsing
if type(data) == list:
# Perform parsing
if type(data[0]) in [unicode, basestring, int]:
new_dict[each_key] = ', '.join(str(d) for d in data)
elif type(data[0]) in [ObjectId]:
new_dict[each_key] = str(data)
elif type(data) == datetime.datetime:
new_dict[each_key] = data.strftime("%d/%m/%Y")
elif type(data) == long:
new_dict[each_key] = str(data)
elif type(data) == bool:
if data:
new_dict[each_key] = "Yes"
else:
new_dict[each_key] = "No"
else:
new_dict[each_key] = str(data)
else:
new_dict[each_key] = ""
json_data.append(new_dict)
# Start: CSV file processing -------------------------------------------
column_header = [u"Enrollment Code", u'Name', u'Reg# Date', u'Gender', u'Birth Date', u'Religion', u'Email ID', u'Languages Known', u'Caste', u'Contact Number (Mobile)', u'Alternate Number / Landline', u'House / Street', u'Village', u'Taluka', u'Town / City', u'District', u'State', u'Pin Code', u'Year of Passing 12th Standard', u'Degree Name / Highest Degree', u'Year of Study', u'Stream / Degree Specialization', u'College Enrolment Number / Roll No', u'College ( Graduation )', u'Are you registered for NSS?']
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "student_registration_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in json_data:
v = {}
v["stud_id"] = row.pop("stud_id")
fw.writerow(row)
row.update(v)
# End: CSV file processing ----------------------------------------------
# Column headers to be displayed on html
column_headers = [
("Enrollment Code", "Enrollment Code"),
("stud_id", "Edit"),
("Name", "Name"),
("Reg# Date", "Reg# Date"),
("Gender", "Gender"),
("Birth Date", "Birth Date"),
("Email ID", "Email ID"),
]
# college = node_collection.one({'_id': ObjectId(college_id)}, {"name": 1})
students_count = len(json_data)
response_dict["success"] = True
response_dict["groupid"] = groupid
response_dict["app_id"] = app_id
response_dict["app_set_id"] = app_set_id
response_dict["filename"] = filename
response_dict["students_count"] = students_count
response_dict["column_headers"] = column_headers
response_dict["students_data_set"] = json_data
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "StudentFindError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except OSError as oe:
error_message = "StudentFindError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "StudentFindError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
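# Sketch of the (trimmed) JSON get_students() returns on success; counts,
# headers and row contents are hypothetical:
#
#   {"success": true, "students_count": 42,
#    "filename": "csv/student_registration_data_<timestamp>.csv",
#    "column_headers": [["Enrollment Code", "Enrollment Code"], ["stud_id", "Edit"], ...],
#    "students_data_set": [{"Name": "...", "Gender": "...", "Reg# Date": "01/07/2014", ...}, ...]}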
@get_execution_time
def get_statewise_data(request, group_id):
"""
This view returns university-wise and college-wise counts of students per degree_year for the selected state.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
university_wise_data - Dictionary mapping each university to per-college student counts by degree year
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetching selected state's name
state_val = request.GET.get("state_val", None)
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1}
)
# Fetching selected state's node
state_gst = node_collection.one(
{'_type': "GSystemType", 'name': "State"}
)
state_gs = node_collection.one(
{
'member_of': state_gst._id,
'name': {'$regex': state_val, '$options': "i"},
'group_set': mis_admin._id
}
)
# Fetching universities belonging to that state
university_gst = node_collection.one(
{'_type': "GSystemType", 'name': "University"}
)
university_cur = node_collection.find(
{
'member_of': university_gst._id,
'group_set': mis_admin._id,
'relation_set.organization_belongs_to_state': state_gs._id
},
{
'name': 1,
'relation_set.affiliated_college': 1
}
).sort('name', 1)
student_gst = node_collection.one(
{'_type': "GSystemType", 'name': "Student"}
)
university_wise_data = {}
# Fetching university-wise data
for each_univ in university_cur:
university_wise_data[each_univ.name] = {}
# Fetching college(s) affiliated to given university
colleges_id_list = []
for rel in each_univ.relation_set:
if rel and "affiliated_college" in rel:
colleges_id_list = rel["affiliated_college"]
break
# Fetching college-wise data
college_cur = node_collection.find(
{'_id': {'$in': colleges_id_list}}
).sort('name', 1)
for each_college in college_cur:
university_wise_data[each_univ.name][each_college.name] = {}
rec = node_collection.collection.aggregate([
{
'$match': {
'member_of': student_gst._id,
'relation_set.student_belongs_to_college': each_college._id,
# 'attribute_set.registration_date': {
# '$gte': date_gte, '$lte': date_lte
# },
'status': u"PUBLISHED"
}
},
{
'$group': {
'_id': {
'College': '$each_college.name',
'Degree Year': '$attribute_set.degree_year'
},
'No of students': {'$sum': 1}
}
}
])
data = {}
for res in rec["result"]:
if res["_id"]["Degree Year"]:
data[res["_id"]["Degree Year"][0]] = \
res["No of students"]
if "I" not in data:
data["I"] = 0
if "II" not in data:
data["II"] = 0
if "III" not in data:
data["III"] = 0
data["Total"] = data["I"] + data["II"] + data["III"]
university_wise_data[each_univ.name][each_college.name] = data
response_dict["success"] = True
response_dict["university_wise_data"] = university_wise_data
return HttpResponse(json.dumps(response_dict))
else:
error_message = "CollegeSummaryDataError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except OSError as oe:
error_message = "CollegeSummaryDataError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeSummaryDataError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
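# Sketch of the university_wise_data mapping built above (university, college
# names and counts hypothetical):
#
#   {u'University X': {u'College A': {'I': 12, 'II': 9, 'III': 7, 'Total': 28},
#                      u'College B': {'I': 5, 'II': 0, 'III': 3, 'Total': 8}},
#    u'University Y': {}}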
@get_execution_time
def get_college_wise_students_data(request, group_id):
"""
This view returns a download link of a CSV consisting of students' statistical data, based on degree_year, for each college.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
download_link - file path of CSV created
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "GET":
groupid = request.GET.get("groupid", None)
mis_admin = node_collection.one({'_type': "Group", 'name': "MIS_admin"}, {'_id': 1})
college_gst = node_collection.one({'_type': "GSystemType", 'name': "College"}, {'_id': 1})
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
current_year = str(datetime.datetime.today().year)
date_gte = datetime.datetime.strptime("1/1/" + current_year, "%d/%m/%Y")
date_lte = datetime.datetime.strptime("31/12/" + current_year, "%d/%m/%Y")
college_cur = node_collection.find({'member_of': college_gst._id, 'group_set': mis_admin._id},
{'_id': 1, 'name': 1, 'relation_set': 1}).sort('name', 1)
json_data = []
for i, each in enumerate(college_cur):
data = {}
college_group_id = None
for each_dict in each.relation_set:
if u"has_group" in each_dict.keys():
college_group_id = each_dict["has_group"]
break
rec = node_collection.collection.aggregate([{'$match': {'member_of': student._id,
'group_set': {'$in': [college_group_id, mis_admin._id]},
'relation_set.student_belongs_to_college': each._id,
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
'status': u"PUBLISHED"
}},
{'$group': {
'_id': {'College': '$each.name', 'Degree Year': '$attribute_set.degree_year'},
'No of students': {'$sum': 1}
}}
])
data["College"] = each.name
for res in rec["result"]:
data[res["_id"]["Degree Year"][0]] = res["No of students"]
if "I" not in data:
data["I"] = 0
if "II" not in data:
data["II"] = 0
if "III" not in data:
data["III"] = 0
data["Total"] = data["I"] + data["II"] + data["III"]
json_data.append(data)
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "college_wise_student_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
column_header = [u"College", u"Program Officer", u"I", u"II", u"III", u"Total"]
PO = {
"Agra College": ["Mr. Rajaram Yadav"],
"Arts College Shamlaji": ["Mr. Ashish Varia"],
"Baba Bhairabananda Mahavidyalaya": ["Mr. Mithilesh Kumar"],
"Balugaon College": ["Mr. Pradeep Pradhan"],
"City Women's College": ["Ms. Rajni Sharma"],
"Comrade Godavari Shamrao Parulekar College of Arts, Commerce & Science": ["Mr. Rahul Sable"],
"Faculty of Arts": ["Mr. Jokhim", "Ms. Tusharika Kumbhar"],
"Gaya College": ["Ms. Rishvana Sheik"],
"Govt. M. H. College of Home Science & Science for Women, Autonomous": [],
"Govt. Mahakoshal Arts and Commerce College": ["Ms. Davis Yadav"],
"Govt. Mahaprabhu Vallabhacharya Post Graduate College": ["Mr. Gaurav Sharma"],
"Govt. Rani Durgavati Post Graduate College": ["Mr. Asad Ullah"],
"Jamshedpur Women's College": ["Mr. Arun Agrawal"],
"Kalyan Post Graduate College": ["Mr. Praveen Kumar"],
"Kamla Nehru College for Women": ["Ms. Tusharika Kumbhar", "Ms. Thaku Pujari"],
"L. B. S. M. College": ["Mr. Charles Kindo"],
"Mahila College": ["Mr. Sonu Kumar"],
"Marwari College": ["Mr. Avinash Anand"],
"Matsyodari Shikshan Sanstha's Arts, Commerce & Science College": ["Ms. Jyoti Kapale"],
"Nirmala College": [],
"Ranchi College": [],
"Ranchi Women's College": ["Mr. Avinash Anand"],
"Shiv Chhatrapati College": ["Mr. Swapnil Sardar"],
"Shri & Smt. PK Kotawala Arts College": ["Mr. Sawan Kumar"],
"Shri VR Patel College of Commerce": ["Mr. Sushil Mishra"],
"Sree Narayana Guru College of Commerce": ["Ms. Bharti Bhalerao"],
"Sri Mahanth Shatanand Giri College": ["Mr. Narendra Singh"],
"St. John's College": ["Mr. Himanshu Guru"],
"The Graduate School College For Women": ["Mr. Pradeep Gupta"],
"Vasant Rao Naik Mahavidyalaya": ["Mr. Dayanand Waghmare"],
"Vivekanand Arts, Sardar Dalip Singh Commerce & Science College": ["Mr. Anis Ambade"]
}
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in json_data:
if row[u"College"] not in PO or not PO[row[u"College"]]:
row[u"Program Officer"] = "Not assigned yet"
else:
row[u"Program Officer"] = ", ".join(PO[row[u"College"]])
fw.writerow(row)
response_dict["success"] = True
response_dict["download_link"] = (STATIC_URL + filename)
return HttpResponse(json.dumps(response_dict))
else:
error_message = "CollegeSummaryDataError: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except OSError as oe:
error_message = "CollegeSummaryDataError: " + str(oe) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeSummaryDataError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
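# Sketch of the CSV written by get_college_wise_students_data() (the college
# and Program Officer names appear in the mapping above; the counts are hypothetical):
#
#   College,Program Officer,I,II,III,Total
#   Agra College,Mr. Rajaram Yadav,14,11,9,34
#   Arts College Shamlaji,Mr. Ashish Varia,8,6,5,19
#
# The view itself responds with {"success": true, "download_link": "<STATIC_URL><filename>"}.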
@get_execution_time
def set_user_link(request, group_id):
"""
This view creates a relationship (has_login) between the given node (node_id) and the author node (username);
and also subscribes the user to his/her respective college group
Arguments:
group_id - ObjectId of the currently selected group
node_id - ObjectId of the currently selected node_id
username - Username of the user
Returns:
A dictionary consisting of following key:-
result - a bool variable indicating whether link is created or not and subscribed to group or not
message - a string variable giving the status of the link (also reason if any error occurs)
"""
gr_node = None
try:
if request.is_ajax() and request.method =="POST":
node_id = request.POST.get("node_id", "")
username = request.POST.get("username", "")
# Creating link between user-node and it's login credentials
author = node_collection.one({'_type': "Author", 'name': unicode(username)}, {'created_by': 1})
rt_has_login = node_collection.one({'_type': "RelationType", 'name': u"has_login"})
gr_node = create_grelation(node_id, rt_has_login, author._id)
if gr_node:
# Assigning the userid to respective private college groups's author_set,
# i.e. making user, member of college group to which he/she belongs
# Only after the given user's link (i.e., has_login relation) gets created
node = node_collection.one({'_id': ObjectId(node_id)}, {'member_of': 1})
node_type = node.member_of_names_list
has_group = node_collection.one({'_type': "RelationType", 'name': "has_group"}, {'_id': 1})
if "Student" in node_type:
student_belongs_to_college = node_collection.one({'_type': "RelationType", 'name': "student_belongs_to_college"}, {'_id': 1})
colleges = triple_collection.find({
'_type': "GRelation", 'subject': node._id,
'relation_type.$id': student_belongs_to_college._id
})
for each in colleges:
g = triple_collection.one({'_type': "GRelation", 'subject': each.right_subject, 'relation_type.$id': has_group._id})
node_collection.collection.update({'_id': g.right_subject}, {'$addToSet': {'author_set': author.created_by}}, upsert=False, multi=False)
elif "Voluntary Teacher" in node_type:
trainer_of_college = node_collection.one({'_type': "RelationType", 'name': "trainer_of_college"}, {'_id': 1})
colleges = triple_collection.find({'_type': "GRelation", 'subject': node._id, 'relation_type.$id': trainer_of_college._id})
for each in colleges:
g = triple_collection.one({'_type': "GRelation", 'subject': each.right_subject, 'relation_type.$id': has_group._id})
node_collection.collection.update({'_id': g.right_subject}, {'$addToSet': {'author_set': author.created_by}}, upsert=False, multi=False)
return HttpResponse(json.dumps({'result': True, 'message': " Link successfully created. \n\n Also subscribed to respective college group(s)."}))
else:
error_message = " UserLinkSetUpError: Either not an ajax call or not a POST request!!!"
return HttpResponse(json.dumps({'result': False, 'message': " Link not created - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n UserLinkSetUpError: " + str(e) + "!!!"
result = False
if gr_node:
# node_collection.collection.remove({'_id': gr_node._id})
result = True
error_message = " Link created successfully. \n\n But facing problem(s) in subscribing to respective college group(s)!!!\n Please use group's 'Subscribe members' button to do so !!!"
else:
result = False
error_message = " Link not created - May be invalid username entered !!!"
return HttpResponse(json.dumps({'result': result, 'message': error_message}))
@get_execution_time
def set_enrollment_code(request, group_id):
"""
"""
if request.is_ajax() and request.method == "POST":
return HttpResponse("Five digit code")
else:
error_message = " EnrollementCodeError: Either not an ajax call or not a POST request!!!"
raise Exception(error_message)
@get_execution_time
def get_students_assignments(request, group_id):
"""
Arguments:
group_id - ObjectId of the currently selected group
Returns:
If user_id is passed in the querystring, a JSON dictionary of that student's
Pages, Images, Videos and Files; otherwise the rendered "student_statistics.html"
with per-student resource counts for the college group.
"""
gr_node = None
try:
if request.is_ajax() and request.method =="GET":
user_id = 0
if "user_id" in request.GET:
user_id = int(request.GET.get("user_id", ""))
# Fetching college group
college_group = node_collection.one({'_id': ObjectId(group_id)}, {'name': 1, 'tags': 1, 'author_set': 1, 'created_by': 1})
page_res = node_collection.one({'_type': "GSystemType", 'name': "Page"}, {'_id': 1})
file_res = node_collection.one({'_type': "GSystemType", 'name': "File"}, {'_id': 1})
image_res = node_collection.one({'_type': "GSystemType", 'name': "Image"}, {'_id': 1})
video_res = node_collection.one({'_type': "GSystemType", 'name': "Video"}, {'_id': 1})
student_list = []
if user_id:
# Fetch assignment details of a given student
student_dict = {}
num_pages = []
num_images = []
num_videos = []
num_files = []
# Fetch student's user-group
user_group = node_collection.one({'_type': "Author", 'created_by': user_id})
student_dict["username"] = user_group.name
# Fetch all resources from student's user-group
resources = node_collection.find({'group_set': user_group._id}, {'name': 1, 'member_of': 1, 'created_at': 1})
for res in resources:
if page_res._id in res.member_of:
num_pages.append(res)
elif image_res._id in res.member_of:
num_images.append(res)
elif video_res._id in res.member_of:
num_videos.append(res)
elif file_res._id in res.member_of:
num_files.append(res)
student_dict["Pages"] = num_pages
student_dict["Images"] = num_images
student_dict["Videos"] = num_videos
student_dict["Files"] = num_files
return HttpResponse(json.dumps(student_dict, cls=NodeJSONEncoder))
else:
# Fetch assignment details of all students belonging to the college group
for user_id in college_group.author_set:
if user_id == college_group.created_by:
continue
student_dict = {}
num_pages = 0
num_images = 0
num_videos = 0
num_files = 0
# Fetch student's user-group
user_group = node_collection.one({'_type': "Author", 'created_by': user_id})
# Fetch student's node from his/her has_login relationship
student_has_login_rel = triple_collection.one({'_type': "GRelation", 'right_subject': user_group._id})
student_node = node_collection.one({'_id': student_has_login_rel.subject}, {'name': 1})
student_dict["Name"] = student_node.name
student_dict["user_id"] = user_id
# Fetch all resources from student's user-group
resources = node_collection.find({'group_set': user_group._id}, {'member_of': 1})
for res in resources:
if page_res._id in res.member_of:
num_pages = num_pages + 1
elif image_res._id in res.member_of:
num_images = num_images + 1
elif video_res._id in res.member_of:
num_videos = num_videos + 1
elif file_res._id in res.member_of:
num_files = num_files + 1
student_dict["Pages"] = num_pages
student_dict["Images"] = num_images
student_dict["Videos"] = num_videos
student_dict["Files"] = num_files
student_dict["Total"] = num_pages + num_images + num_videos + num_files
student_list.append(student_dict)
# Outside of above for loop
return render_to_response("ndf/student_statistics.html",
{'node': college_group,'student_list': student_list},
context_instance = RequestContext(request)
)
else:
error_message = "StudentDataGetError: Invalid ajax call!!!"
return StreamingHttpResponse(error_message)
except Exception as e:
print "\n StudentDataGetError: " + str(e)
raise Http404(e)
@get_execution_time
def get_districts(request, group_id):
"""
This view fetches district(s) belonging to given state.
Arguments:
group_id - ObjectId of the currently selected group
state_id - ObjectId of the currently selected state
Returns:
A dictionary consisting of following key:-
districts - a list variable consisting of two elements i.e.,
first-element: subject (District's ObjectId), second-element: manipulated-name-value (District's name)
message - a string variable giving the error-message
"""
gr_node = None
try:
if request.is_ajax() and request.method == "GET":
state_id = request.GET.get("state_id", "")
# districts -- [first-element: subject (District's ObjectId), second-element: manipulated-name-value (District's name)]
districts = []
# Fetching RelationType: District - district_of (name) | has_district (inverse_name) - State
rt_district_of = node_collection.one({'_type': "RelationType", 'name': "district_of"})
# Fetching all districts belonging to given state in sorted order by name
if rt_district_of:
cur_districts = triple_collection.find({
'_type': "GRelation", 'right_subject': ObjectId(state_id),
'relation_type.$id': rt_district_of._id
}).sort('name', 1)
if cur_districts.count():
for d in cur_districts:
districts.append([str(d.subject), d.name.split(" -- ")[0]])
else:
error_message = "No districts found"
raise Exception(error_message)
else:
error_message = "RelationType (district_of) doesn't exists"
raise Exception(error_message)
return HttpResponse(json.dumps(districts))
else:
error_message = " DistrictFetchError: Either not an ajax call or not a GET request!!!"
return HttpResponse(json.dumps({'message': " DistrictFetchError - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n DistrictFetchError: " + str(e) + "!!!"
return HttpResponse(json.dumps({'message': error_message}))
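# Sketch of the list get_districts() returns -- [district ObjectId (str), district name]
# pairs (values hypothetical):
#
#   [["54ab...", "Pune"], ["54ac...", "Nagpur"]]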
@get_execution_time
def get_affiliated_colleges(request, group_id):
"""
This view returns list of colleges affiliated to given university.
Each element of the list is again a list where,
0th index-element: ObjectId of college
1st index-element: Name of college
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
affiliated_colleges - List consisting of affiliated colleges (ObjectIds & names)
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
university_id = request.GET.get("university_id", "")
req_university = None
req_affiliated_colleges = None
# Check whether any field has missing value or not
if university_id == "":
error_message = "AffiliatedCollegeFindError: Invalid data (No university selected)!!!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
university_id = ObjectId(university_id)
# Fetch required university
req_university = node_collection.one({'_id': university_id})
if not req_university:
error_message = "AffiliatedCollegeFindError: No university exists with given ObjectId("+university_id+")!!!"
raise Exception(error_message)
for each in req_university["relation_set"]:
if u"affiliated_college" in each.keys():
req_affiliated_colleges = node_collection.find({'_id': {'$in': each[u"affiliated_college"]}}, {'name': 1}).sort('name', 1)
req_affiliated_colleges_list = []
for each in req_affiliated_colleges:
req_affiliated_colleges_list.append([str(each._id), each.name])
response_dict["affiliated_colleges"] = req_affiliated_colleges_list
response_dict["success"] = True
response_dict["message"] = "This university ("+req_university.name+") has following list of affiliated colleges:"
for i, each in enumerate(req_affiliated_colleges_list):
response_dict["message"] += "\n\n " + str(i+1) + ". " + each[1]
return HttpResponse(json.dumps(response_dict))
else:
error_message = "AffiliatedCollegeFindError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "AffiliatedCollegeFindError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_courses(request, group_id):
"""
This view returns list of NUSSD-Course(s) belonging to given course type.
Arguments:
group_id - ObjectId of the currently selected group
nussd_course_type - Type of NUSSD Course
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
unset_nc - dictionary consisting of NUSSD-Course(s)
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
# Check whether any field has missing value or not
if nussd_course_type == "":
error_message = "Invalid data: No data found in any of the " \
+ "field(s)!!!"
raise Exception(error_message)
# Fetch "Announced Course" GSystemType
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'name': 1}
)
if not mis_admin:
# If not found, throw exception
error_message = "'MIS_admin' (Group) doesn't exists... " \
+ "Please create it first!"
raise Exception(error_message)
# Fetch "Announced Course" GSystemType
nussd_course_gt = node_collection.one(
{'_type': "GSystemType", 'name': "NUSSD Course"}
)
if not nussd_course_gt:
# If not found, throw exception
error_message = "'NUSSD Course' (GSystemType) doesn't exists... " \
+ "Please create it first!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
nussd_course_type = unicode(nussd_course_type)
# Fetch registered NUSSD-Courses of given type
nc_cur = node_collection.find(
{
'member_of': nussd_course_gt._id,
'group_set': mis_admin._id,
'attribute_set.nussd_course_type': nussd_course_type
},
{'name': 1}
)
nc_dict = {}
if nc_cur.count():
# If found, append them to a dict
for each in nc_cur:
nc_dict[str(each._id)] = each.name
response_dict["success"] = True
response_dict["unset_nc"] = nc_dict
else:
response_dict["message"] = "No " + nussd_course_type + " type of course exists." \
+ " Please register"
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
else:
error_message = "AnnouncedCourseError: Either not an ajax call or" \
" not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "AnnouncedCourseError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_announced_courses_with_ctype(request, group_id):
"""
This view returns list of announced-course(s) that match given criteria
along with NUSSD-Course(s) for which match doesn't exists.
Arguments:
group_id - ObjectId of the currently selected group
nussd_course_type - Type of NUSSD course
Returns:
A dictionary consisting of following key-value pairs:-
acourse_ctype_list - list consisting of announced-course(s)
and/or NUSSD-Courses [if match not found]
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
ann_course_type = request.GET.get("ann_course_type", "0")
acourse_ctype_list = []
ac_of_colg = []
start_enroll = ""
end_enroll = ""
query = {}
# curr_date = datetime.datetime.now()
# Fetch "Announced Course" GSystemType
announced_course_gt = node_collection.one(
{'_type': "GSystemType", 'name': "Announced Course"}
)
if not announced_course_gt:
# If not found, throw exception
error_message = "Announced Course (GSystemType) doesn't " \
+ "exists... Please create it first!"
raise Exception(error_message)
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"}
)
# Type-cast fetched field(s) into their appropriate type
nussd_course_type = unicode(nussd_course_type)
ann_course_type = int(ann_course_type)
if ann_course_type == 1:
# Return all Announced Course(s) for which enrollment not started yet
query = {
"member_of": announced_course_gt._id,
"group_set": ObjectId(mis_admin._id),
"status": "PUBLISHED",
"attribute_set.nussd_course_type": nussd_course_type,
"attribute_set.ann_course_closure": u"Open",
"relation_set.course_has_enrollment": {"$exists": False}
}
college = {}
course = {}
ac_data_set = []
records_list = []
if nussd_course_type == "Foundation Course":
rec = node_collection.collection.aggregate([{
"$match": {
"member_of": announced_course_gt._id,
"group_set": ObjectId(mis_admin._id),
"status": "PUBLISHED",
"attribute_set.nussd_course_type": nussd_course_type,
"attribute_set.ann_course_closure": u"Open",
"relation_set.course_has_enrollment": {"$exists": False}
}
}, {
'$group': {
"_id": {
"start_time": "$attribute_set.start_time",
"end_time": "$attribute_set.end_time",
'college': '$relation_set.acourse_for_college'
},
"foundation_course": {"$addToSet": {'ac_id': "$_id", 'course': '$relation_set.announced_for', 'created_at': "$created_at"}},
"fc_ann_ids": {"$addToSet": "$_id"}
}
}, {
'$sort': {'created_at': 1}
}])
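# Group Foundation Course announcements by duration and college, so each row built below represents the whole FC bundle for that college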
records_list = rec["result"]
if records_list:
for each in records_list:
newrec = {}
if each['_id']["college"]:
colg_id = each['_id']["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1,"attribute_set.enrollment_code":1})
newrec[u"college"] = c.name
newrec[u"college_id"] = c._id
newrec[u"created_at"] = each["foundation_course"][0]["created_at"]
college[colg_id] = {}
college[colg_id]["name"] = newrec[u"college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
newrec[u"university"] = u.name
newrec[u"university_id"] = u._id
else:
newrec["college"] = college[colg_id]["name"]
newrec["college_id"] = ObjectId(colg_id)
newrec["university_id"] = college[colg_id]["university_id"]
newrec["university"] = college[colg_id]["university"]
newrec[u"course"] = "Foundation Course"
newrec[u"ac_id"] = each["fc_ann_ids"]
newrec[u"name"] = "Foundation_Course_" + c["attribute_set"][0]["enrollment_code"] + "_" + each["_id"]["start_time"][0].strftime('%Y') + "_" + each["_id"]["end_time"][0].strftime('%Y')
ac_data_set.append(newrec)
else:
rec = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'course': '$relation_set.announced_for',
'college': '$relation_set.acourse_for_college',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = rec["result"]
if records_list:
for each in rec["result"]:
if each["college"]:
colg_id = each["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1})
each["college"] = c.name
each["college_id"] = c._id
college[colg_id] = {}
college[colg_id]["name"] = each["college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
each["university_id"] = u._id
else:
each["college"] = college[colg_id]["name"]
each["college_id"] = colg_id
each.update({"university": college[colg_id]["university"]})
each.update({"university_id": college[colg_id]["university_id"]})
if each["course"]:
course_id = each["course"][0][0]
if course_id not in course:
each["course"] = node_collection.one({"_id": course_id}).name
course[course_id] = each["course"]
else:
each["course"] = course[course_id]
ac_data_set.append(each)
column_headers = [
("name", "Announced Course Name"),
("course", "Course Name"),
("college", "College"),
("university", "University")
]
if records_list:
# If Announced Course(s) records found
response_dict["column_headers"] = column_headers
response_dict["ac_data_set"] = ac_data_set
else:
# Else, where No Announced Course exist
response_dict["ac_data_set"] = records_list
response_dict["message"] = "No Announced Course found of selected type (" + nussd_course_type + ") !"
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
if(ObjectId(group_id) == mis_admin._id):
ac_cur = node_collection.find({
'member_of': announced_course_gt._id,
'group_set': ObjectId(group_id),
'attribute_set.nussd_course_type': nussd_course_type
}, {
"name": 1, "attribute_set": 1, "relation_set": 1
})
else:
colg_gst = node_collection.one(
{'_type': "GSystemType", 'name': 'College'}
)
# Fetch Courses announced for given college (or college group)
# Get college node & courses announced for it from
# college group's ObjectId
req_colg_id = node_collection.one({
'member_of': colg_gst._id,
'relation_set.has_group': ObjectId(group_id)
}, {
'relation_set.college_has_acourse': 1
})
for rel in req_colg_id.relation_set:
if rel and "college_has_acourse" in rel:
ac_of_colg = rel["college_has_acourse"]
# Keeping only those announced courses which are active
# (i.e. PUBLISHED)
ac_cur = node_collection.find({
'_id': {'$in': ac_of_colg},
'member_of': announced_course_gt._id,
'attribute_set.nussd_course_type': nussd_course_type,
# 'relation_set.course_selected': {'$exists': True, '$not': {'$size': 0}},
'status': u"PUBLISHED"
# 'attribute_set.start_enroll':{'$lte': curr_date},
# 'attribute_set.end_enroll':{'$gte': curr_date}
}, {
"name": 1, "attribute_set": 1, "relation_set": 1
})
if ac_cur.count():
sce_gs_dict = {}
for each_ac in ac_cur:
# NOTE: This ajax-call is used in various templates
# Following is used especially only in new_create_batch.html
# Fetch enrolled students count from announced course node's course_selected
enrolled_stud_count = 0
if ann_course_type != 1:
for rel in each_ac.relation_set:
if rel and "course_has_enrollment" in rel:
if rel["course_has_enrollment"]:
sce_gs_id = rel["course_has_enrollment"][0]
str_sce_gs_id = str(sce_gs_id)
if str_sce_gs_id in sce_gs_dict:
enrolled_stud_count = sce_gs_dict[str_sce_gs_id]
break
sce_gs_node = node_collection.one({
"_id": ObjectId(sce_gs_id)
}, {
"attribute_set.has_approved": 1
})
sce_gs_dict[str_sce_gs_id] = enrolled_stud_count
for attr in sce_gs_node.attribute_set:
if attr and "has_approved" in attr:
if attr["has_approved"]:
enrolled_stud_count = len(attr["has_approved"])
sce_gs_dict[str_sce_gs_id] = enrolled_stud_count
break
break
each_ac["enrolled_stud_count"] = enrolled_stud_count
acourse_ctype_list.append(each_ac)
response_dict["success"] = True
info_message = "Announced Courses are available"
else:
response_dict["success"] = False
info_message = "No Announced Courses are available"
response_dict["message"] = info_message
response_dict["acourse_ctype_list"] = json.dumps(
acourse_ctype_list, cls=NodeJSONEncoder
)
return HttpResponse(json.dumps(response_dict))
else:
error_message = " AnnouncedCourseFetchError - Something went wrong in " \
+ "ajax call !!! \n\n Please contact system administrator."
return HttpResponse(json.dumps({
'message': error_message
}))
except Exception as e:
error_message = "\n AnnouncedCourseFetchError: Either you are in user " \
+ "group or something went wrong!!!"
return HttpResponse(json.dumps({'message': error_message}))
@get_execution_time
def get_colleges(request, group_id, app_id):
"""This view returns HttpResponse with following data:
- List of college(s) affiliated to given university where
Program Officer is not subscribed
- List of college(s) affiliated to given university where
Course(s) is/are already announced for given duration
- List of college(s) affiliated to given university where
Course(s) is/are not announced for given duration
Arguments:
group_id - ObjectId of the currently selected group
univ_id - ObjectId of currently selected University
start_time - Start time of announcement (MM/YYYY)
end_time - End time of announcement (MM/YYYY)
dc_courses_id_list - List of ObjectId(s) of Course(s)
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
unassigned_po_colg_list - List of college(s) affiliated to given university
where Program Officer is not subscribed
already_announced_in_colg_list - List of college(s) affiliated to given
university where Course(s) is/are already announced for given duration
drawer_widget - Drawer containing list of college(s) affiliated to given
university where Course(s) is/are not announced for given duration
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
nussd_course_type = request.GET.get("nussd_course_type", "")
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"}, {'name': 1}
)
if not mis_admin:
# If not found, throw exception
error_message = "'MIS_admin' (Group) doesn't exists... " \
"Please create it first!"
raise Exception(error_message)
univ_id = request.GET.get("univ_id", "")
start_time = request.GET.get("start_time", "")
end_time = request.GET.get("end_time", "")
dc_courses_id_list = request.GET.getlist("dc_courses_id_list[]")
# all_univs = request.GET.get("all_univs", "")
# Check whether any field has missing value or not
if univ_id == "" or start_time == "" or end_time == "":
error_message = "Invalid data: " \
"No data found in any of the field(s)!!!"
raise Exception(error_message)
# Fetch all college groups
college = node_collection.one(
{'_type': "GSystemType", 'name': "College"}, {'name': 1}
)
if not college:
# If not found, throw exception
error_message = "'College' (GSystemType) doesn't exists... "\
"Please create it first!"
raise Exception(error_message)
# Type-cast fetched field(s) into their appropriate type
univ_id = ObjectId(univ_id)
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
dc_courses_id_list = [ObjectId(dc) for dc in dc_courses_id_list]
# Fetch the node of selected university
# university_node = node_collection.one(
# {'_id': univ_id},
# {'relation_set': 1, 'name': 1}
# )
# Fetch the list of colleges that are affiliated to
# the selected university (univ_id)
colg_under_univ_id = node_collection.find(
{
'member_of': college._id,
'relation_set.college_affiliated_to': univ_id
},
{
'name': 1, 'member_of': 1, 'created_by': 1,
'created_at': 1, 'content': 1,
'relation_set.has_officer_incharge': 1,
'relation_set.college_has_acourse': 1
}
).sort('name', 1)
list_colg = []
unassigned_po_colg_list = []
already_announced_in_colg_list = []
for each in colg_under_univ_id:
is_po_exists = False
if each.relation_set:
for rel in each.relation_set:
if rel and "has_officer_incharge" in rel:
if rel["has_officer_incharge"]:
is_po_exists = True
if rel and "college_has_acourse" in rel:
if rel["college_has_acourse"]:
if dc_courses_id_list:
acourse_exists = node_collection.find_one({
'_id': {'$in': rel["college_has_acourse"]},
'relation_set.announced_for': {'$in': dc_courses_id_list},
'attribute_set.start_time': start_time,
'attribute_set.end_time': end_time,
'attribute_set.ann_course_closure': "Open",
'status': "PUBLISHED"
})
if acourse_exists:
if each._id not in already_announced_in_colg_list:
already_announced_in_colg_list.append(each.name)
if each.name in already_announced_in_colg_list:
continue
elif is_po_exists:
if each not in list_colg:
list_colg.append(each)
else:
if each not in unassigned_po_colg_list:
unassigned_po_colg_list.append(each.name)
response_dict["already_announced_in_colg_list"] = \
already_announced_in_colg_list
response_dict["unassigned_PO_colg_list"] = unassigned_po_colg_list
if list_colg:
drawer_template_context = edit_drawer_widget(
"RelationType", group_id, None, None,
checked="announced_course_create_edit",
left_drawer_content=list_colg
)
drawer_template_context["widget_for"] = \
"announced_course_create_edit"
drawer_widget = render_to_string(
'ndf/drawer_widget.html', drawer_template_context,
context_instance=RequestContext(request)
)
response_dict["drawer_widget"] = drawer_widget
msg_string = "Following are the list of colleges where " + \
"selected Course(s) should be announced:"
else:
msg_string = "There are no colleges under this university " + \
"where selected Course(s) could be announced!!!"
# nc_dict = {}
if colg_under_univ_id.count():
response_dict["success"] = True
else:
msg_string = "No college is affiliated to under selected " + \
"University!!!"
response_dict["success"] = False
# response_dict["unset_nc"] = nc_dict
response_dict["message"] = msg_string
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "CollegeFetchError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_anncourses_allstudents(request, group_id):
"""
This view returns the list of students (all, or only those not yet enrolled) that match the selected registration year, degree and college, for the student-enrollment screen.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
all_students_text = ""
query = {}
try:
if request.is_ajax() and request.method == "GET":
registration_year = str(request.GET.get("registration_year", ""))
all_students = request.GET.get("all_students", "")
degree_year = request.GET.get("degree_year", "")
degree_name = request.GET.get("degree_name", "")
sce_gs_id = request.GET.get("sce_gs_id", "")
acourse_val = request.GET.getlist("acourse_val[]", "")
for i, each in enumerate(acourse_val):
acourse_val[i] = ObjectId(each)
# Following parameters to be used for edit_drawer_widget()
# node = None
# checked = None
enrolled_stud_count = 0
non_enrolled_stud_count = 0
colg_of_acourse_id = None
# Check whether any field has missing value or not
if registration_year == "" or all_students == "":
# registration_year = datetime.datetime.now().year.__str__()
all_students = u"false"
# error_message = "Invalid data: No data found in any of the field(s)!!!"
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
sce_gs = node_collection.one({'_id':ObjectId(sce_gs_id)},
{'member_of': 1, 'attribute_set.has_enrolled': 1, 'relation_set.for_college':1}
)
# From Announced Course node fetch College's ObjectId
# acourse_node = node_collection.find_one(
# {'_id': {'$in': acourse_val}, 'relation_set.acourse_for_college': {'$exists': True}},
# {'attribute_set': 1, 'relation_set.acourse_for_college': 1}
# )
for rel in sce_gs.relation_set:
if rel:
colg_of_acourse_id = rel["for_college"][0]
break
date_gte = datetime.datetime.strptime("1/1/"+registration_year, "%d/%m/%Y")
date_lte = datetime.datetime.strptime("31/12/"+registration_year, "%d/%m/%Y")
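# The registration filter spans the whole selected calendar year (1 Jan to 31 Dec)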
# query = {
# 'member_of': student._id,
# 'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
# # 'attribute_set.degree_year':degree_year,
# # 'attribute_set.degree_name':degree_name,
# 'relation_set.student_belongs_to_college': ObjectId(colg_of_acourse_id)
# }
# If College's ObjectId exists, fetch respective College's group
if colg_of_acourse_id:
colg_of_acourse = node_collection.one(
{'_id': colg_of_acourse_id, 'relation_set.has_group': {'$exists': True}},
{'relation_set.has_group': 1}
)
if colg_of_acourse:
for rel in colg_of_acourse.relation_set:
if rel and "has_group" in rel:
# If rel exists, it means it's has_group
# then update query
query = {
'$or': [
{
'member_of': student._id,
'group_set': rel["has_group"][0],
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
},
{
'member_of': student._id,
'relation_set.student_belongs_to_college': ObjectId(colg_of_acourse_id),
'attribute_set.registration_date': {'$gte': date_gte, '$lte': date_lte},
}
]
}
break
if degree_year:
query.update({'attribute_set.degree_year': degree_year })
if degree_name:
query.update({'attribute_set.degree_name': degree_name })
# Check whether StudentCourseEnrollment created for given acourse_val
# Set node as StudentCourseEnrollment node
# and checked as "has_enrolled", i.e. AT of StudentCourseEnrollment node
enrolled_stud_list = []
if sce_gs:
for attr in sce_gs.attribute_set:
if attr and "has_enrolled" in attr:
enrolled_stud_list = attr["has_enrolled"]
enrolled_stud_count = str(len(attr["has_enrolled"]))
break
# sce_gs.get_neighbourhood(sce_gs.member_of)
# node = sce_gs
# checked = "has_enrolled"
res = None
if all_students == u"true":
all_students_text = "All students (including enrolled ones)"
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
'registration_year':'$attribute_set.registration_year'
}
},
{
'$sort': {'name': 1}
}
])
total_students_count = len(res["result"])
all_students_text += " [Count("+str(total_students_count)+")]"
non_enrolled_stud_count = total_students_count - int(enrolled_stud_count)
elif all_students == u"false":
query.update({'_id': {'$nin': enrolled_stud_list}})
all_students_text = "Only non-enrolled students"
# Find students which are not enrolled in selected announced course
# query.update({'relation_set.selected_course': {'$ne': acourse_node._id}})
query.update({'relation_set.selected_course': {'$nin': acourse_val}})
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
'registration_year':'$attribute_set.registration_year'
}
},
{
'$sort': {'name': 1}
}
])
non_enrolled_stud_count = str(len(res["result"]))
all_students_text += " [Count("+non_enrolled_stud_count+")]"
# response_dict["announced_courses"] = []
column_headers = [
("name", "Name"),
("degree_name", "Degree"),
("degree_year", "Year"),
]
response_dict["column_headers"] = column_headers
response_dict["success"] = True
response_dict["students_data_set"] = res["result"]
if not res["result"]:
response_dict["message"] = "No filtered results found"
response_dict["enrolled_stud_count"] = enrolled_stud_count
response_dict["non_enrolled_stud_count"] = non_enrolled_stud_count
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "EnrollInCourseError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "EnrollInCourseError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_course_details_for_trainer(request, group_id):
"""
This view returns a dictionary holding data required for trainer's enrollment
into given announced course(s).
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
course_enrollement_details - Dictionary that has following structure:
Key: Course-name
Value: A list of dictionary where this dictionary's structure is as follows:
1) Key: ann_course_id; Value: ObjectId of corresponding Announced Course
2) Key: university; Value: University-name
3) Key: college; Value: College GSystem's document
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "GET":
course_type = request.GET.get("course_type", "")
trainer_type = request.GET.get("trainer_type", "")
# Check whether any field has missing value or not
if course_type == "" or trainer_type == "":
error_message = "Invalid data: No data found in any of the field(s)!!!"
raise Exception(error_message)
# Using below text variable to fetch specific attribute based on which
# type of trainer we are dealing with
# Voluntary Teacher -- voln_tr_qualifications
# Master Trainer -- mast_tr_qualifications
fetch_attribute_for_trainer = ""
bool_trainer_type = None
if trainer_type == "Voluntary Teacher":
fetch_attribute_for_trainer = "voln_tr_qualifications"
bool_trainer_type = True
elif trainer_type == "Master Trainer":
fetch_attribute_for_trainer = "mast_tr_qualifications"
bool_trainer_type = False
# Fetch required GSystemTypes (NUSSD Course, Announced Course, University, College)
course_gst = node_collection.one({
'_type': "GSystemType", 'name': "NUSSD Course"
}, {
'_id': 1
})
college_gst = node_collection.one({
'_type': "GSystemType", 'name': "College"
}, {
'_id': 1
})
university_gst = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
mis_admin = node_collection.one({
'_type': "Group", 'name': "MIS_admin"
}, {
'_id': 1
})
course_enrollement_details = {}
course_requirements = {}
college_dict = {}
university_dict = {}
course_dict = {}
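# Lookup maps built below: course name -> ObjectId, plus per-college caches of university and college details to avoid refetching them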
# Fetching NUSSD Course(s) registered under MIS_admin group
nussd_courses_cur = node_collection.find({
"member_of": course_gst._id,
"group_set": mis_admin._id,
"attribute_set.nussd_course_type": course_type
}, {
"name": 1,
"attribute_set." + fetch_attribute_for_trainer: 1
})
for course in nussd_courses_cur:
course_dict[course.name] = course._id
# Set given course's requirements
for requirement in course.attribute_set:
if requirement:
course_requirements[course.name] = requirement[fetch_attribute_for_trainer]
course_enrollement_details[course.name] = []
if nussd_courses_cur.count():
college_cur = node_collection.find({
"member_of": college_gst._id,
"group_set": mis_admin._id
}, {
"name": 1,
"college_affiliated_to": 1
})
for college in college_cur:
university_gs = None
if college._id not in university_dict:
university_gs = node_collection.find_one({
'member_of': university_gst._id,
'relation_set.affiliated_college': college._id
}, {
'_id': 1,
'name': 1
})
if university_gs:
university_dict[college._id] = university_gs
college_data = {}
college_data["college"] = college.name
college_data["university"] = university_gs.name
if bool_trainer_type:
# If bool_trainer_type (True, i.e Voluntary Teacher)
# Set organization_id as College's ObjectId
# as we are creating a link between Voluntary Teacher & College
college_data["organization_id"] = college._id
else:
# If bool_trainer_type (False, i.e Master Trainer)
# Set organization_id as University's ObjectId
# as we are creating a link between Master Trainer & University
college_data["organization_id"] = university_gs._id
college_dict[college._id] = college_data
if college._id in university_dict:
for course_name in course_enrollement_details.keys():
data_dict = {}
data_dict["ann_course_id"] = course_dict[course_name]
data_dict.update(college_dict[college._id])
course_enrollement_details[course_name].append(data_dict)
response_dict["course_enrollement_details"] = course_enrollement_details
response_dict["course_requirements"] = course_requirements
response_dict["success"] = True
response_dict["message"] = ""
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
else:
error_message = "TrainerCourseDetailError: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "TrainerCourseDetailError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def get_students_for_approval(request, group_id):
"""This returns data-review list of students that need approval for Course enrollment.
"""
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
enrollment_id = request.POST.get("enrollment_id", "")
sce_gst = node_collection.one({'_type': "GSystemType", 'name': "StudentCourseEnrollment"})
if sce_gst:
sce_gs = node_collection.one(
{'_id': ObjectId(enrollment_id), 'member_of': sce_gst._id, 'group_set': ObjectId(group_id), 'status': u"PUBLISHED"},
{'member_of': 1}
)
approval_nodes = []
data = {}
if sce_gs:
sce_gs.get_neighbourhood(sce_gs.member_of)
data["pk"] = str(sce_gs._id)
data["CollegeId"] = sce_gs.for_college[0]._id
data["College"] = sce_gs.for_college[0].name
course_id_list = []
for each in sce_gs.for_acourse:
course_id_list.append(each._id.__str__())
data["CourseId"] = ",".join(course_id_list)
if len(sce_gs.for_acourse) > 1:
# It means it's a Foundation Course's (FC) enrollment
start_time = None
end_time = None
for each in sce_gs.for_acourse[0].attribute_set:
if not each:
pass
elif "start_time" in each:
start_time = each["start_time"]
elif "end_time" in each:
end_time = each["end_time"]
data["Course"] = "Foundation_Course" + "_" + start_time.strftime("%b-%Y") + "_" + end_time.strftime("%b-%Y")
else:
# Courses other than FC
data["Course"] = sce_gs.for_acourse[0].name
# data["CompletedOn"] = sce_gs.completed_on
data["Enrolled"] = len(sce_gs.has_enrolled)
# approve_task = sce_gs.has_current_approval_task[0]
approve_task = sce_gs.has_current_approval_task[0]
approve_task.get_neighbourhood(approve_task.member_of)
# This logic should live in create_task (rename it to create_update_task);
# patching it here for now.
# if data["Enrolled"] > 0:
# approve_task.Status = u"In Progress"
# else:
# approve_task.Status = u"Resolved"
# approve_task.save()
data["Status"] = approve_task.Status
if sce_gs.has_key("has_approved"):
if sce_gs.has_approved:
data["Approved"] = len(sce_gs.has_approved)
else:
data["Approved"] = None
if sce_gs.has_key("has_rejected"):
if sce_gs.has_rejected:
data["Rejected"] = len(sce_gs.has_rejected)
else:
data["Rejected"] = None
enrolled_students_list = []
if sce_gs.has_enrolled:
enrolled_students_list = sce_gs.has_enrolled
approved_students_list = []
if sce_gs.has_approved:
approved_students_list = sce_gs.has_approved
rejected_students_list = []
if sce_gs.has_rejected:
rejected_students_list = sce_gs.has_rejected
# Update Enrolled students list
updated_enrolled_students_list = []
for each_id in enrolled_students_list:
if (each_id not in approved_students_list) and (each_id not in rejected_students_list):
updated_enrolled_students_list.append(each_id)
res = node_collection.collection.aggregate([
{
'$match': {
'_id':{"$in":updated_enrolled_students_list}
}
}, {
'$project': {
'_id': 1,
'name': '$name',
'degree_name': '$attribute_set.degree_name',
'degree_year':'$attribute_set.degree_year',
# 'registration_year':{"$date": "$attribute_set.registration_date"}
'registration_year':"$attribute_set.registration_date"
}
},
{
'$sort': {'name': 1}
}
])
# Keep only the year part of the full registration date
for each in res["result"]:
reg_year = each["registration_year"][0]
each["registration_year"] = datetime.datetime.strftime(reg_year,"%Y")
enrollment_columns = [
("name", "Name"),
("degree_name", "Degree"),
("degree_year", "Year of Study"),
("registration_year", "Registration Year")
]
response_dict["success"] = True
response_dict["enrollment_details"] = data
response_dict["column_headers"] = enrollment_columns
response_dict["student_approval_data"] = res["result"]
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "StudentCourseApprovalError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def approve_students(request, group_id):
"""This returns approved and/or rejected students count respectively.
"""
try:
response_dict = {'success': False, 'message': ""}
if request.is_ajax() and request.method == "POST":
approval_state = request.POST.get("approval_state", "")
enrollment_id = request.POST.get("enrollment_id", "")
enrollment_id = ObjectId(enrollment_id)
course_ids = request.POST.get("course_id", "")
course_ids = [(ObjectId(each.strip()), each.strip()) for each in course_ids.split(",")]
course_name = request.POST.get("course_name", "")
students_selected = request.POST.getlist("students_selected[]", "")
students_selected = [ObjectId(each_str_id) for each_str_id in students_selected]
college_id = request.POST.get("college_id", "")
college_id = ObjectId(college_id)
college_name = request.POST.get("college_name", "")
sce_gs = node_collection.collection.aggregate([{
"$match": {
"_id": enrollment_id, "group_set": ObjectId(group_id),
"relation_set.has_current_approval_task": {"$exists": True},
"status": u"PUBLISHED"
}
}, {
"$project": {
"has_enrolled": "$attribute_set.has_enrolled",
"has_approved": "$attribute_set.has_approved",
"has_rejected": "$attribute_set.has_rejected",
"has_approval_task": "$attribute_set.has_approval_task",
"has_current_approval_task": "$relation_set.has_current_approval_task"
}
}])
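# Note: projecting from the attribute_set/relation_set arrays yields list-wrapped values, hence the [0] unwrapping below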
user_id = int(request.user.id) # getting django user's id
user_name = request.user.username # getting django user's username
remaining_count = None
enrolled_list = []
approved_list = []
rejected_list = []
error_id_list = []
has_approval_task_dict = {}
approved_or_rejected_list = []
has_approval_task_dict = sce_gs["result"][0]["has_approval_task"]
if has_approval_task_dict:
has_approval_task_dict = has_approval_task_dict[0]
enrolled_list = sce_gs["result"][0]["has_enrolled"]
if enrolled_list:
enrolled_list = enrolled_list[0]
approved_list = sce_gs["result"][0]["has_approved"]
if approved_list:
approved_list = approved_list[0]
rejected_list = sce_gs["result"][0]["has_rejected"]
if rejected_list:
rejected_list = rejected_list[0]
at_name = ""
course_enrollment_status_text = u""
has_approved_or_rejected_at = None
if approval_state == "Approve":
at_name = "has_approved"
course_enrollment_status_text = u"Enrollment Approved"
approved_or_rejected_list = approved_list
elif approval_state == "Reject":
at_name = "has_rejected"
course_enrollment_status_text = u"Enrollment Rejected"
approved_or_rejected_list = rejected_list
course_enrollment_status_at = node_collection.one({
'_type': "AttributeType", 'name': "course_enrollment_status"
})
# For each student, approve enrollment into given course(Domain)/courses(Foundation Course)
# For that update value as "Enrollment Approved" against corresponding course (Course ObjectId)
# in "course_enrollment_status" attribute of respective student
# This should be done only for Course(s) which exists in "selected_course" relation for that student
stud_cur = node_collection.collection.aggregate([{
"$match": {
"_id": {"$in": students_selected}
}
}, {
"$project": {
"_id": 1,
"selected_course": "$relation_set.selected_course",
"course_enrollment_status": "$attribute_set.course_enrollment_status"
}
}])
# Use multiprocessing to speed up the for loop below, i.e. the approval
# of students into their respective course(s)
prev_approved_or_rejected_list = []
new_list = []
prev_approved_or_rejected_list.extend(approved_or_rejected_list)
new_list = mp_approve_students(
stud_cur["result"], course_ids,
course_enrollment_status_text,
course_enrollment_status_at,
prev_approved_or_rejected_list,
num_of_processes=multiprocessing.cpu_count()
)
approved_or_rejected_list.extend(new_list)
has_approved_or_rejected_at = node_collection.one({
'_type': "AttributeType", 'name': at_name
})
try:
attr_node = create_gattribute(
enrollment_id,
has_approved_or_rejected_at,
approved_or_rejected_list
)
except Exception as e:
error_id_list.append(enrollment_id)
# Update student counts in the enrolled, approved & rejected lists
enrolled_count = len(enrolled_list)
if approval_state == "Approve":
approved_count = len(approved_or_rejected_list)
else:
approved_count = len(approved_list)
if approval_state == "Reject":
rejected_count = len(approved_or_rejected_list)
else:
rejected_count = len(rejected_list)
remaining_count = enrolled_count - (approved_count + rejected_count)
# Update status of Approval task
has_current_approval_task_id = sce_gs["result"][0]["has_current_approval_task"]
if has_current_approval_task_id:
has_current_approval_task_id = has_current_approval_task_id[0][0]
task_status_at = node_collection.one({
'_type': "AttributeType", 'name': "Status"
})
task_status_value = ""
task_status_msg = ""
if remaining_count == 0:
if enrolled_count == (approved_count + rejected_count):
task_status_value = u"Closed"
task_status_msg = "This task has been closed after successful completion " + \
"of approval process of students."
else:
task_status_value = u"In Progress"
task_status_msg = "This task is in progress."
try:
# Update the approval task's status as "Closed"
task_dict = {}
task_dict["_id"] = has_current_approval_task_id
task_dict["Status"] = task_status_value
# Update description of Approval task only at time of it's closure
if task_status_value is u"Closed":
task_dict["created_by_name"] = user_name
task_message = task_status_msg + " Following are the details " + \
"of this approval process:-" + \
"\n Total No. of student(s) enrolled: " + str(enrolled_count) + \
"\n Total No. of student(s) approved: " + str(approved_count) + \
"\n Total No. of student(s) rejected: " + str(rejected_count) + \
"\n Total No. of student(s) remaining: " + str(remaining_count)
task_dict["content_org"] = unicode(task_message)
task_dict["modified_by"] = user_id
task_node = create_task(task_dict)
if task_status_value == u"Closed":
# Update the StudentCourseEnrollment node's status as "CLOSED"
at_type_node = None
at_type_node = node_collection.one({
'_type': "AttributeType",
'name': u"enrollment_status"
})
if at_type_node:
at_node = create_gattribute(enrollment_id, at_type_node, u"CLOSED")
# Set completion status for the closed approval task in StudentCourseEnrollment node's has_approval_task
completed_on = datetime.datetime.now()
if str(has_current_approval_task_id) in has_approval_task_dict:
has_approval_task_dict[str(has_current_approval_task_id)] = {
"completed_on": completed_on, "completed_by": user_id
}
at_type_node = None
at_type_node = node_collection.one({
'_type': "AttributeType",
'name': u"has_approval_task"
})
if at_type_node:
attr_node = create_gattribute(enrollment_id, at_type_node, has_approval_task_dict)
# Send intimation to PO's and admin to create batches
from_user = user_id
url_link_without_domain_part = ""
url_link = ""
activity_text = "batch creation"
msg = "This is to inform you that approval process of " + \
"students for " + college_name + " college has been " + \
"completed with following details:" + \
"\n\tCourse name: " + course_name + \
"\n\tTotal No. of student(s) enrolled: " + str(enrolled_count) + \
"\n\tTotal No. of student(s) approved: " + str(approved_count) + \
"\n\tTotal No. of student(s) rejected: " + str(rejected_count) + \
"\n\tTotal No. of student(s) remaining: " + str(remaining_count) + \
"\n\nYou can proceed with batch creation for given course in this college."
# Fetch college group to get Program Officers of the college
college_group_node = node_collection.find_one({
"_type": "Group", "relation_set.group_of": college_id
}, {
"created_by": 1, "group_admin": 1
})
to_django_user_list = []
user_id_list = []
user_id_list.extend(college_group_node.group_admin)
user_id_list.append(college_group_node.created_by)
for each_user_id in user_id_list:
user_obj = User.objects.get(id=each_user_id)
if user_obj not in to_django_user_list:
to_django_user_list.append(user_obj)
if url_link_without_domain_part:
site = Site.objects.get(pk=1)
site = site.name.__str__()
domain = "http://" + site
url_link = domain + url_link_without_domain_part
render_label = render_to_string(
"notification/label.html",
{
"sender": from_user,
"activity": activity_text,
"conjunction": "-",
"link": url_link
}
)
notification.create_notice_type(render_label, msg, "notification")
notification.send(to_django_user_list, render_label, {"from_user": from_user})
except Exception as e:
error_id_list.append(has_current_approval_task_id)
response_dict["success"] = True
response_dict["enrolled"] = enrolled_count
response_dict["approved"] = approved_count
response_dict["rejected"] = rejected_count
response_dict["remaining"] = remaining_count
response_dict["task_status"] = task_status_value
return HttpResponse(json.dumps(response_dict, cls=NodeJSONEncoder))
except Exception as e:
error_message = "ApproveStudentsError: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@get_execution_time
def mp_approve_students(student_cur, course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, num_of_processes=4):
def worker(student_cur, course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, out_q):
updated_approved_or_rejected_list = []
for each_stud in student_cur:
# Fetch student node along with selected_course and course_enrollment_status
student_id = each_stud["_id"]
selected_course = each_stud["selected_course"]
if selected_course:
selected_course = selected_course[0]
# Fetch course_enrollment_status -- Holding Course(s) along with it's enrollment status
course_enrollment_status = each_stud["course_enrollment_status"]
if course_enrollment_status:
course_enrollment_status = course_enrollment_status[0]
else:
course_enrollment_status = {}
for each_course_id, str_course_id in course_ids:
# If ObjectId exists in selected_course and ObjectId(in string format)
# exists as key in course_enrollment_status
# Then only update status as "Enrollment Approved"/"Enrollment Rejected"
if each_course_id in selected_course and str_course_id in course_enrollment_status:
# course_enrollment_status.update({str_course_id: course_enrollment_status_text})
course_enrollment_status[str_course_id] = course_enrollment_status_text
try:
at_node = create_gattribute(student_id, course_enrollment_status_at, course_enrollment_status)
if at_node:
# If status updated, then only update approved_or_rejected_list
# by appending given student's ObjectId into it
if student_id not in approved_or_rejected_list and student_id not in updated_approved_or_rejected_list:
# approved_or_rejected_list.append(student_id)
updated_approved_or_rejected_list.append(student_id)
except Exception as e:
error_id_list.append(student_id)
continue
out_q.put(updated_approved_or_rejected_list)
# Each process gets a 'chunksize' slice of student_cur and a queue to put its
# output list into
out_q = multiprocessing.Queue()
chunksize = int(math.ceil(len(student_cur) / float(num_of_processes)))
procs = []
for i in range(num_of_processes):
p = multiprocessing.Process(
target=worker,
args=(student_cur[chunksize * i:chunksize * (i + 1)], course_ids, course_enrollment_status_text, course_enrollment_status_at, approved_or_rejected_list, out_q)
)
procs.append(p)
p.start()
# Collect all results into a single result list. We know how many lists
# with results to expect.
resultlist = []
for i in range(num_of_processes):
resultlist.extend(out_q.get())
# Wait for all worker processes to finish
for p in procs:
p.join()
return resultlist
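# Illustrative usage (sketch, mirroring the call made in approve_students above;
# the variable names here are placeholders):
# newly_approved = mp_approve_students(stud_cur["result"],
#                                      [(course_oid, str(course_oid))],
#                                      u"Enrollment Approved",
#                                      course_enrollment_status_at,
#                                      already_approved_list,
#                                      num_of_processes=multiprocessing.cpu_count())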
@get_execution_time
def get_students_for_batches(request, group_id):
"""
This view returns the drawer of approved students who are not yet members of any batch for the given announced course.
Arguments:
group_id - ObjectId of the currently selected group
Returns:
A dictionary consisting of following key-value pairs:-
success - Boolean giving the state of ajax call
message - Basestring giving the error/information message
"""
response_dict = {'success': False, 'message': ""}
b_arr=[]
try:
if request.is_ajax() and request.method == "GET":
btn_id = request.GET.get('btn_id', "")
batch_id = request.GET.get('node_id', "")
ac_id = request.GET.get('ac_id', "")
batch_name_index = 1
batches_for_same_course = []
all_batches_in_grp = []
batch_mem_dict = {}
batch_member_list = []
batch_gst = node_collection.one({'_type':"GSystemType", 'name':"Batch"})
batch_for_group = node_collection.find({'member_of': batch_gst._id, 'relation_set.has_course': ObjectId(ac_id)})
for each1 in batch_for_group:
existing_batch = node_collection.one({'_id': ObjectId(each1._id)})
batch_name_index += 1
for each2 in each1.relation_set:
if "has_batch_member" in each2:
batch_member_list.extend(each2['has_batch_member'])
break
each1.get_neighbourhood(each1.member_of)
batch_mem_dict[each1.name] = each1
# The College's ObjectId is required if a student record can't be found
# using the group's ObjectId:
# records created via a csv file append the MIS_admin group's
# ObjectId to the group_set field instead of the college-group's ObjectId
ann_course = node_collection.one({'_id': ObjectId(ac_id)}, {'relation_set.acourse_for_college': 1,"relation_set.course_has_enrollment":1})
sce_id = None
for rel in ann_course.relation_set:
if rel and "course_has_enrollment" in rel:
sce_id = rel["course_has_enrollment"][0]
break
sce_node = node_collection.one({"_id":ObjectId(sce_id)},{"attribute_set.has_approved":1})
approved_students_list = []
for attr in sce_node.attribute_set:
if attr and "has_approved" in attr:
approved_students_list = attr["has_approved"]
break
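# Offer only those approved students who are not already members of an existing batch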
approve_not_in_batch_studs = [stud_id for stud_id in approved_students_list if stud_id not in batch_member_list]
student = node_collection.one({'_type': "GSystemType", 'name': "Student"})
res = node_collection.find(
{
'_id': {"$in": approve_not_in_batch_studs},
'member_of': student._id
# '$or': [
# {'group_set': ObjectId(group_id)},
# {'relation_set.student_belongs_to_college': college_id}
# ],
# 'relation_set.selected_course': ObjectId(ac_id)
},
{'_id': 1, 'name': 1, 'member_of': 1, 'created_by': 1, 'created_at': 1, 'content': 1}
).sort("name", 1)
drawer_template_context = edit_drawer_widget("RelationType", group_id, None, None, None, left_drawer_content=res)
drawer_template_context["widget_for"] = "new_create_batch"
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance = RequestContext(request)
)
response_dict["success"] = True
response_dict["drawer_widget"] = drawer_widget
response_dict["student_count"] = res.count()
response_dict["batch_name_index"] = batch_name_index
response_dict["batches_for_same_course"] = json.dumps(batch_mem_dict, cls=NodeJSONEncoder)
return HttpResponse(json.dumps(response_dict))
else:
error_message = "Batch Drawer: Either not an ajax call or not a GET request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Batch Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
# ====================================================================================================
@get_execution_time
def edit_task_title(request, group_id):
'''
This function edits the task's title.
'''
if request.is_ajax() and request.method =="POST":
taskid = request.POST.get('taskid',"")
title = request.POST.get('title',"")
task = node_collection.find_one({'_id':ObjectId(taskid)})
task.name = title
task.save()
return HttpResponse(task.name)
else:
raise Http404
@get_execution_time
def edit_task_content(request, group_id):
'''
This function edits the task's content (org text rendered to HTML).
'''
if request.is_ajax() and request.method =="POST":
taskid = request.POST.get('taskid',"")
content_org = request.POST.get('content_org',"")
task = node_collection.find_one({'_id':ObjectId(taskid)})
task.content_org = unicode(content_org)
# Required to link temporary files with the current user who is modifying this document
usrname = request.user.username
filename = slugify(task.name) + "-" + usrname + "-"
task.content = org2html(content_org, file_prefix=filename)
task.save()
return HttpResponse(task.content)
else:
raise Http404
@get_execution_time
def insert_picture(request, group_id):
if request.is_ajax():
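# Collect all JPEG File nodes along with their GridFS filenames ('fname') for the insert-picture dialog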
resource_list=node_collection.find({'_type' : 'File', 'mime_type' : u"image/jpeg" },{'name': 1})
resources=list(resource_list)
n=[]
for each in resources:
each['_id'] =str(each['_id'])
file_obj = node_collection.one({'_id':ObjectId(str(each['_id']))})
if file_obj.fs_file_ids:
grid_fs_obj = file_obj.fs.files.get(file_obj.fs_file_ids[0])
each['fname']=grid_fs_obj.filename
each['name'] = each['name']
n.append(each)
return StreamingHttpResponse(json.dumps(n))
# =============================================================================
@get_execution_time
def close_event(request, group_id, node):
#close_event checks whether the event start date-time is greater or less than the current date-time.
#If the current date-time is greater than the event time, it changes the edit button
#on the GUI to "reschedule" and stores the current date-time in the database as a reference
#for how long the event is allowed to be rescheduled.
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
create_gattribute(ObjectId(node),reschedule_event,{"reschedule_till":datetime.datetime.today(),"reschedule_allow":False})
return HttpResponse("event closed")
@get_execution_time
def save_time(request, group_id, node):
start_time = request.POST.get('start_time','')
end_time = request.POST.get('end_time','')
reschedule_event_start = node_collection.one({"_type":"AttributeType","name":"start_time"})
reschedule_event_end = node_collection.one({"_type":"AttributeType","name":"end_time"})
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
start_time= parse_template_data(datetime.datetime,start_time, date_format_string="%d/%m/%Y %H:%M")
end_time= parse_template_data(datetime.datetime,end_time, date_format_string="%d/%m/%Y %H:%M")
create_gattribute(ObjectId(node),reschedule_event_start,start_time)
create_gattribute(ObjectId(node),reschedule_event_end,end_time)
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
event_node = node_collection.one({"_id":ObjectId(node)})
# The code below gets the old event_edit_reschedule value from the database;
# if a value exists, the new start time is merged into it,
# else a new value is assigned
a = {}
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
a = i['event_edit_reschedule']
a['reschedule_till'] = start_time
create_gattribute(ObjectId(node),reschedule_event,a)
#change the name of the event based on new time
if event_node:
name = event_node.name
name_arr = name.split("--")
new_name = unicode(str(name_arr[0]) + "--" + str(name_arr[1]) + "--" + str(start_time))
event_node.name = new_name
event_node.save()
return HttpResponse("Session rescheduled")
@get_execution_time
def check_date(request, group_id, node):
reschedule = request.POST.get('reschedule','')
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.start_time":{'$gt':datetime.datetime.today()}})
a = {}
if test_output.count() == 0 and reschedule == 'True':
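# The event's start time has already passed; if rescheduling was requested, check whether the reschedule window (reschedule_till) is still open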
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.event_edit_reschedule.reschedule_till":{'$gt':datetime.datetime.today()}})
if test_output.count() != 0:
message = "event Open"
if test_output.count() == 0:
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
event_node = node_collection.one({"_id":ObjectId(node)})
a=""
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
a = i['event_edit_reschedule']
if a:
for i in a:
if unicode('reschedule_allow') in i:
a['reschedule_allow'] = False
create_gattribute(ObjectId(node),reschedule_event,a)
else:
create_gattribute(ObjectId(node),reschedule_event,{'reschedule_allow':False})
event_node = node_collection.one({"_id":ObjectId(node)})
message = "event closed"
return HttpResponse(message)
@get_execution_time
def reschedule_task(request, group_id, node):
task_dict={}
#name of the programme officer who has initiated this task
'''Required keys: _id[optional], name, group_set, created_by, modified_by, contributors, content_org,
created_by_name, Status, Priority, start_time, end_time, Assignee, has_type
'''
task_groupset=node_collection.one({"_type":"Group","name":"MIS_admin"})
a=[]
b=[]
c=[]
listing=task_groupset.group_admin
listing.append(task_groupset.created_by)
return_message=""
values=[]
if request.user.id in listing:
reschedule_attendance = node_collection.one({"_type":"AttributeType","name":"reschedule_attendance"})
marks_entry = node_collection.find({"_type":"AttributeType","name":"marks_entry_completed"})
reschedule_type = request.POST.get('reschedule_type','')
reshedule_choice = request.POST.get('reshedule_choice','')
session = request.POST.get('session','')
end_time = node_collection.one({"name":"end_time"})
from datetime import date,time,timedelta
date1 = datetime.date.today() + timedelta(2)
ti = datetime.time(0,0)
event_start_time = ""
start_time = request.POST.get('reschedule_date','')
b = parse_template_data(datetime.datetime,start_time, date_format_string="%d/%m/%Y %H:%M")
#fetch event
event_node = node_collection.one({"_id":ObjectId(node)})
reschedule_dates = []
#For any type, change the event status to 'Rescheduled' if the request comes
#for generating a task for rescheduling an event
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(node),event_status,unicode('Rescheduled'))
task_id= {}
if reschedule_type == 'event_reschedule' :
for i in event_node.attribute_set:
if unicode('event_edit_reschedule') in i.keys():
if unicode ('reschedule_dates') in i['event_edit_reschedule']:
reschedule_dates = i['event_edit_reschedule']['reschedule_dates']
if unicode("event_date_task") in i.keys():
task_id = i["event_date_task"]
if unicode("start_time") in i.keys():
event_start_time = i["start_time"]
if task_id:
for i in task_id:
if unicode('Task') == i:
tid = i
task_node = node_collection.find({"_id":ObjectId(task_id["Task"])})
task_attribute = node_collection.one({"_type":"AttributeType","name":"Status"})
create_gattribute(ObjectId(task_node[0]._id),task_attribute,unicode("Closed"))
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_date_task"})
task_id['Reschedule_Task'] = True
create_gattribute(ObjectId(node),reschedule_event,task_id)
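# Keep the current start time in the reschedule history before opening a new reschedule window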
reschedule_dates.append(event_start_time)
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_edit_reschedule"})
create_gattribute(ObjectId(node),reschedule_event,{"reschedule_till":b,"reschedule_allow":True,"reschedule_dates":reschedule_dates})
return_message = "Event Dates Re-Schedule Opened"
else:
event_details = ""
for i in event_node.attribute_set:
if unicode('reschedule_attendance') in i.keys():
if unicode ('reschedule_dates') in i['reschedule_attendance']:
reschedule_dates = i['reschedule_attendance']['reschedule_dates']
if unicode('marks_entry_completed') in i.keys():
marks_entry_completed = i['marks_entry_completed']
if unicode("event_attendance_task") in i.keys():
task_id = i["event_attendance_task"]
if task_id:
for i in task_id:
if unicode('Task') == i:
tid = task_id['Task']
task_node = node_collection.find({"_id":ObjectId(tid)})
task_attribute = node_collection.one({"_type":"AttributeType","name":"Status"})
create_gattribute(ObjectId(task_node[0]._id),task_attribute,unicode("Closed"))
break
reschedule_dates.append(datetime.datetime.today())
if reshedule_choice == "Attendance" or reshedule_choice == "" :
create_gattribute(ObjectId(node),reschedule_attendance,{"reschedule_till":b,"reschedule_allow":True,"reschedule_dates":reschedule_dates})
if session != str(1) and reshedule_choice == "Assessment" :
create_gattribute(ObjectId(node),marks_entry[0],False)
task_id['Reschedule_Task'] = True
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_attendance_task"})
create_gattribute(ObjectId(node),reschedule_event,task_id)
return_message="Event Re-scheduled"
else:
reschedule_type = request.POST.get('reschedule_type','')
reshedule_choice = request.POST.get('reshedule_choice','')
if reschedule_type == "attendance_reschedule":
if reshedule_choice == "Attendance" or reshedule_choice == "":
content = "Attendance"
if reshedule_choice == "Assessment":
content = "Assessment"
else:
content = "start time"
Mis_admin=node_collection.find({"name":"MIS_admin"})
Mis_admin_list=Mis_admin[0].group_admin
Mis_admin_list.append(Mis_admin[0].created_by)
path=request.POST.get('path','')
site = Site.objects.get(pk=1)
site = site.name.__str__()
event_reschedule_link = "http://" + site + path
b.append(task_groupset._id)
glist_gst = node_collection.one({'_type': "GSystemType", 'name': "GList"})
task_type = []
task_type.append(node_collection.one({'member_of': glist_gst._id, 'name':"Re-schedule Event"})._id)
task_dict.update({"has_type" :task_type})
task_dict.update({'name':unicode("Re-schedule Event" + " " + content)})
task_dict.update({'group_set':b})
task_dict.update({'created_by':request.user.id})
task_dict.update({'modified_by':request.user.id})
task_dict.update({'content_org':unicode("Please Re-Schedule the Following event"+" \t " "\n- Please click [[" + event_reschedule_link + "][here]] to reschedule event " + " " + content )})
task_dict.update({'created_by_name':request.user.username})
task_dict.update({'Status':unicode("New")})
task_dict.update({'Priority':unicode('Normal')})
date1=datetime.date.today()
ti=datetime.time(0,0)
Today=datetime.datetime.combine(date1,ti)
task_dict.update({'start_time':Today})
task_dict.update({'Assignee':Mis_admin_list})
task = create_task(task_dict)
if reschedule_type == 'event_reschedule' :
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_date_task"})
create_gattribute(ObjectId(node),reschedule_event,{'Task':ObjectId(task._id),'Reschedule_Task':False})
else:
reschedule_event=node_collection.one({"_type":"AttributeType","name":"event_attendance_task"})
create_gattribute(ObjectId(node),reschedule_event,{'Task':ObjectId(task._id),'Reschedule_Task':False})
return_message="Message is sent to central office soon you will get update."
return HttpResponse(return_message)
@get_execution_time
def event_assginee(request, group_id, app_set_instance_id=None):
Event= request.POST.getlist("Event","")
Event_attended_by=request.POST.getlist("Event_attended_by[]","")
marks=request.POST.getlist("marks","")
assessmentdone = request.POST.get("assessmentdone","")
attendancedone = request.POST.get("attendancedone","")
attendancesession = request.POST.get("attendancesession","")
oid=node_collection.find_one({"_type" : "RelationType","name":"has_attended"})
Assignment_rel=node_collection.find({"_type":"AttributeType","name":"Assignment_marks_record"})
Assessmentmarks_rel=node_collection.find({"_type":"AttributeType","name":"Assessment_marks_record"})
performance_record=node_collection.find({"_type":"AttributeType","name":"performance_record"})
student_details=node_collection.find({"_type":"AttributeType","name":"attendance_record"})
marks_entry_completed=node_collection.find({"_type":"AttributeType","name":"marks_entry_completed"})
reschedule_attendance = node_collection.one({"_type":"AttributeType","name":"reschedule_attendance"})
event_node = node_collection.one({"_id":ObjectId(app_set_instance_id)})
    #code for saving Attendance and Assessment marks of Assignment and Assessment sessions
attendedlist=[]
for info in Event_attended_by:
a=ast.literal_eval(info)
if (a['Name'] != 'undefined'):
student_dict={}
if (a['save'] == '2' or a['save'] == '3'):
student_dict.update({"marks":a['Attendance_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),Assignment_rel[0], student_dict)
if(a['save'] == '2' or a['save'] == '4'):
student_dict.update({"marks":a['Assessment_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),Assessmentmarks_rel[0], student_dict)
if(a['save'] == '5'):
student_dict.update({"marks":a['Assessment_marks'],'Event':ObjectId(Event[0])})
create_gattribute(ObjectId(a['Name']),performance_record[0], student_dict)
create_gattribute(ObjectId(a['Name']),student_details[0],{"atandance":a['Presence'],'Event':ObjectId(Event[0])})
if(a['Presence'] == 'True'):
attendedlist.append(a['Name'])
if attendancesession != str(1):
create_gattribute(ObjectId(app_set_instance_id),marks_entry_completed[0],False)
if assessmentdone == 'True':
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(app_set_instance_id),event_status,unicode('Completed'))
create_gattribute(ObjectId(app_set_instance_id),marks_entry_completed[0],True)
reschedule_dates={}
if attendancedone == 'True' or assessmentdone == 'True':
for j in event_node.attribute_set:
if unicode('reschedule_attendance') in j.keys():
reschedule_dates = j['reschedule_attendance']
reschedule_dates["reschedule_allow"] = False
create_gattribute(ObjectId(app_set_instance_id),reschedule_attendance,reschedule_dates)
if attendancesession == str(1):
event_status = node_collection.one({"_type":"AttributeType","name":"event_status"})
create_gattribute(ObjectId(app_set_instance_id),event_status,unicode('Completed'))
create_grelation(ObjectId(app_set_instance_id), oid,attendedlist)
return HttpResponse("Details Entered")
@get_execution_time
def fetch_course_name(request, group_id,Course_type):
courses=node_collection.find({"attribute_set.nussd_course_type":unicode(Course_type)})
course_detail={}
course_list=[]
for i in courses:
course_detail.update({"name":i.name})
course_detail.update({"id":str(i._id)})
course_list.append(course_detail)
course_detail={}
return HttpResponse(json.dumps(course_list))
@get_execution_time
def fetch_course_Module(request, group_id,announced_course):
#Course_name
batch = request.GET.get('batchid','')
superdict={}
module_Detail={}
module_list=[]
event_type_ids=[]
courses = node_collection.one({"_id":ObjectId(announced_course)},{'relation_set.announced_for':1,'relation_set.acourse_for_college':1})
eventtypes = node_collection.find({'_type': "GSystemType", 'name': {'$in': ["Classroom Session", "Exam"]}})
for i in eventtypes:
event_type_ids.append(i._id)
for i in courses.relation_set:
if unicode('announced_for') in i.keys():
announced_for = i['announced_for']
if unicode('acourse_for_college') in i.keys():
for j in i['acourse_for_college']:
group_of = j
courses = node_collection.find({"_id":{'$in':announced_for}})
trainers = node_collection.find({"relation_set.trainer_teaches_course_in_college":[ObjectId(courses[0]._id),ObjectId(group_of)]})
course_modules = node_collection.find({"_id":{'$in':courses[0].collection_set}})
    #a module is listed only if at least one of its sessions is not already part of an event
checklist=[]
for i in course_modules:
checklist = i.collection_set
        #check whether any session in this collection_set is already used by an event for this batch
event = node_collection.find({"member_of":{'$in':event_type_ids},"relation_set.session_of":{'$elemMatch':{'$in':i.collection_set}}
,'relation_set.event_has_batch':ObjectId(batch)})
for k in event:
for j in k.relation_set:
if unicode('session_of') in j.keys():
if j['session_of'][0] in checklist:
checklist.remove(j['session_of'][0])
if len(checklist) > 0:
module_Detail.update({"name":i.name})
module_Detail.update({"id":str(i._id)})
module_list.append(module_Detail)
module_Detail={}
trainerlist=[]
trainer_detail={}
for i in trainers:
trainer_detail.update({"name":i.name})
trainer_detail.update({"id":str(i._id)})
trainerlist.append(trainer_detail)
trainer_detail={}
superdict['Module']=json.dumps(module_list,cls=NodeJSONEncoder)
superdict['trainer'] = json.dumps(trainerlist,cls=NodeJSONEncoder)
return HttpResponse(json.dumps(superdict))
@get_execution_time
def fetch_batch_student(request, group_id,Course_name):
try:
courses=node_collection.one({"_id":ObjectId(Course_name)},{'relation_set.has_batch_member':1})
dict1={}
list1=[]
for i in courses.relation_set:
if unicode('has_batch_member') in i.keys():
has_batch = i['has_batch_member']
for i in has_batch:
dict1.update({"id":str(i)})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except:
return HttpResponse(json.dumps(list1))
@get_execution_time
def fetch_course_session(request, group_id,Course_name):
try:
courses=node_collection.one({"_id":ObjectId(Course_name)})
batch = request.GET.get('batchid','')
dict1={}
list1=[]
checklist = []
event_type_ids = []
checklist = courses.collection_set
eventtypes = node_collection.find({'_type': "GSystemType", 'name': {'$in': ["Classroom Session", "Exam"]}})
for i in eventtypes:
event_type_ids.append(i._id)
module_node = node_collection.find({"member_of":{'$in':event_type_ids},"relation_set.session_of":{'$elemMatch':{'$in':checklist}}
,'relation_set.event_has_batch':ObjectId(batch)})
for i in module_node:
for k in i.relation_set:
if unicode('session_of') in k.keys():
if k['session_of'][0] in checklist:
checklist.remove(k['session_of'][0])
course_modules=node_collection.find({"_id":{'$in':checklist}})
for i in course_modules:
dict1.update({"name":i.name})
dict1.update({"id":str(i._id)})
for j in i.attribute_set:
if "course_structure_minutes" in j.keys() :
dict1.update({"minutes":str(j["course_structure_minutes"])})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except:
return HttpResponse(json.dumps(list1))
@get_execution_time
def fetch_course_batches(request, group_id,Course_name):
#courses=node_collection.one({"_id":ObjectId(Course_name)})
#courses=node_collection.find({"relation_set.announced_for":ObjectId(Course_name)})
try:
dict1={}
list1=[]
batch=node_collection.find({"_type":"GSystemType","name":"Batch"})
batches=node_collection.find({"member_of":batch[0]._id,"relation_set.has_course":ObjectId(Course_name)})
for i in batches:
dict1.update({"name":i.name})
dict1.update({"id":str(i._id)})
list1.append(dict1)
dict1={}
return HttpResponse(json.dumps(list1))
except:
return HttpResponse(json.dumps(list1))
@get_execution_time
def save_csv(request,group_id,app_set_instance_id=None):
#column_header = [u'Name', 'Presence','Attendance_marks','Assessment_marks']
json_data=request.POST.getlist("attendance[]","")
column_header=request.POST.getlist("column[]","")
t = time.strftime("%c").replace(":", "_").replace(" ", "_")
filename = "csv/" + "Attendance_data_" + t + ".csv"
filepath = os.path.join(STATIC_ROOT, filename)
filedir = os.path.dirname(filepath)
if not os.path.exists(filedir):
os.makedirs(filedir)
data={}
with open(filepath, 'wb') as csv_file:
fw = csv.DictWriter(csv_file, delimiter=',', fieldnames=column_header)
fw.writerow(dict((col,col) for col in column_header))
for row in list(json_data):
v = {}
fw.writerow(ast.literal_eval(row))
return HttpResponse((STATIC_URL + filename))
def get_assessment(request,group_id,app_set_instance_id):
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
node.get_neighbourhood(node.member_of)
marks_list=[]
Assesslist=[]
val=False
for i in node.has_attendees:
dict1={}
dict1.update({'name':i.name})
for j in i.attribute_set:
if j.keys()[0] == 'performance_record':
if (str(j['performance_record']['Event']) == str(app_set_instance_id)) is True:
val=True
dict1.update({'marks':j['performance_record']['marks']})
else:
dict1.update({'marks':""})
dict1.update({'id':str(i._id)})
if val is True:
marks_list.append(dict1)
else:
dict1.update({'marks':"0"})
marks_list.append(dict1)
return HttpResponse(json.dumps(marks_list))
@get_execution_time
def get_attendees(request,group_id,node):
#get all the ObjectId of the people who would attend the event
node=node_collection.one({'_id':ObjectId(node)})
attendieslist=[]
    #the code below gives the ObjectIds of the possible attendees
for i in node.relation_set:
if ('has_attendees' in i):
for j in i['has_attendees']:
attendieslist.append(j)
attendee_name=[]
    #if a batch or group id is found, fetch the attendees list from the members of those batches;
    #if members were selected individually from the interface, their names are returned directly
#attendees_id=node_collection.find({ '_id':{'$in': attendieslist}},{"group_admin":1})
attendees_id=node_collection.find({ '_id':{'$in': attendieslist}})
for i in attendees_id:
#if i["group_admin"]:
# User_info=(collectigeton.Node.find({'_type':"Author",'created_by':{'$in':i["group_admin"]}}))
#else:
User_info=(node_collection.find({'_id':ObjectId(i._id)}))
for i in User_info:
attendee_name.append(i)
attendee_name_list=[]
for i in attendee_name:
if i not in attendee_name_list:
attendee_name_list.append(i)
a=[]
d={}
for i in attendee_name_list:
d={}
d.update({'name':i.name})
d.update({'id':str(i._id)})
a.append(d)
return HttpResponse(json.dumps(a))
@get_execution_time
def get_attendance(request,group_id,node):
    #this method returns the presence/absence of each expected attendee for the event
node=node_collection.one({'_id':ObjectId(node)})
attendieslist=[]
    #the code below gives the ObjectIds of the possible attendees
for i in node.relation_set:
if ('has_attendees' in i):
for j in i['has_attendees']:
attendieslist.append(j)
attendee_name=[]
attendees_id=node_collection.find({ '_id':{'$in': attendieslist}})
for i in attendees_id:
#if i["group_admin"]:
# User_info=(node_collection.find({'_type':"Author",'created_by':{'$in':i["group_admin"]}}))
#else:
User_info=(node_collection.find({'_id':ObjectId(i._id)}))
for i in User_info:
attendee_name.append(i)
attendee_name_list=[]
for i in attendee_name:
if i not in attendee_name_list:
attendee_name_list.append(i)
a=[]
d={}
has_attended_event=node_collection.find({'_id':ObjectId(node.pk)},{'relation_set':1})
    #get all the ObjectIds of those who actually attended
attendieslist=[]
for i in has_attended_event[0].relation_set:
if ('has_attended' in i):
for j in i['has_attended']:
attendieslist.append(j)
#create the table
count=0
attendance=[]
temp_attendance={}
    #the code below compares the expected attendees with those who actually attended the event
    #and marks their presence or absence accordingly
node.get_neighbourhood(node.member_of)
Assess_marks_list=[]
Assign_marks_list=[]
Assesslist=[]
marks_list=[]
val=False
assign=False
asses=False
member_of=node_collection.one({"_id":{'$in':node.member_of}})
for i in attendee_name_list:
if (i._id in attendieslist):
attendees=node_collection.one({"_id":ObjectId(i._id)})
dict1={}
dict2={}
for j in attendees.attribute_set:
if member_of.name != "Exam":
if unicode('Assignment_marks_record') in j.keys():
if (str(j['Assignment_marks_record']['Event']) == str(node._id)) is True:
val=True
assign=True
dict1.update({'marks':j['Assignment_marks_record']['marks']})
else:
dict1.update({'marks':"0"})
if unicode('Assessment_marks_record') in j.keys():
if(str(j['Assessment_marks_record']['Event']) == str(node._id)) is True:
val=True
asses=True
dict2.update({'marks':j['Assessment_marks_record']['marks']})
else:
dict2.update({'marks':"0"})
if member_of.name == "Exam":
dict1.update({'marks':"0"})
if unicode('performance_record') in j.keys():
if(str(j['performance_record']['Event']) == str(node._id)) is True:
val=True
asses=True
dict2.update({'marks':j['performance_record']['marks']})
else:
dict2.update({'marks':"0"})
temp_attendance.update({'id':str(i._id)})
temp_attendance.update({'name':i.name})
temp_attendance.update({'presence':'Present'})
if dict1.has_key('marks'):
temp_attendance.update({'Assignment_marks':dict1['marks']})
if dict2.has_key('marks'):
temp_attendance.update({'Assessment_marks':dict2['marks']})
attendance.append(temp_attendance)
else:
temp_attendance.update({'id':str(i._id)})
temp_attendance.update({'name':i.name})
temp_attendance.update({'presence':'Absent'})
temp_attendance.update({'Assignment_marks':"0"})
temp_attendance.update({'Assessment_marks':"0"})
attendance.append(temp_attendance)
temp_attendance={}
return HttpResponse(json.dumps(attendance))
@get_execution_time
def attendees_relations(request,group_id,node):
test_output = node_collection.find({"_id":ObjectId(node),"attribute_set.start_time":{'$lt':datetime.datetime.today()}})
if test_output.count() != 0:
event_has_attended=node_collection.find({'_id':ObjectId(node)})
column_list=[]
column_count=0
course_assignment=False
course_assessment=False
reschedule = True
#marks = False
marks = True
member_of=node_collection.one({"_id":{'$in':event_has_attended[0].member_of}})
if member_of.name != "Exam":
for i in event_has_attended[0].relation_set:
                #True if the has_attended relation is there (meaning attendance has already been taken)
                #False means attendance has not been taken yet for this event
if ('has_attended' in i):
a = "True"
else:
a = "False"
if ('session_of' in i):
session=node_collection.one({"_id":{'$in':i['session_of']}})
for i in session.attribute_set:
if unicode('course_structure_assignment') in i:
if i['course_structure_assignment'] == True:
course_assignment=True
if unicode('course_structure_assessment') in i:
if i['course_structure_assessment'] == True:
course_assessment=True
                # meaning of the numbers
                #2 :- populate both assessment and assignment marks columns
                #3 :- populate only the assignment marks column
                #4 :- populate only the assessment marks column
                #1 :- populate only the attendance-taking part, do not populate the assessment and assignment columns
if course_assessment == True:
column_count = 4
if course_assignment == True:
column_count = 3
if (course_assessment == True and course_assignment == True):
column_count = 2
if (course_assignment == False and course_assessment == False):
column_count = 1
column_list.append(a)
column_list.append(column_count)
else:
column_count=5
column_list.append('True')
column_list.append(column_count)
node = node_collection.one({"_id":ObjectId(node)})
for i in node.attribute_set:
if unicode("reschedule_attendance") in i.keys():
if unicode('reschedule_allow') in i['reschedule_attendance']:
reschedule=i['reschedule_attendance']['reschedule_allow']
if unicode("marks_entry_completed") in i.keys():
marks=i["marks_entry_completed"]
column_list.append(reschedule)
column_list.append(marks)
else:
column_list=[]
return HttpResponse(json.dumps(column_list))
@get_execution_time
def page_scroll(request,group_id,page):
Group_Activity = node_collection.find(
{'group_set':ObjectId(group_id)}).sort('last_update', -1)
if Group_Activity.count() >=10:
paged_resources = Paginator(Group_Activity,10)
else:
paged_resources = Paginator(Group_Activity,Group_Activity.count())
files_list = []
user_activity = []
tot_page=paged_resources.num_pages
if int(page) <= int(tot_page):
if int(page)==1:
page='1'
if int(page) != int(tot_page) and int(page) != int(1):
page=int(page)+1
for each in (paged_resources.page(int(page))).object_list:
if each.created_by == each.modified_by :
if each.last_update == each.created_at:
activity = 'created'
else:
activity = 'modified'
else:
activity = 'created'
if each._type == 'Group':
user_activity.append(each)
each.update({'activity':activity})
files_list.append(each)
else:
page=0
return render_to_response('ndf/scrolldata.html',
{ 'activity_list': files_list,
'group_id': group_id,
'groupid':group_id,
'page':page
# 'imageCollection':imageCollection
},
context_instance = RequestContext(request)
)
@get_execution_time
def get_batches_with_acourse(request, group_id):
"""
This view returns list of batches that match given criteria
along with Announced-course for which match doesn't exists.
Arguments:
group_id - ObjectId of the currently selected group
"""
response_dict = {'success': False, 'message': ""}
batches_list = []
batch_gst = node_collection.one({'_type':'GSystemType','name':'Batch'})
try:
if request.is_ajax() and request.method == "GET":
# Fetch field(s) from GET object
announced_course_id = request.GET.get("ac_id", "")
mis_admin = node_collection.one({'_type': "Group", 'name': "MIS_admin"})
if(ObjectId(group_id) == mis_admin._id):
pass
else:
colg_gst = node_collection.one({'_type': "GSystemType", 'name': 'College'})
req_colg_id = node_collection.one({'member_of':colg_gst._id,'relation_set.has_group':ObjectId(group_id)})
b = node_collection.find({'member_of':batch_gst._id,'relation_set.has_course':ObjectId(announced_course_id)})
for each in b:
batches_list.append(each)
response_dict["success"] = True
info_message = "Batch for this course is available"
response_dict["message"] = info_message
response_dict["batches_list"] = json.dumps(batches_list, cls=NodeJSONEncoder)
return HttpResponse(json.dumps(response_dict))
else:
error_message = " BatchFetchError: Either not an ajax call or not a GET request!!!"
return HttpResponse(json.dumps({'message': " BatchCourseFetchError - Something went wrong in ajax call !!! \n\n Please contact system administrator."}))
except Exception as e:
error_message = "\n BatchFetchError: " + str(e) + "!!!"
return HttpResponse(json.dumps({'message': error_message}))
| agpl-3.0 | 7,496,256,682,763,233,000 | 40.658953 | 521 | 0.557128 | false |
yuhangc/HRI_planner | scripts/hri/human_traj_generator.py | 1 | 5437 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
class HumanTrajGenerator:
def __init__(self, T, dt):
self.T = T
self.dt = dt
# set some parameters
self.v_max = 1.0
self.a_max = 0.6
self.k_v = 0.8
self.k_hr = 0.6
self.th_hr = 2.5
self.u_std = [0.1, 0.1]
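        # Parameter meanings (inferred from their use below, not documented upstream):
        # v_max / a_max cap the human's speed and acceleration, k_v is the proportional
        # gain pulling the human toward the goal, k_hr scales the repulsive term used to
        # avoid the robot, th_hr is the distance below which that avoidance activates,
        # and u_std holds the std-dev of the Gaussian noise added to each control axis.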
def generate_path_ignore_robot(self, x_init, x_goal):
x = []
u = []
x_last = x_init
for t in range(self.T):
# compute the desired velocity first
x_diff = x_goal - x_last[0:2]
vd = self.k_v * x_diff
# clip the velocity
v_dir = np.abs(vd) / np.linalg.norm(vd)
vd = np.clip(vd, -self.v_max * v_dir, self.v_max * v_dir)
# compute desired acceleration and clip
ud = (vd - x_last[2:4]) / self.dt
u_dir = np.abs(ud) / np.linalg.norm(ud)
ud = np.clip(ud, -self.a_max * u_dir, self.a_max * u_dir)
# inject noise into control
dux = np.random.normal(0.0, self.u_std[0], 1)[0]
duy = np.random.normal(0.0, self.u_std[1], 1)[0]
ud += np.array([dux, duy])
# compute the actual velocity and displacement
x_new = np.zeros((4, ))
x_new[0:2] = x_last[0:2] + x_last[2:4] * self.dt + 0.5 * ud * self.dt**2
x_new[2:4] = x_last[2:4] + ud * self.dt
# append to list
x.append(x_new)
u.append(ud)
x_last = x_new
# visualize
x = np.asarray(x)
u = np.asarray(u)
fig, ax = plt.subplots()
ax.plot(x[:, 0], x[:, 1], "-o", color=(0.1, 0.1, 0.1), fillstyle="none", lw=1.5, label="human_traj")
ax.plot(x_goal[0], x_goal[1], 'ok')
ax.axis("equal")
plt.show()
return x, u
def generate_path_avoid_robot(self, x_init, x_goal, x_robot):
x = []
u = []
x_last = x_init
for t in range(self.T):
# compute the desired velocity first
x_diff = x_goal - x_last[0:2]
vd = self.k_v * x_diff
# clip the velocity
v_dir = np.abs(vd) / np.linalg.norm(vd)
vd = np.clip(vd, -self.v_max * v_dir, self.v_max * v_dir)
# compute desired acceleration and clip
ud = (vd - x_last[2:4]) / self.dt
# add in "force/acc" from avoiding robot
x_rh = x_last[0:2] - x_robot
dot = np.dot(-x_rh, x_diff)
if dot > 0 and np.linalg.norm(x_rh) < self.th_hr:
f_hr = self.k_hr * x_rh
# make f_hr perpendicular to ud
f_hr = np.array([-x_diff[1], x_diff[0]]) / np.linalg.norm(x_diff) * np.linalg.norm(f_hr)
else:
f_hr = np.array([0.0, 0.0])
ud += f_hr
u_dir = np.abs(ud) / np.linalg.norm(ud)
ud = np.clip(ud, -self.a_max * u_dir, self.a_max * u_dir)
# inject noise into control
dux = np.random.normal(0.0, self.u_std[0], 1)[0]
duy = np.random.normal(0.0, self.u_std[1], 1)[0]
ud += np.array([dux, duy])
# compute the actual velocity and displacement
x_new = np.zeros((4, ))
x_new[0:2] = x_last[0:2] + x_last[2:4] * self.dt + 0.5 * ud * self.dt**2
x_new[2:4] = x_last[2:4] + ud * self.dt
# append to list
x.append(x_new)
u.append(ud)
x_last = x_new
# visualize
x = np.asarray(x)
u = np.asarray(u)
fig, ax = plt.subplots()
ax.plot(x[:, 0], x[:, 1], "-o", color=(0.1, 0.1, 0.1), fillstyle="none", lw=1.5, label="human_traj")
ax.plot(x_goal[0], x_goal[1], 'ok')
ax.axis("equal")
plt.show()
return x, u
def gen_and_save_trajectories(path, trial=-1, method="ignore_robot"):
# load init and goal data
init_data = np.loadtxt(path + "/init.txt", delimiter=",")
goal_data = np.loadtxt(path + "/goal.txt", delimiter=",")
# create a generator
generator = HumanTrajGenerator(16, 0.5)
# generate a single trajectory
if trial == -1:
i = 0
for x_init, x_goal in zip(init_data, goal_data):
if method == "ignore_robot":
x, u = generator.generate_path_ignore_robot(x_init[0:4], x_goal[0:2])
else:
x_robot = 0.5 * (x_init[0:2] + x_goal[0:2])
x, u = generator.generate_path_avoid_robot(x_init[0:4], x_goal[0:2], x_robot)
# save data to file
np.savetxt(path + "/test" + str(i) + ".txt", np.hstack((x, u)), delimiter=',')
i += 1
else:
x_init = init_data[trial]
x_goal = goal_data[trial]
if method == "ignore_robot":
x, u = generator.generate_path_ignore_robot(x_init[0:4], x_goal[0:2])
else:
x_robot = 0.5 * (x_init[0:2] + x_goal[0:2])
x, u = generator.generate_path_avoid_robot(x_init[0:4], x_goal[0:2], x_robot)
# save data to file
np.savetxt(path + "/test" + str(trial) + ".txt", np.hstack((x, u)), delimiter=',')
if __name__ == "__main__":
# gen_and_save_trajectories("/home/yuhang/Documents/hri_log/test_data")
gen_and_save_trajectories("/home/yuhang/Documents/hri_log/test_data", trial=4, method="avoid_robot")
| apache-2.0 | -6,814,724,475,351,384,000 | 31.363095 | 108 | 0.48924 | false |
WaveBlocks/WaveBlocksND | examples/henon_heiles/henon2_p.py | 1 | 1311 | algorithm = "hagedorn"
propagator = "semiclassical"
splitting_method = "Y4"
T = 6
dt = 0.01
dimension = 2
ncomponents = 1
eps = 0.05
potential = "henon_heiles"
a = 1
b = 3
# The parameter set of the initial wavepacket
Q = [[1.0, 0.0],
[0.0, 1.0]]
P = [[1.0j, 0.0 ],
[0.0, 1.0j]]
q = [[0.06],
[0.0]]
p = [[-0.01],
[ 0.01]]
S = [[0.0]]
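# A note on the parameter set (an assumption based on common Hagedorn wavepacket
# conventions, not stated in this file): q and p are the position and momentum
# centres, Q and P are the complex shape matrices, and S is the global phase;
# together they form Pi = [q, p, Q, P, S] as referenced below.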
# What it takes to specify a wavepacket!
wp0 = {
"type": "HagedornWavepacket",
"dimension": 2,
"ncomponents": 1,
"eps": eps,
"Pi": [q, p, Q, P, S],
"basis_shapes": [{
"type": "HyperCubicShape",
"limits": [10, 10],
"dimension": 2
}],
"coefficients": [[((0, 0), 1.0)]],
"innerproduct": {
"type": "HomogeneousInnerProduct",
"delegate": {
"type": "DirectHomogeneousQuadrature",
'qr': {
'type': 'TensorProductQR',
'dimension': 2,
'qr_rules': [{'dimension': 1, 'order': 15, 'type': 'GaussHermiteQR'},
{'dimension': 1, 'order': 15, 'type': 'GaussHermiteQR'}],
}
}
}
}
# Which wavepackets are initial values
initvals = [wp0]
leading_component = 0
# How often do we write data to disk
write_nth = 5
matrix_exponential = "arnoldi"
arnoldi_steps = 175
| bsd-3-clause | 1,590,440,808,835,866,000 | 18.279412 | 86 | 0.508772 | false |
mromanello/CitationExtractor | citation_extractor/settings/svm.py | 1 | 1036 | """Settings for an SVM-based citation extractor."""
import pkg_resources
from sklearn.svm import LinearSVC
# Sets debug on (=true) or off (=false)
DEBUG = False
POS = True
# leave empty to write the log to the console
LOG_FILE = ""
# list of directories containing data (IOB format with .iob extension)
DATA_DIRS = (
pkg_resources.resource_filename(
'citation_extractor',
'data/aph_corpus/goldset/iob/'
),
)
CLASSIFIER = LinearSVC(verbose=False)
TEST_DIR = ()
TRAIN_COLLECTIONS = ()
TEST_COLLECTIONS = ()
DATA_FILE = ""
TEMP_DIR = ""
OUTPUT_DIR = ""
# number of iterations for the k-fold cross validation
CROSS_VAL_FOLDS = 10
CRFPP_TEMPLATE_DIR = pkg_resources.resource_filename(
'citation_extractor',
'crfpp_templates/'
)
CRFPP_TEMPLATE = "template_5.tpl"
# Leave empty to use CRF++'s default value
CRFPP_PARAM_C = ''
# Leave empty to use CRF++'s default value
CRFPP_PARAM_A = ''
# Leave empty to use CRF++'s default value
CRFPP_PARAM_F = ''
| gpl-3.0 | 4,482,813,493,128,714,000 | 19.313725 | 70 | 0.665058 | false |
Astyan-42/skepticalscience | skepticalsciencewebsite/custompayment/forms.py | 1 | 3142 | from django import forms
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from custompayment.models import Address, Order
class AddressForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AddressForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_addressForm'
self.helper.add_input(Submit('submit', _('Save')))
class Meta:
model = Address
fields = ["first_name", "last_name", "company_name", "street_address_1", "street_address_2", "city",
"city_area", "postal_code", "country", "country_area", "phone"]
class DiscountOrderForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(DiscountOrderForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
# self.helper.form_class = 'form-inline' STUPID INLINE FORM
# self.helper.field_template = 'bootstrap3/layout/inline_field.html'
self.helper.form_id = 'id_discountorderForm'
self.helper.add_input(Submit('submit', _('Apply')))
    def clean(self):
        # keep the default cleaning behaviour; the discount field itself is validated in is_valid()
        try:
            self.cleaned_data['discount']
        except KeyError:
            pass
        return self.cleaned_data
def is_valid(self):
valid = super(DiscountOrderForm, self).is_valid()
if not valid:
return valid
discount = self.cleaned_data['discount']
if discount is None:
self.add_error('discount', forms.ValidationError(_("Empty value not authorised")))
return False
today = timezone.now().date()
if today < discount.starting_date:
self.add_error('discount', forms.ValidationError(_("This discount code hasn't started yet")))
return False
elif today > discount.ending_date:
self.add_error('discount', forms.ValidationError(_("This discount code has ended")))
return False
return True
class Meta:
model = Order
fields = ["discount"]
widgets = {'discount' : forms.TextInput()}
class PaymentMethodsForm(forms.Form):
method = forms.ChoiceField(
choices=settings.CHECKOUT_PAYMENT_CHOICES, widget=forms.RadioSelect,
initial=settings.CHECKOUT_PAYMENT_CHOICES[0][0])
def __init__(self, *args, **kwargs):
super(PaymentMethodsForm, self).__init__(*args, ** kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_paymentmethodForm'
self.helper.add_input(Submit('submit', _('Proceed to payment')))
class AcceptSellingForm(forms.Form):
accepted = forms.BooleanField(label="Accept the conditions of sell", initial=False)
def __init__(self, *args, **kwargs):
super(AcceptSellingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_acceptsellingForm'
self.helper.add_input(Submit('submit', _('Accept and pay'))) | agpl-3.0 | -482,083,300,925,337,660 | 36.86747 | 108 | 0.63972 | false |
Vaidyanath/tempest | tempest/tests/cmd/test_verify_tempest_config.py | 1 | 17401 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from tempest.cmd import verify_tempest_config
from tempest import config
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
def test_url_grab_versioned_nova_nossl(self):
base_url = 'http://127.0.0.1:8774/v2/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('http://127.0.0.1:8774', endpoint)
def test_url_grab_versioned_nova_ssl(self):
base_url = 'https://127.0.0.1:8774/v3/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1:8774', endpoint)
class TestDiscovery(base.TestCase):
def setUp(self):
super(TestDiscovery, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
def test_get_keystone_api_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_get_cinder_api_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
self.assertIn('v1.0', versions)
self.assertIn('v2.0', versions)
def test_get_nova_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_verify_api_versions(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, svc, True)
verify_mock.assert_called_once_with(fake_os, True)
def test_verify_api_versions_not_implemented(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
self.assertFalse(verify_mock.called)
def test_verify_keystone_api_versions_no_v3(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v3',
'identity_feature_enabled',
False, True)
def test_verify_keystone_api_versions_no_v2(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2',
'identity_feature_enabled',
False, True)
def test_verify_cinder_api_versions_no_v2(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
False, True)
def test_verify_cinder_api_versions_no_v1(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config.RAW_HTTP, 'request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_1(self):
def fake_get_versions():
return (['v1.1'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
def fake_get_versions():
return (['v1.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
False, True)
def test_verify_glance_version_no_v1(self):
def fake_get_versions():
return (['v2.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
False, True)
def test_verify_extensions_neutron(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('fake1', results['neutron'])
self.assertTrue(results['neutron']['fake1'])
self.assertIn('fake2', results['neutron'])
self.assertTrue(results['neutron']['fake2'])
self.assertIn('fake3', results['neutron'])
self.assertFalse(results['neutron']['fake3'])
self.assertIn('not_fake', results['neutron'])
self.assertFalse(results['neutron']['not_fake'])
def test_verify_extensions_neutron_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['neutron']['extensions']))
def test_verify_extensions_cinder(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('fake1', results['cinder'])
self.assertTrue(results['cinder']['fake1'])
self.assertIn('fake2', results['cinder'])
self.assertTrue(results['cinder']['fake2'])
self.assertIn('fake3', results['cinder'])
self.assertFalse(results['cinder']['fake3'])
self.assertIn('not_fake', results['cinder'])
self.assertFalse(results['cinder']['not_fake'])
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['cinder']['extensions']))
def test_verify_extensions_nova(self):
def fake_list_extensions():
return ([{'alias': 'fake1'}, {'alias': 'fake2'},
{'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('fake1', results['nova'])
self.assertTrue(results['nova']['fake1'])
self.assertIn('fake2', results['nova'])
self.assertTrue(results['nova']['fake2'])
self.assertIn('fake3', results['nova'])
self.assertFalse(results['nova']['fake3'])
self.assertIn('not_fake', results['nova'])
self.assertFalse(results['nova']['not_fake'])
def test_verify_extensions_nova_all(self):
def fake_list_extensions():
return ({'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['nova']['extensions']))
def test_verify_extensions_swift(self):
def fake_list_extensions():
return (None, {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'})
fake_os = mock.MagicMock()
fake_os.account_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
self.assertIn('swift', results)
self.assertIn('fake1', results['swift'])
self.assertTrue(results['swift']['fake1'])
self.assertIn('fake2', results['swift'])
self.assertTrue(results['swift']['fake2'])
self.assertIn('fake3', results['swift'])
self.assertFalse(results['swift']['fake3'])
self.assertIn('not_fake', results['swift'])
self.assertFalse(results['swift']['not_fake'])
def test_verify_extensions_swift_all(self):
def fake_list_extensions():
return (None, {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'})
fake_os = mock.MagicMock()
fake_os.account_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
sorted(results['swift']['extensions']))
| apache-2.0 | -2,029,038,440,269,864,700 | 47.470752 | 79 | 0.569163 | false |
giliam/turbo-songwriter | backend/songwriter/migrations/0001_initial.py | 1 | 5963 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-04 14:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstname', models.CharField(blank=True, default='', max_length=150)),
('lastname', models.CharField(blank=True, default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Chord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.CharField(default='', max_length=15)),
],
),
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Harmonization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spot_in_verse', models.PositiveIntegerField()),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
('chord', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Chord')),
],
),
migrations.CreateModel(
name='Paragraph',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField()),
('is_refrain', models.BooleanField(default=False, verbose_name='Is a refrain paragraph?')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=150)),
('rights_paid', models.BooleanField(default=True, verbose_name='rights paid')),
('secli_number', models.CharField(blank=True, default='', max_length=150)),
('sacem_number', models.CharField(blank=True, default='', max_length=150)),
('comments', models.TextField(verbose_name='Comments')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Author')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Editor')),
],
),
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=150)),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.CreateModel(
name='Verse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField()),
('content', models.TextField()),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the database')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
],
),
migrations.AddField(
model_name='song',
name='theme',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='songwriter.Theme'),
),
migrations.AddField(
model_name='paragraph',
name='song',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paragraphs', to='songwriter.Song'),
),
migrations.AddField(
model_name='harmonization',
name='verse',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='harmonizations', to='songwriter.Verse'),
),
]
| mit | 1,539,306,744,246,456,000 | 52.241071 | 135 | 0.584605 | false |
hkzhe/ddz_project | game_server/client.py | 1 | 1279 | import socket
import json
import struct
import sys
import time
def build_login_cmd():
cmd_dict = {}
cmd_dict["userID"] = sys.argv[1]
cmd_dict["cmd"] = "login"
return json.dumps( cmd_dict )
def send_cmd( sock , cmd ):
cmd_len = len( cmd )
send_str = struct.pack( 'i' , cmd_len )
sock.send( send_str )
sock.send( cmd )
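# Wire format used by send_cmd()/recv_cmd(): a 4-byte native-endian length header
# produced by struct.pack('i', len(body)), followed by the JSON body itself.
# For example (hypothetical payload), sending '{"cmd": "login"}' writes
# struct.pack('i', 16) to the socket first and then the 16 JSON bytes.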
def send_out_cards( sock , my_pokes ):
uid = sys.argv[1]
cmd_dict = {}
cmd_dict[ "cmd" ] = "outcard"
cmd_dict["userID"] = sys.argv[1]
cmd_dict[ "outPokes" ] = [ my_pokes[0] , my_pokes[1] ]
print "send pokes = %d , %d" %( my_pokes[0] , my_pokes[1] )
send_cmd( sock , json.dumps( cmd_dict ) )
def recv_cmd( sock ):
head_str = sock.recv( 4 )
tmp_tuple = struct.unpack( 'i' , head_str )
body_len = tmp_tuple[0]
body_str = sock.recv( body_len )
print "recv cmd = " + body_str
return body_str
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 8000))
cmd = build_login_cmd()
send_cmd( sock , cmd )
cmd_str = recv_cmd( sock )
cmd_dict = json.loads( cmd_str )
my_pokes = cmd_dict[ sys.argv[1] ]
boss_id = cmd_dict[ "boss" ]
#if boss_id == sys.argv[1] :
#send_out_cards( sock , my_pokes )
recv_cmd( sock )
time.sleep(10)
| bsd-3-clause | 6,844,430,092,311,269,000 | 24.102041 | 60 | 0.591869 | false |
dongweiming/web_develop | chapter10/section2/server.py | 1 | 3462 | # coding=utf-8
import os
import sys
from datetime import datetime
sys.path.append('gen-py')
sys.path.append('/usr/lib/python2.7/site-packages')
from flask_sqlalchemy import SQLAlchemy
from app import app
from models import PasteFile as BasePasteFile
from utils import get_file_md5
db = SQLAlchemy(app)
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from pastefile import PasteFileService
from pastefile.ttypes import PasteFile, UploadImageError, NotFound
class RealPasteFile(db.Model, BasePasteFile):
def __init__(self, *args, **kwargs):
BasePasteFile.__init__(self, *args, **kwargs)
@classmethod
def create_by_upload_file(cls, uploaded_file):
rst = uploaded_file
with open(rst.path) as f:
filemd5 = get_file_md5(f)
uploaded_file = cls.get_by_md5(filemd5)
if uploaded_file:
os.remove(rst.path)
return uploaded_file
filestat = os.stat(rst.path)
rst.size = filestat.st_size
rst.filemd5 = filemd5
return rst
def get_url(self, subtype, is_symlink=False):
hash_or_link = self.symlink if is_symlink else self.filehash
return 'http://%s/{subtype}/{hash_or_link}'.format(
subtype=subtype, hash_or_link=hash_or_link)
class PasteFileHandler(object):
def get_file_info(self, filename, mimetype):
rst = RealPasteFile(filename, mimetype, 0)
return rst.filehash, rst.path
def create(self, request):
width = request.width
height = request.height
upload_file = RealPasteFile(request.filename, request.mimetype, 0,
request.filehash)
try:
if width and height:
paste_file = RealPasteFile.rsize(upload_file, width, height)
else:
paste_file = RealPasteFile.create_by_upload_file(
upload_file)
except:
raise UploadImageError()
db.session.add(paste_file)
db.session.commit()
return self.convert_type(paste_file)
def get(self, pid):
paste_file = RealPasteFile.query.filter_by(id=pid).first()
if not paste_file:
raise NotFound()
return self.convert_type(paste_file)
@classmethod
def convert_type(cls, paste_file):
        '''Convert the model instance into the Thrift PasteFile struct type.'''
new_paste_file = PasteFile()
for attr in ('id', 'filehash', 'filename', 'filemd5', 'uploadtime',
'mimetype', 'symlink', 'size', 'quoteurl', 'size', 'type',
'url_d', 'url_i', 'url_s', 'url_p'):
val = getattr(paste_file, attr)
if isinstance(val, unicode):
val = val.encode('utf-8')
if isinstance(val, datetime):
val = str(val)
setattr(new_paste_file, attr, val)
return new_paste_file
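# Implementation note (an assumption about the py2 Thrift binding in use): Thrift has
# no native datetime type and its string fields expect plain byte strings, which is why
# convert_type() above casts unicode values to utf-8 and datetime values to str.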
if __name__ == '__main__':
import logging
logging.basicConfig()
handler = PasteFileHandler()
processor = PasteFileService.Processor(handler)
transport = TSocket.TServerSocket(port=8200)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TThreadPoolServer(
processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
| gpl-3.0 | 7,807,537,685,541,028,000 | 31.130841 | 79 | 0.62071 | false |
YoungKwonJo/mlxtend | tests/tests_classifier/test_logistic_regression.py | 1 | 1935 | from mlxtend.classifier import LogisticRegression
from mlxtend.data import iris_data
import numpy as np
X, y = iris_data()
X = X[:, [0, 3]] # sepal length and petal width
X = X[0:100] # class 0 and class 1
y = y[0:100] # class 0 and class 1
# standardize
X[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
def test_logistic_regression_gd():
t = np.array([0.55, 1.29, 4.39])
lr = LogisticRegression(epochs=100, eta=0.01, learning='gd', random_seed=0)
lr.fit(X, y) # 0, 1 class
np.testing.assert_almost_equal(lr.w_, t, 2)
assert((y == lr.predict(X)).all())
def test_logistic_regression_sgd():
t = np.array([0.55, 1.26, 4.39])
lr = LogisticRegression(epochs=100, eta=0.01,
learning='sgd', random_seed=0)
lr.fit(X, y) # 0, 1 class
np.testing.assert_almost_equal(lr.w_, t, 2)
assert((y == lr.predict(X)).all())
def test_l2_regularization_gd():
lr = LogisticRegression(eta=0.01,
epochs=20,
learning='gd',
l2_lambda=1.0,
regularization='l2',
random_seed=0)
lr.fit(X, y)
y_pred = lr.predict(X)
expect_weights = np.array([0.252, 1.186, 2.296])
np.testing.assert_almost_equal(lr.w_, expect_weights, 3)
acc = sum(y_pred == y) / len(y)
assert(acc == 1.0)
def test_l2_regularization_sgd():
lr = LogisticRegression(eta=0.01, epochs=20,
learning='sgd',
l2_lambda=1.0,
regularization='l2',
random_seed=0)
lr.fit(X, y)
y_pred = lr.predict(X)
expect_weights = np.array([0.100, 0.232, 0.346])
np.testing.assert_almost_equal(lr.w_, expect_weights, 3)
acc = sum(y_pred == y) / len(y)
assert(acc == 1.0)
| bsd-3-clause | -4,449,064,169,604,691,000 | 29.714286 | 79 | 0.518863 | false |
lambdamusic/OntoSPy | ontospy/ontodocs/utils.py | 1 | 6309 | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
import json
# ===========
# Utilities
# ===========
def build_D3treeStandard(old, MAX_DEPTH, level=1, toplayer=None):
"""
For d3s examples all we need is a json with name, children and size .. eg
{
"name": "flare",
"children": [
{
"name": "analytics",
"children": [
{
"name": "cluster",
"children": [
{"name": "AgglomerativeCluster", "size": 3938},
{"name": "CommunityStructure", "size": 3812},
{"name": "HierarchicalCluster", "size": 6714},
{"name": "MergeEdge", "size": 743}
]
},
etc...
"""
out = []
if not old:
old = toplayer
for x in old:
d = {}
# print "*" * level, x.label
d['qname'] = x.qname
d['name'] = x.bestLabel(quotes=False).replace("_", " ")
d['objid'] = x.id
if x.children() and level < MAX_DEPTH:
d['size'] = len(x.children()) + 5 # fake size
d['realsize'] = len(x.children()) # real size
d['children'] = build_D3treeStandard(x.children(), MAX_DEPTH,
level + 1)
else:
d['size'] = 1 # default size
d['realsize'] = 0 # default size
out += [d]
return out
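def _example_build_D3treeStandard(graph, max_depth=3):
    # Hypothetical usage sketch (not part of the original module): 'graph' is assumed
    # to be an Ontospy model whose toplayer_classes attribute holds the root classes.
    # It shows how the builder above is typically consumed by the d3 templates.
    tree = {"name": "owl:Thing",
            "children": build_D3treeStandard(None, max_depth, toplayer=graph.toplayer_classes)}
    return json.dumps(tree)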
# note: duplicate of the templatetags helper, kept here to avoid circular imports
def truncchar_inverse(value, arg):
if len(value) < arg:
return value
else:
x = len(value) - arg
return '...' + value[x:]
def build_D3bubbleChart(old, MAX_DEPTH, level=1, toplayer=None):
"""
    Similar to the standard d3 tree above, but nodes with children need to be duplicated, otherwise they are
    not depicted explicitly but only color coded
"name": "all",
"children": [
{"name": "Biological Science", "size": 9000},
{"name": "Biological Science", "children": [
{"name": "Biological techniques", "size": 6939},
{"name": "Cell biology", "size": 4166},
{"name": "Drug discovery X", "size": 3620, "children": [
{"name": "Biochemistry X", "size": 4585},
{"name": "Biochemistry X", "size": 4585 },
]},
{"name": "Drug discovery Y", "size": 3620, "children": [
{"name": "Biochemistry Y", "size": 4585},
{"name": "Biochemistry Y", "size": 4585 },
]},
{"name": "Drug discovery A", "size": 3620, "children": [
{"name": "Biochemistry A", "size": 4585},
]},
{"name": "Drug discovery B", "size": 3620, },
]},
etc...
"""
out = []
if not old:
old = toplayer
for x in old:
d = {}
# print "*" * level, x.label
d['qname'] = x.qname
d['name'] = x.bestLabel(quotes=False).replace("_", " ")
d['objid'] = x.id
if x.children() and level < MAX_DEPTH:
duplicate_row = {}
duplicate_row['qname'] = x.qname
duplicate_row['name'] = x.bestLabel(quotes=False).replace("_", " ")
duplicate_row['objid'] = x.id
duplicate_row['size'] = len(x.children()) + 5 # fake size
duplicate_row['realsize'] = len(x.children()) # real size
out += [duplicate_row]
d['children'] = build_D3bubbleChart(x.children(), MAX_DEPTH,
level + 1)
else:
d['size'] = 1 # default size
d['realsize'] = 0 # default size
out += [d]
return out
def build_D3treepie(old, MAX_DEPTH, level=1, toplayer=None):
"""
Create the JSON needed by the treePie viz
http://bl.ocks.org/adewes/4710330/94a7c0aeb6f09d681dbfdd0e5150578e4935c6ae
Eg
['origin' , [n1, n2],
{ 'name1' :
['name1', [n1, n2],
{'name1-1' : ...}
] ,
} ,
]
"""
d = {}
if not old:
old = toplayer
for x in old:
label = x.bestLabel(quotes=False).replace("_", " ")
if x.children() and level < MAX_DEPTH:
size = len(x.children())
d[x.qname] = [
label, [size, size],
build_D3treepie(x.children(), MAX_DEPTH, level + 1)
]
else:
size = 1
d[x.qname] = [label, [size, size], {}]
return d
##################
#
# TREE DISPLAY FUNCTIONS [from ontospy web]
#
##################
def formatHTML_EntityTreeTable(treedict, element=0):
""" outputs an html tree representation based on the dictionary we get from the Inspector
object....
EG:
<table class=h>
<tr>
<td class="tc" colspan=4><a href="../DataType">DataType</a>
</td>
</tr>
<tr>
<td class="tc" colspan=4><a href="../DataType">DataType</a>
</td>
</tr>
<tr>
<td class="space"></td>
<td class="bar"></td>
<td class="space"></td>
<td>
<table class=h>
<tr><td class="tc" colspan=4><a href="../Boolean">Boolean</a>
</td>
</tr>
<tr><td class="tc" colspan=4><a href="../Boolean">Boolean</a>
</td>
</tr>
</table>
</td>
</tr>
</table>
Note: The top level owl:Thing never appears as a link.
"""
# ontoFile = onto.ontologyMaskedLocation or onto.ontologyPhysicalLocation
# if not treedict:
# treedict = onto.ontologyClassTree()
stringa = """<table class="h">"""
for x in treedict[element]:
if x.qname == "owl:Thing":
stringa += """<tr>
<td class="tc" colspan=4><a>%s</a></td>
</tr>""" % (truncchar_inverse(x.qname, 50))
else:
stringa += """<tr>
<td class="tc" colspan=4><a title=\"%s\" class=\"treelinks\" href=\"%s.html\">%s</a></td>
</tr>""" % (x.uri, x.slug, truncchar_inverse(x.qname, 50))
if treedict.get(x, None):
stringa += """ <tr>
<td class="space"></td>
<td class="bar"></td>
<td class="space"></td>
<td>%s</td>
</tr>""" % formatHTML_EntityTreeTable(treedict, x)
# stringa += formatHTML_ClassTree(onto, treedict, x)
# stringa += "</li>"
stringa += "</table>"
return stringa
def get_onto_for_testing(TEST_ONLINE=False):
"Wrapper for util script used in viz main methods"
if TEST_ONLINE:
from ontospy import Ontospy
g = Ontospy("http://cohere.open.ac.uk/ontology/cohere.owl#")
else:
from ontospy.core.manager import get_random_ontology
uri, g = get_random_ontology(50)
return g
| gpl-3.0 | -6,854,177,205,183,017,000 | 26.077253 | 96 | 0.5191 | false |
wutali/sauron | sauron/metrics/RedisMetric.py | 1 | 6731 | import redis
from sauron import logger
from sauron.metrics import Metric, MetricException
class RedisMetric(Metric):
@staticmethod
def parseMemory(x):
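        # Interpret redis "human" memory strings like '2.5G' or '512K' and
        # return a (value, unit) tuple suitable for reporting.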
try:
if 'G' in x:
return (x.replace('G', ''), 'Gigabytes')
elif 'M' in x:
return (x.replace('M', ''), 'Megabytes')
elif 'K' in x:
return (x.replace('K', ''), 'Kilobytes')
else:
return (x, 'Bytes')
except:
return (x, 'Bytes')
infoUnits = {
'redis_version' : lambda x: (int(x.replace('.', '')), 'None'),
'redis_git_sha1' : lambda x: (int(x, 16), 'None'),
        'redis_git_dirty'           : lambda x: (x, 'None'),
'arch_bits' : lambda x: (x, 'Count'),
'process_id' : lambda x: (x, 'None'),
'uptime_in_seconds' : lambda x: (x, 'Seconds'),
'uptime_in_days' : lambda x: (x, 'None'),
'lru_clock' : lambda x: (x, 'Seconds'),
'used_cpu_sys' : lambda x: (x, 'Seconds'),
'used_cpu_user' : lambda x: (x, 'Seconds'),
'used_cpu_sys_children' : lambda x: (x, 'Seconds'),
'used_cpu_user_children' : lambda x: (x, 'Seconds'),
'connected_clients' : lambda x: (x, 'Count'),
'connected_slaves' : lambda x: (x, 'Count'),
'client_longest_output_list': lambda x: (x, 'Count'),
'client_biggest_input_buf' : lambda x: (x, 'Bytes'),
'blocked_clients' : lambda x: (x, 'Count'),
'used_memory' : lambda x: RedisMetric.parseMemory(x),
'used_memory_human' : lambda x: RedisMetric.parseMemory(x),
'used_memory_rss' : lambda x: RedisMetric.parseMemory(x),
        'used_memory_peak'          : lambda x: RedisMetric.parseMemory(x),
'used_memory_peak_human' : lambda x: RedisMetric.parseMemory(x),
'mem_fragmentation_ratio' : lambda x: (x, 'None'),
'loading' : lambda x: (x, 'None'),
'aof_enabled' : lambda x: (x, 'None'),
'changes_since_last_save' : lambda x: (x, 'Count'),
'bgsave_in_progress' : lambda x: (x, 'None'),
'last_save_time' : lambda x: (x, 'Seconds'),
'bgrewriteaof_in_progress' : lambda x: (x, 'None'),
'total_connections_received': lambda x: (x, 'Count'),
'total_commands_processed' : lambda x: (x, 'Count'),
'expired_keys' : lambda x: (x, 'Count'),
'evicted_keys' : lambda x: (x, 'Count'),
'keyspace_hits' : lambda x: (x, 'Count'),
'keyspace_misses' : lambda x: (x, 'Count'),
'pubsub_channels' : lambda x: (x, 'Count'),
'pubsub_patterns' : lambda x: (x, 'Count'),
'latest_fork_usec' : lambda x: (x, 'Microseconds'),
'vm_enabled' : lambda x: (x, 'None'),
'aof_current_size' : lambda x: (x, 'Bytes'),
'aof_base_size' : lambda x: (x, 'Bytes'),
'aof_pending_rewrite' : lambda x: (x, 'None'),
}
def __init__(self, name, **kwargs):
Metric.__init__(self, name, **kwargs)
self.reconfig(name, **kwargs)
def reconfig(self, name, **kwargs):
Metric.reconfig(self, name, **kwargs)
# These are a selection of argument names. If they're
# present, then we'll use them, otherwise, we'll use
# the default provided by the redis module itself
redisArgs = {}
for arg in ['host', 'port', 'db', 'password', 'charset', 'errors', 'unix_socket_path']:
try:
redisArgs[arg] = kwargs[arg]
except KeyError:
pass
self.redis = redis.Redis(**redisArgs)
# The keys we should save from the 'info' command in redis
self.info = kwargs.get('info' , [])
# The keys we should get and interpret as numbers
self.get = kwargs.get('get' , [])
# The keys we should get, and report their length
self.llen = kwargs.get('llen', [])
# The keys we should get and report the hash length
self.hlen = kwargs.get('hlen', [])
# The keys we should get and report the particular key from
self.hget = kwargs.get('hget', {})
# The keys we should get and report the cardinality of
self.scard = kwargs.get('scard', [])
# The keys we should get and report the zcardinality of
self.zcard = kwargs.get('zcard', [])
# The patterns we should count the number of keys of
self.patterns = kwargs.get('patterns', [])
def values(self):
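        # Fetch the configured INFO fields first, then gather all per-key metrics
        # (get/llen/hlen/hget/scard/zcard/patterns) in a single pipeline round trip.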
try:
results = {}
info = self.redis.info()
for i in self.info:
try:
results[i] = RedisMetric.infoUnits[i](info[i])
except Exception as e:
                    logger.warning(repr(e))
results[i] = (info[i], 'None')
both = list(self.get)
both.extend(self.llen)
both.extend(self.hlen)
both.extend(['%s-%s' % (k, v) for k,v in self.hget.items()])
both.extend(self.scard)
both.extend(self.zcard)
both.extend(self.patterns)
with self.redis.pipeline() as pipe:
for g in self.get:
logger.debug('get %s' % g)
pipe.get(g)
for l in self.llen:
logger.debug('llen %s' % l)
pipe.llen(l)
for h in self.hlen:
logger.debug('hlen %s' % h)
pipe.hlen(h)
for k,v in self.hget.items():
logger.debug('hget %s %s' % (k, v))
pipe.hget(k, v)
for s in self.scard:
logger.debug('scard %s' % s)
pipe.scard(s)
for z in self.zcard:
logger.debug('zcard %s' % z)
pipe.zcard(z)
for pattern in self.patterns:
logger.debug('keys %s' % pattern)
pipe.keys(pattern)
fetched = pipe.execute()
for k, f in zip(both, fetched):
if isinstance(f, list):
results[k] = (len(f), 'Count')
else:
results[k] = (f, 'Count')
return {'results': results}
except redis.RedisError as e:
raise MetricException(e)
| mit | 2,291,710,560,482,880,000 | 44.47973 | 95 | 0.473332 | false |
rolandgeider/wger | wger/mailer/urls.py | 1 | 1189 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.conf.urls import include
from django.urls import path
# wger
from wger.mailer.forms import EmailListForm
from wger.mailer.views import gym
# sub patterns for email lists
patterns_email = [
path('overview/gym/<int:gym_pk>',
gym.EmailLogListView.as_view(),
name='overview'),
path('add/gym/<int:gym_pk>',
gym.EmailListFormPreview(EmailListForm),
name='add-gym'),
]
urlpatterns = [
path('email', include((patterns_email, 'email'), namespace="email")),
]
| agpl-3.0 | 4,714,017,575,843,777,000 | 29.487179 | 78 | 0.721615 | false |
rvykydal/blivet | blivet/formats/disklabel.py | 1 | 22009 | # disklabel.py
# Device format classes for anaconda's storage configuration module.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <[email protected]>
#
import gi
import os
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
from ..storage_log import log_exception_info, log_method_call
import parted
import _ped
from ..errors import DiskLabelCommitError, InvalidDiskLabelError, AlignmentError
from .. import arch
from ..events.manager import event_manager
from .. import udev
from .. import util
from ..flags import flags
from ..i18n import _, N_
from . import DeviceFormat, register_device_format
from ..size import Size
import logging
log = logging.getLogger("blivet")
class DiskLabel(DeviceFormat):
""" Disklabel """
_type = "disklabel"
_name = N_("partition table")
_formattable = True # can be formatted
_default_label_type = None
def __init__(self, **kwargs):
"""
:keyword device: full path to the block device node
:type device: str
:keyword str uuid: disklabel UUID
:keyword label_type: type of disklabel to create
:type label_type: str
:keyword exists: whether the formatting exists
:type exists: bool
"""
log_method_call(self, **kwargs)
DeviceFormat.__init__(self, **kwargs)
self._label_type = ""
if not self.exists:
self._label_type = kwargs.get("label_type") or ""
self._size = Size(0)
self._parted_device = None
self._parted_disk = None
self._orig_parted_disk = None
self._supported = True
self._disk_label_alignment = None
self._minimal_alignment = None
self._optimal_alignment = None
if self.parted_device:
# set up the parted objects and raise exception on failure
try:
self.update_orig_parted_disk()
except Exception as e: # pylint: disable=broad-except
self._supported = False
self._label_type = kwargs.get("label_type") or ""
log.warning("error setting up disklabel object on %s: %s", self.device, str(e))
def __deepcopy__(self, memo):
""" Create a deep copy of a Disklabel instance.
We can't do copy.deepcopy on parted objects, which is okay.
"""
return util.variable_copy(self, memo,
shallow=('_parted_device', '_optimal_alignment', '_minimal_alignment',
'_disk_label_alignment'),
duplicate=('_parted_disk', '_orig_parted_disk'))
def __repr__(self):
s = DeviceFormat.__repr__(self)
if flags.testing:
return s
s += (" type = %(type)s partition count = %(count)s"
" sector_size = %(sector_size)s\n"
" align_offset = %(offset)s align_grain = %(grain)s\n"
" parted_disk = %(disk)s\n"
" orig_parted_disk = %(orig_disk)r\n"
" parted_device = %(dev)s\n" %
{"type": self.label_type, "count": len(self.partitions),
"sector_size": self.sector_size,
"offset": self.get_alignment().offset,
"grain": self.get_alignment().grainSize,
"disk": self.parted_disk, "orig_disk": self._orig_parted_disk,
"dev": self.parted_device})
return s
@property
def desc(self):
return "%s %s" % (self.label_type, self.type)
@property
def dict(self):
d = super(DiskLabel, self).dict
if flags.testing:
return d
d.update({"label_type": self.label_type,
"partition_count": len(self.partitions),
"sector_size": self.sector_size,
"offset": self.get_alignment().offset,
"grain_size": self.get_alignment().grainSize})
return d
@property
def supported(self):
return self._supported
def update_parted_disk(self):
""" re-read the disklabel from the device """
self._parted_disk = None
mask = event_manager.add_mask(device=os.path.basename(self.device), partitions=True)
self.update_orig_parted_disk()
udev.settle()
event_manager.remove_mask(mask)
def update_orig_parted_disk(self):
self._orig_parted_disk = self.parted_disk.duplicate()
def reset_parted_disk(self):
""" Set this instance's parted_disk to reflect the disk's contents. """
log_method_call(self, device=self.device)
self._parted_disk = self._orig_parted_disk
def fresh_parted_disk(self):
""" Return a new, empty parted.Disk instance for this device. """
log_method_call(self, device=self.device, label_type=self.label_type)
return parted.freshDisk(device=self.parted_device, ty=self.label_type)
@property
def parted_disk(self):
if not self.parted_device:
return None
if not self._parted_disk and self.supported:
if self.exists:
try:
self._parted_disk = parted.Disk(device=self.parted_device)
except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
self._supported = False
return None
if self._parted_disk.type == "loop":
# When the device has no partition table but it has a FS,
# it will be created with label type loop. Treat the
# same as if the device had no label (cause it really
# doesn't).
raise InvalidDiskLabelError()
else:
self._parted_disk = self.fresh_parted_disk()
# turn off cylinder alignment
if self._parted_disk.isFlagAvailable(parted.DISK_CYLINDER_ALIGNMENT):
self._parted_disk.unsetFlag(parted.DISK_CYLINDER_ALIGNMENT)
# Set the boot flag on the GPT PMBR, this helps some BIOS systems boot
if self._parted_disk.isFlagAvailable(parted.DISK_GPT_PMBR_BOOT):
# MAC can boot as EFI or as BIOS, neither should have PMBR boot set
if arch.is_efi() or arch.is_mactel():
self._parted_disk.unsetFlag(parted.DISK_GPT_PMBR_BOOT)
log.debug("Clear pmbr_boot on %s", self._parted_disk)
else:
self._parted_disk.setFlag(parted.DISK_GPT_PMBR_BOOT)
log.debug("Set pmbr_boot on %s", self._parted_disk)
else:
log.debug("Did not change pmbr_boot on %s", self._parted_disk)
udev.settle(quiet=True)
return self._parted_disk
@property
def parted_device(self):
if not self._parted_device and self.device:
if os.path.exists(self.device):
# We aren't guaranteed to be able to get a device. In
# particular, built-in USB flash readers show up as devices but
# do not always have any media present, so parted won't be able
# to find a device.
try:
self._parted_device = parted.Device(path=self.device)
except (_ped.IOException, _ped.DeviceException) as e:
log.error("DiskLabel.parted_device: Parted exception: %s", e)
else:
log.info("DiskLabel.parted_device: %s does not exist", self.device)
if not self._parted_device:
log.info("DiskLabel.parted_device returning None")
return self._parted_device
@classmethod
def get_platform_label_types(cls):
label_types = ["msdos", "gpt"]
if arch.is_pmac():
label_types = ["mac"]
elif arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_efi() and arch.is_arm():
label_types = ["msdos", "gpt"]
elif arch.is_efi() and not arch.is_aarch64():
label_types = ["gpt", "msdos"]
elif arch.is_s390():
label_types = ["msdos", "dasd"]
return label_types
@classmethod
def set_default_label_type(cls, labeltype):
cls._default_label_type = labeltype
log.debug("default disklabel has been set to %s", labeltype)
def _label_type_size_check(self, label_type):
if self.parted_device is None:
return False
label = parted.freshDisk(device=self.parted_device, ty=label_type)
return self.parted_device.length < label.maxPartitionStartSector
def _get_best_label_type(self):
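        # Pick a disklabel type for a new label: prefer the configured default,
        # special-case s390 DASD devices, and otherwise fall back to the first
        # platform label type that can address the whole disk.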
label_type = self._default_label_type
label_types = self.get_platform_label_types()[:]
if label_type in label_types:
label_types.remove(label_type)
if label_type:
label_types.insert(0, label_type)
if arch.is_s390():
if blockdev.s390.dasd_is_fba(self.device):
# the device is FBA DASD
return "msdos"
elif self.parted_device.type == parted.DEVICE_DASD:
# the device is DASD
return "dasd"
elif util.detect_virt():
# check for dasds exported into qemu as normal virtio/scsi disks
try:
_parted_disk = parted.Disk(device=self.parted_device)
except (_ped.DiskLabelException, _ped.IOException, NotImplementedError):
pass
else:
if _parted_disk.type == "dasd":
return "dasd"
for lt in label_types:
if self._label_type_size_check(lt):
log.debug("selecting %s disklabel for %s based on size",
                          lt, os.path.basename(self.device))
label_type = lt
break
return label_type
@property
def label_type(self):
""" The disklabel type (eg: 'gpt', 'msdos') """
if not self.supported:
return self._label_type
# For new disklabels, user-specified type overrides built-in logic.
# XXX This determines the type we pass to parted.Disk
if not self.exists and not self._parted_disk:
if self._label_type:
lt = self._label_type
else:
lt = self._get_best_label_type()
return lt
try:
lt = self.parted_disk.type
except Exception: # pylint: disable=broad-except
log_exception_info()
lt = self._label_type
return lt
@property
def sector_size(self):
try:
return Size(self.parted_device.sectorSize)
except AttributeError:
log_exception_info()
return None
@property
def name(self):
if self.supported:
_str = "%(name)s (%(type)s)"
else:
# Translators: Name for an unsupported disklabel; e.g. "Unsupported partition table"
_str = _("Unsupported %(name)s")
return _str % {"name": _(self._name), "type": self.label_type.upper()}
@property
def size(self):
size = self._size
if not size:
try:
size = Size(self.parted_device.getLength(unit="B"))
except Exception: # pylint: disable=broad-except
log_exception_info()
size = Size(0)
return size
@property
def status(self):
""" Device status. """
return False
@property
def supports_names(self):
if not self.supported or not self.parted_disk:
return False
return self.parted_disk.supportsFeature(parted.DISK_TYPE_PARTITION_NAME)
def _create(self, **kwargs):
""" Create the device. """
log_method_call(self, device=self.device,
type=self.type, status=self.status)
# We're relying on someone having called reset_parted_disk -- we
# could ensure a fresh disklabel by setting self._parted_disk to
# None right before calling self.commit(), but that might hide
# other problems.
self.commit()
def commit(self):
""" Commit the current partition table to disk and notify the OS. """
log_method_call(self, device=self.device,
numparts=len(self.partitions))
try:
self.parted_disk.commit()
except parted.DiskException as msg:
raise DiskLabelCommitError(msg)
else:
self.update_orig_parted_disk()
udev.settle()
def commit_to_disk(self):
""" Commit the current partition table to disk. """
log_method_call(self, device=self.device,
numparts=len(self.partitions))
try:
self.parted_disk.commitToDevice()
except parted.DiskException as msg:
raise DiskLabelCommitError(msg)
else:
self.update_orig_parted_disk()
def add_partition(self, start, end, ptype=None):
""" Add a partition to the disklabel.
:param int start: start sector
:param int end: end sector
:param ptype: partition type or None
:type ptype: int (parted partition type constant) or NoneType
Partition type will default to either PARTITION_NORMAL or
PARTITION_LOGICAL, depending on whether the start sector is within
an extended partition.
"""
if ptype is None:
extended = self.extended_partition
if extended and extended.geometry.contains(start):
ptype = parted.PARTITION_LOGICAL
else:
ptype = parted.PARTITION_NORMAL
geometry = parted.Geometry(device=self.parted_device,
start=start, end=end)
new_partition = parted.Partition(disk=self.parted_disk,
type=ptype,
geometry=geometry)
constraint = parted.Constraint(exactGeom=geometry)
self.parted_disk.addPartition(partition=new_partition,
constraint=constraint)
def remove_partition(self, partition):
""" Remove a partition from the disklabel.
:param partition: the partition to remove
:type partition: :class:`parted.Partition`
"""
self.parted_disk.removePartition(partition)
@property
def extended_partition(self):
try:
extended = self.parted_disk.getExtendedPartition()
except Exception: # pylint: disable=broad-except
log_exception_info()
extended = None
return extended
@property
def logical_partitions(self):
try:
logicals = self.parted_disk.getLogicalPartitions()
except Exception: # pylint: disable=broad-except
log_exception_info()
logicals = []
return logicals
@property
def primary_partitions(self):
try:
primaries = self.parted_disk.getPrimaryPartitions()
except Exception: # pylint: disable=broad-except
log_exception_info()
primaries = []
return primaries
@property
def first_partition(self):
try:
part = self.parted_disk.getFirstPartition()
except Exception: # pylint: disable=broad-except
log_exception_info()
part = None
return part
@property
def partitions(self):
return getattr(self.parted_disk, "partitions", [])
def _get_disk_label_alignment(self):
""" Return the disklabel's required alignment for new partitions.
:rtype: :class:`parted.Alignment`
"""
if not self._disk_label_alignment:
try:
self._disk_label_alignment = self.parted_disk.partitionAlignment
except (_ped.CreateException, AttributeError):
self._disk_label_alignment = parted.Alignment(offset=0,
grainSize=1)
return self._disk_label_alignment
def get_minimal_alignment(self):
""" Return the device's minimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
"""
if not self._minimal_alignment:
disklabel_alignment = self._get_disk_label_alignment()
try:
minimal_alignment = self.parted_device.minimumAlignment
except (_ped.CreateException, AttributeError):
# handle this in the same place we'd handle an ArithmeticError
minimal_alignment = None
try:
alignment = minimal_alignment.intersect(disklabel_alignment)
except (ArithmeticError, AttributeError):
alignment = disklabel_alignment
self._minimal_alignment = alignment
return self._minimal_alignment
def get_optimal_alignment(self):
""" Return the device's optimal alignment for new partitions.
:rtype: :class:`parted.Alignment`
.. note::
If there is no device-supplied optimal alignment this method
returns the minimal device alignment.
"""
if not self._optimal_alignment:
disklabel_alignment = self._get_disk_label_alignment()
try:
optimal_alignment = self.parted_device.optimumAlignment
except (_ped.CreateException, AttributeError):
# if there is no optimal alignment, use the minimal alignment,
# which has already been intersected with the disklabel
# alignment
alignment = self.get_minimal_alignment()
else:
try:
alignment = optimal_alignment.intersect(disklabel_alignment)
except ArithmeticError:
alignment = disklabel_alignment
self._optimal_alignment = alignment
return self._optimal_alignment
def get_alignment(self, size=None):
""" Return an appropriate alignment for a new partition.
:keyword size: proposed partition size (optional)
:type size: :class:`~.size.Size`
:returns: the appropriate alignment to use
:rtype: :class:`parted.Alignment`
:raises :class:`~.errors.AlignmentError`: if the partition is too
small to be aligned
"""
# default to the optimal alignment
alignment = self.get_optimal_alignment()
if size is None:
return alignment
# use the minimal alignment if the requested size is smaller than the
# optimal io size
minimal_alignment = self.get_minimal_alignment()
optimal_grain_size = Size(alignment.grainSize * self.sector_size)
minimal_grain_size = Size(minimal_alignment.grainSize * self.sector_size)
if size < minimal_grain_size:
raise AlignmentError("requested size cannot be aligned")
elif size < optimal_grain_size:
alignment = minimal_alignment
return alignment
def get_end_alignment(self, size=None, alignment=None):
""" Return an appropriate end-alignment for a new partition.
:keyword size: proposed partition size (optional)
:type size: :class:`~.size.Size`
:keyword alignment: the start alignment (optional)
:type alignment: :class:`parted.Alignment`
:returns: the appropriate alignment to use
:rtype: :class:`parted.Alignment`
:raises :class:`~.errors.AlignmentError`: if the partition is too
small to be aligned
"""
if alignment is None:
alignment = self.get_alignment(size=size)
return parted.Alignment(offset=alignment.offset - 1,
grainSize=alignment.grainSize)
@property
def alignment(self):
return self.get_alignment()
@property
def end_alignment(self):
return self.get_end_alignment()
@property
def free(self):
if self.parted_disk is not None:
free_areas = self.parted_disk.getFreeSpacePartitions()
else:
free_areas = []
return sum((Size(f.getLength(unit="B")) for f in free_areas), Size(0))
@property
def magic_partition_number(self):
""" Number of disklabel-type-specific special partition. """
if self.label_type == "mac":
return 1
elif self.label_type == "sun":
return 3
else:
return 0
register_device_format(DiskLabel)
| lgpl-2.1 | 3,892,311,891,357,951,500 | 35.927852 | 104 | 0.576809 | false |
JianfengYao/python-web-app | www/transwarp/db.py | 1 | 13974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
'''
Database operation module.
'''
import time, uuid, functools, threading, logging
# Dict object:
class Dict(dict):
'''
Simple dict but support access as x.y style.
>>> d1 = Dict()
>>> d1['x'] = 100
>>> d1.x
100
>>> d1.y = 200
>>> d1['y']
200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
>>> d3 = Dict(('a', 'b', 'c'), (1, 2, 3))
>>> d3.a
1
>>> d3.b
2
>>> d3.c
3
'''
def __init__(self, names=(), values=(), **kw):
super(Dict, self).__init__(**kw)
for k, v in zip(names, values):
self[k] = v
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def next_id(t=None):
'''
Return next id as 50-char string.
Args:
t: unix timestamp, default to None and using time.time().
'''
if t is None:
t = time.time()
return '%015d%s000' % (int(t * 1000), uuid.uuid4().hex)
def _profiling(start, sql=''):
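    # Log how long a statement took; anything slower than 0.1s is logged as a warning.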
t = time.time() - start
if t > 0.1:
logging.warning('[PROFILING] [DB] %s: %s' % (t, sql))
else:
logging.info('[PROFILING] [DB] %s: %s' % (t, sql))
class DBError(Exception):
pass
class MultiColumnsError(DBError):
pass
class _LasyConnection(object):
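    # Connection wrapper that defers opening the real DB connection until the
    # first cursor() call, so "with connection()" blocks that issue no SQL stay cheap.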
def __init__(self):
self.connection = None
def cursor(self):
if self.connection is None:
connection = engine.connect()
logging.info('open connection <%s>...' % hex(id(connection)))
self.connection = connection
return self.connection.cursor()
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def cleanup(self):
if self.connection:
connection = self.connection
self.connection = None
logging.info('close connection <%s>...' % hex(id(connection)))
connection.close()
class _DbCtx(threading.local):
'''
Thread local object that holds connection info.
'''
def __init__(self):
self.connection = None
self.transactions = 0
def is_init(self):
return not self.connection is None
def init(self):
logging.info('open lazy connection...')
self.connection = _LasyConnection()
self.transactions = 0
def cleanup(self):
self.connection.cleanup()
self.connection = None
def cursor(self):
'''
Return cursor
'''
return self.connection.cursor()
# thread-local db context:
_db_ctx = _DbCtx()
# global engine object:
engine = None
class _Engine(object):
def __init__(self, connect):
self._connect = connect
def connect(self):
return self._connect()
def create_engine(user, password, database, host='127.0.0.1', port=3306, **kw):
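    # Initialize the global mysql engine; must be called exactly once before any DB access.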
import mysql.connector
global engine
if engine is not None:
raise DBError('Engine is already initialized.')
params = dict(user=user, password=password, database=database, host=host, port=port)
defaults = dict(use_unicode=True, charset='utf8', collation='utf8_general_ci', autocommit=False)
for k, v in defaults.iteritems():
params[k] = kw.pop(k, v)
params.update(kw)
params['buffered'] = True
engine = _Engine(lambda: mysql.connector.connect(**params))
# test connection...
logging.info('Init mysql engine <%s> ok.' % hex(id(engine)))
class _ConnectionCtx(object):
'''
    _ConnectionCtx object that can open and close a connection context. _ConnectionCtx objects can be nested
    and only the outermost connection has effect.
with connection():
pass
with connection():
pass
'''
def __enter__(self):
global _db_ctx
self.should_cleanup = False
if not _db_ctx.is_init():
_db_ctx.init()
self.should_cleanup = True
return self
def __exit__(self, exctype, excvalue, traceback):
global _db_ctx
if self.should_cleanup:
_db_ctx.cleanup()
def connection():
'''
Return _ConnectionCtx object that can be used by 'with' statement:
with connection():
pass
'''
return _ConnectionCtx()
def with_connection(func):
'''
Decorator for reuse connection.
@with_connection
def foo(*args, **kw):
f1()
f2()
f3()
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
with _ConnectionCtx():
return func(*args, **kw)
return _wrapper
class _TransactionCtx(object):
'''
_TransactionCtx object that can handle transactions.
with _TransactionCtx():
pass
'''
def __enter__(self):
global _db_ctx
self.should_close_conn = False
if not _db_ctx.is_init():
# needs open a connection first:
_db_ctx.init()
self.should_close_conn = True
_db_ctx.transactions = _db_ctx.transactions + 1
logging.info('begin transaction...' if _db_ctx.transactions==1 else 'join current transaction...')
return self
def __exit__(self, exctype, excvalue, traceback):
global _db_ctx
_db_ctx.transactions = _db_ctx.transactions - 1
try:
if _db_ctx.transactions==0:
if exctype is None:
self.commit()
else:
self.rollback()
finally:
if self.should_close_conn:
_db_ctx.cleanup()
def commit(self):
global _db_ctx
logging.info('commit transaction...')
try:
_db_ctx.connection.commit()
logging.info('commit ok.')
except:
logging.warning('commit failed. try rollback...')
_db_ctx.connection.rollback()
logging.warning('rollback ok.')
raise
def rollback(self):
global _db_ctx
logging.warning('rollback transaction...')
_db_ctx.connection.rollback()
logging.info('rollback ok.')
def transaction():
'''
Create a transaction object so can use with statement:
with transaction():
pass
>>> def update_profile(id, name, rollback):
... u = dict(id=id, name=name, email='%[email protected]' % name, passwd=name, last_modified=time.time())
... insert('user', **u)
... r = update('update user set passwd=? where id=?', name.upper(), id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> with transaction():
... update_profile(900301, 'Python', False)
>>> select_one('select * from user where id=?', 900301).name
u'Python'
>>> with transaction():
... update_profile(900302, 'Ruby', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from user where id=?', 900302)
[]
'''
return _TransactionCtx()
def with_transaction(func):
'''
A decorator that makes function around transaction.
>>> @with_transaction
... def update_profile(id, name, rollback):
... u = dict(id=id, name=name, email='%[email protected]' % name, passwd=name, last_modified=time.time())
... insert('user', **u)
... r = update('update user set passwd=? where id=?', name.upper(), id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> update_profile(8080, 'Julia', False)
>>> select_one('select * from user where id=?', 8080).passwd
u'JULIA'
>>> update_profile(9090, 'Robert', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from user where id=?', 9090)
[]
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
        _start = time.time()
        try:
            with _TransactionCtx():
                return func(*args, **kw)
        finally:
            _profiling(_start)
return _wrapper
def _select(sql, first, *args):
' execute select SQL and return unique result or list results.'
global _db_ctx
cursor = None
sql = sql.replace('?', '%s')
logging.info('SQL: %s, ARGS: %s' % (sql, args))
try:
cursor = _db_ctx.connection.cursor()
cursor.execute(sql, args)
if cursor.description:
names = [x[0] for x in cursor.description]
if first:
values = cursor.fetchone()
if not values:
return None
return Dict(names, values)
return [Dict(names, x) for x in cursor.fetchall()]
finally:
if cursor:
cursor.close()
@with_connection
def select_one(sql, *args):
'''
Execute select SQL and expected one result.
If no result found, return None.
If multiple results found, the first one returned.
>>> u1 = dict(id=100, name='Alice', email='[email protected]', passwd='ABC-12345', last_modified=time.time())
>>> u2 = dict(id=101, name='Sarah', email='[email protected]', passwd='ABC-12345', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> u = select_one('select * from user where id=?', 100)
>>> u.name
u'Alice'
>>> select_one('select * from user where email=?', '[email protected]')
>>> u2 = select_one('select * from user where passwd=? order by email', 'ABC-12345')
>>> u2.name
u'Alice'
'''
return _select(sql, True, *args)
@with_connection
def select_int(sql, *args):
'''
Execute select SQL and expected one int and only one int result.
>>> n = update('delete from user')
>>> u1 = dict(id=96900, name='Ada', email='[email protected]', passwd='A-12345', last_modified=time.time())
>>> u2 = dict(id=96901, name='Adam', email='[email protected]', passwd='A-12345', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> select_int('select count(*) from user')
2
>>> select_int('select count(*) from user where email=?', '[email protected]')
1
>>> select_int('select count(*) from user where email=?', '[email protected]')
0
>>> select_int('select id from user where email=?', '[email protected]')
96900
>>> select_int('select id, name from user where email=?', '[email protected]')
Traceback (most recent call last):
...
MultiColumnsError: Expect only one column.
'''
d = _select(sql, True, *args)
if len(d)!=1:
raise MultiColumnsError('Expect only one column.')
return d.values()[0]
@with_connection
def select(sql, *args):
'''
Execute select SQL and return list or empty list if no result.
>>> u1 = dict(id=200, name='Wall.E', email='[email protected]', passwd='back-to-earth', last_modified=time.time())
>>> u2 = dict(id=201, name='Eva', email='[email protected]', passwd='back-to-earth', last_modified=time.time())
>>> insert('user', **u1)
1
>>> insert('user', **u2)
1
>>> L = select('select * from user where id=?', 900900900)
>>> L
[]
>>> L = select('select * from user where id=?', 200)
>>> L[0].email
u'[email protected]'
>>> L = select('select * from user where passwd=? order by id desc', 'back-to-earth')
>>> L[0].name
u'Eva'
>>> L[1].name
u'Wall.E'
'''
return _select(sql, False, *args)
@with_connection
def _update(sql, *args):
global _db_ctx
cursor = None
sql = sql.replace('?', '%s')
logging.info('SQL: %s, ARGS: %s' % (sql, args))
try:
cursor = _db_ctx.connection.cursor()
cursor.execute(sql, args)
r = cursor.rowcount
if _db_ctx.transactions==0:
            # no transaction environment:
logging.info('auto commit')
_db_ctx.connection.commit()
return r
finally:
if cursor:
cursor.close()
def insert(table, **kw):
'''
Execute insert SQL.
>>> u1 = dict(id=2000, name='Bob', email='[email protected]', passwd='bobobob', last_modified=time.time())
>>> insert('user', **u1)
1
>>> u2 = select_one('select * from user where id=?', 2000)
>>> u2.name
u'Bob'
>>> insert('user', **u2)
Traceback (most recent call last):
...
IntegrityError: 1062 (23000): Duplicate entry '2000' for key 'PRIMARY'
'''
cols, args = zip(*kw.iteritems())
sql = 'insert into `%s` (%s) values (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' for i in range(len(cols))]))
return _update(sql, *args)
def update(sql, *args):
r'''
Execute update SQL.
>>> u1 = dict(id=1000, name='Michael', email='[email protected]', passwd='123456', last_modified=time.time())
>>> insert('user', **u1)
1
>>> u2 = select_one('select * from user where id=?', 1000)
>>> u2.email
u'[email protected]'
>>> u2.passwd
u'123456'
>>> update('update user set email=?, passwd=? where id=?', '[email protected]', '654321', 1000)
1
>>> u3 = select_one('select * from user where id=?', 1000)
>>> u3.email
u'[email protected]'
>>> u3.passwd
u'654321'
>>> update('update user set passwd=? where id=?', '***', '123\' or id=\'456')
0
'''
return _update(sql, *args)
if __name__=='__main__':
logging.basicConfig(level=logging.DEBUG)
create_engine('www-data', 'www-data', 'test')
update('drop table if exists user')
update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')
import doctest
doctest.testmod()
| gpl-2.0 | -7,054,294,330,750,922,000 | 27.871901 | 140 | 0.563761 | false |
suprotkin/atm | atm/atm/settings.py | 1 | 2842 | """
Django settings for atm project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k%f-1_1n5y^c68*(wa^&oq)m6xevu5pgha31i6*v5ssm@6dl*e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'card',
'common',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.security.SecurityMiddleware',
'card.middleware.CardAuthMiddleware',
)
ROOT_URLCONF = 'atm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'atm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
| gpl-2.0 | -6,900,148,488,778,329,000 | 25.314815 | 74 | 0.69247 | false |
foundertherapy/django-users-plus | accountsplus/views.py | 1 | 8660 | from __future__ import unicode_literals
import logging
from django.utils.translation import ugettext as _
import django.views.decorators.cache
import django.views.decorators.csrf
import django.views.decorators.debug
import django.contrib.auth.decorators
import django.contrib.auth.views
import django.contrib.auth.forms
import django.contrib.auth
import django.contrib.messages
import django.shortcuts
import django.http
import django.template.response
import django.utils.module_loading
import django.core.urlresolvers
from django.conf import settings as app_settings
from axes import utils
import signals
import forms
import settings
logger = logging.getLogger(__name__)
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
# if a user is masquerading, don't log them out, just kill the masquerade
if request.session.get('is_masquerading'):
return django.shortcuts.redirect('end_masquerade')
else:
return django.contrib.auth.views.logout_then_login(request, login_url, extra_context)
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def masquerade(request, user_id=None):
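    # Let a staff/superuser with the masquerade permission log in as another (non-superuser) user.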
User = django.contrib.auth.get_user_model()
return_page = request.META.get('HTTP_REFERER') or 'admin:index'
if not user_id:
django.contrib.messages.error(request, 'Masquerade failed: no user specified')
return django.shortcuts.redirect(return_page)
if not request.user.has_perm(User.PERMISSION_MASQUERADE):
django.contrib.messages.error(request, 'Masquerade failed: insufficient privileges')
return django.shortcuts.redirect(return_page)
if not (request.user.is_superuser or request.user.is_staff):
django.contrib.messages.error(request, 'Masquerade failed: must be staff or superuser')
return django.shortcuts.redirect(return_page)
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
logger.error('User {} ({}) masquerading failed for user {}'.format(request.user.email, request.user.id, user_id))
django.contrib.messages.error(request, 'Masquerade failed: unknown user {}'.format(user_id))
return django.shortcuts.redirect(return_page)
if user.is_superuser:
logger.warning(
'User {} ({}) cannot masquerade as superuser {} ({})'.format(request.user.email, request.user.id, user.email, user.id))
django.contrib.messages.warning(request, 'Cannot masquerade as a superuser')
return django.shortcuts.redirect(return_page)
admin_user = request.user
user.backend = request.session[django.contrib.auth.BACKEND_SESSION_KEY]
# log the new user in
signals.masquerade_start.send(sender=masquerade, request=request, user=admin_user, masquerade_as=user)
# this is needed to track whether this login is for a masquerade
setattr(user, 'is_masquerading', True)
setattr(user, 'masquerading_user', admin_user)
django.contrib.auth.login(request, user)
request.session['is_masquerading'] = True
request.session['masquerade_user_id'] = admin_user.id
request.session['return_page'] = return_page
request.session['masquerade_is_superuser'] = admin_user.is_superuser
logger.info(
'User {} ({}) masquerading as {} ({})'.format(admin_user.email, admin_user.id, request.user.email, request.user.id))
django.contrib.messages.success(request, 'Masquerading as user {0}'.format(user.email))
return django.http.HttpResponseRedirect(app_settings.LOGIN_REDIRECT_URL)
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def end_masquerade(request):
User = django.contrib.auth.get_user_model()
if 'is_masquerading' not in request.session:
return django.shortcuts.redirect('admin:index')
if 'masquerade_user_id' in request.session:
try:
masqueraded_user = request.user
user = User.objects.get(
pk=request.session['masquerade_user_id'])
user.backend = request.session[
django.contrib.auth.BACKEND_SESSION_KEY]
# this is needed to track whether this login is for a masquerade
django.contrib.auth.logout(request)
signals.masquerade_end.send(
sender=end_masquerade, request=request, user=user,
masquerade_as=masqueraded_user)
django.contrib.auth.login(request, user)
logging.info('End masquerade user: {} ({}) by: {} ({})'.format(
masqueraded_user.email, masqueraded_user.id,
user.email, user.id))
django.contrib.messages.success(request, 'Masquerade ended')
except User.DoesNotExist as e:
logging.critical(
'Masquerading user {} does not exist'.format(
request.session['masquerade_user_id']))
return django.shortcuts.redirect('admin:index')
@django.views.decorators.debug.sensitive_post_parameters()
@django.views.decorators.csrf.csrf_protect
@django.contrib.auth.decorators.login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=django.contrib.auth.forms.
PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = django.core.urlresolvers.reverse(
'password_change_done')
else:
post_change_redirect = django.shortcuts.resolve_url(
post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one if
# django.contrib.auth.middleware.SessionAuthenticationMiddleware
# is enabled.
django.contrib.auth.update_session_auth_hash(request, form.user)
signals.user_password_change.send(
sender=password_change, request=request, user=form.user)
return django.http.HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return django.template.response.TemplateResponse(request, template_name, context)
@django.views.decorators.csrf.csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=django.contrib.auth.forms.PasswordResetForm,
token_generator=django.contrib.auth.views.default_token_generator,
post_reset_redirect=None,
from_email=None,
current_app=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
User = django.contrib.auth.get_user_model()
response = django.contrib.auth.views.password_reset(
request, template_name, email_template_name,
subject_template_name, password_reset_form, token_generator,
post_reset_redirect, from_email, extra_context,
html_email_template_name, extra_email_context)
if request.method == 'POST':
email = request.POST['email']
try:
user = User.objects.get(email=email)
signals.user_password_reset_request.send(
sender=password_reset, request=request, user=user)
except User.DoesNotExist:
pass
return response
class GenericLockedView(django.views.generic.FormView):
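    # Shown when django-axes locks an account: a captcha form that, on success,
    # resets the lockout for the submitted username and redirects to `urlPattern`.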
template_name = settings.LOCKOUT_TEMPLATE
form_class = forms.CaptchaForm
urlPattern = ''
def get_success_url(self):
return django.urls.reverse_lazy(self.urlPattern)
def form_valid(self, form):
utils.reset(username=form.cleaned_data['username'])
return super(GenericLockedView, self).form_valid(form)
class UserLockedOutView(GenericLockedView):
urlPattern = 'login'
class AdminLockedOutView(GenericLockedView):
urlPattern = 'admin:index'
| mit | -1,332,505,607,043,437,000 | 40.238095 | 131 | 0.675751 | false |
enolfc/oauthenticator | oauthenticator/cilogon.py | 1 | 6879 | """CILogon OAuthAuthenticator for JupyterHub
Uses OAuth 2.0 with cilogon.org (override with CILOGON_HOST)
Caveats:
- For user whitelist/admin purposes, username will be the ePPN by default.
This is typically an email address and may not work as a Unix userid.
Normalization may be required to turn the JupyterHub username into a Unix username.
- Default username_claim of ePPN does not work for all providers,
e.g. generic OAuth such as Google.
Use `c.CILogonOAuthenticator.username_claim = 'email'` to use
email instead of ePPN as the JupyterHub username.
"""
import json
import os
from tornado.auth import OAuth2Mixin
from tornado import gen, web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode, List, Bool, validate
from jupyterhub.auth import LocalAuthenticator
from .oauth2 import OAuthLoginHandler, OAuthenticator
CILOGON_HOST = os.environ.get('CILOGON_HOST') or 'cilogon.org'
class CILogonMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "https://%s/authorize" % CILOGON_HOST
_OAUTH_TOKEN_URL = "https://%s/oauth2/token" % CILOGON_HOST
class CILogonLoginHandler(OAuthLoginHandler, CILogonMixin):
"""See http://www.cilogon.org/oidc for general information."""
def authorize_redirect(self, *args, **kwargs):
"""Add idp, skin to redirect params"""
extra_params = kwargs.setdefault('extra_params', {})
if self.authenticator.idp:
extra_params["selected_idp"] = self.authenticator.idp
if self.authenticator.skin:
extra_params["skin"] = self.authenticator.skin
return super().authorize_redirect(*args, **kwargs)
class CILogonOAuthenticator(OAuthenticator):
login_service = "CILogon"
client_id_env = 'CILOGON_CLIENT_ID'
client_secret_env = 'CILOGON_CLIENT_SECRET'
login_handler = CILogonLoginHandler
scope = List(Unicode(), default_value=['openid', 'email', 'org.cilogon.userinfo'],
config=True,
help="""The OAuth scopes to request.
See cilogon_scope.md for details.
At least 'openid' is required.
""",
)
@validate('scope')
def _validate_scope(self, proposal):
"""ensure openid is requested"""
if 'openid' not in proposal.value:
return ['openid'] + proposal.value
return proposal.value
idp_whitelist = List(
config=True,
help="""A list of IDP which can be stripped from the username after the @ sign.""",
)
strip_idp_domain = Bool(
False,
config=True,
help="""Remove the IDP domain from the username. Note that only domains which
appear in the `idp_whitelist` will be stripped.""",
)
idp = Unicode(
config=True,
help="""The `idp` attribute is the SAML Entity ID of the user's selected
identity provider.
See https://cilogon.org/include/idplist.xml for the list of identity
providers supported by CILogon.
""",
)
skin = Unicode(
config=True,
help="""The `skin` attribute is the name of the custom CILogon interface skin
for your application.
Contact [email protected] to request a custom skin.
""",
)
username_claim = Unicode(
"eppn",
config=True,
help="""The claim in the userinfo response from which to get the JupyterHub username
Examples include: eppn, email
What keys are available will depend on the scopes requested.
See http://www.cilogon.org/oidc for details.
""",
)
@gen.coroutine
def authenticate(self, handler, data=None):
"""We set up auth_state based on additional CILogon info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a CILogon Access Token
# See: http://www.cilogon.org/oidc
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
}
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.oauth_callback_url,
code=code,
grant_type='authorization_code',
)
url = url_concat("https://%s/oauth2/token" % CILOGON_HOST, params)
req = HTTPRequest(url,
headers=headers,
method="POST",
body=''
)
resp = yield http_client.fetch(req)
token_response = json.loads(resp.body.decode('utf8', 'replace'))
access_token = token_response['access_token']
self.log.info("Access token acquired.")
# Determine who the logged in user is
params = dict(access_token=access_token)
req = HTTPRequest(url_concat("https://%s/oauth2/userinfo" %
CILOGON_HOST, params),
headers=headers
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json.get(self.username_claim)
if not username:
self.log.error("Username claim %s not found in the response: %s",
self.username_claim, sorted(resp_json.keys())
)
raise web.HTTPError(500, "Failed to get username from CILogon")
if self.idp_whitelist:
gotten_name, gotten_idp = username.split('@')
if gotten_idp not in self.idp_whitelist:
                self.log.error(
                    "Trying to log in from a non-whitelisted domain %s", gotten_idp)
                raise web.HTTPError(
                    500, "Trying to log in from a non-whitelisted domain")
if len(self.idp_whitelist) == 1 and self.strip_idp_domain:
username = gotten_name
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the token response and full CILogon reply in auth state
# These can be used for user provisioning
# in the Lab/Notebook environment.
auth_state['token_response'] = token_response
# store the whole user model in auth_state.cilogon_user
# keep access_token as well, in case anyone was relying on it
auth_state['access_token'] = access_token
auth_state['cilogon_user'] = resp_json
return userdict
class LocalCILogonOAuthenticator(LocalAuthenticator, CILogonOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| bsd-3-clause | 213,098,262,406,120,640 | 34.276923 | 92 | 0.612153 | false |
nacl-webkit/chrome_deps | tools/telemetry/telemetry/inspector_timeline_unittest.py | 1 | 4095 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import tab_test_case
from telemetry import util
from telemetry.inspector_timeline import InspectorTimeline
_SAMPLE_MESSAGE = {
'children': [
{'data': {},
'startTime': 1352783525921.823,
'type': 'BeginFrame',
'usedHeapSize': 1870736},
{'children': [],
'data': {'height': 723,
'width': 1272,
'x': 0,
'y': 0},
'endTime': 1352783525921.8992,
'frameId': '10.2',
'startTime': 1352783525921.8281,
'type': 'Layout',
'usedHeapSize': 1870736},
{'children': [
{'children': [],
'data': {'imageType': 'PNG'},
'endTime': 1352783525927.7939,
'startTime': 1352783525922.4241,
'type': 'DecodeImage',
'usedHeapSize': 1870736}
],
'data': {'height': 432,
'width': 1272,
'x': 0,
'y': 8},
'endTime': 1352783525927.9822,
'frameId': '10.2',
'startTime': 1352783525921.9292,
'type': 'Paint',
'usedHeapSize': 1870736}
],
'data': {},
'endTime': 1352783525928.041,
'startTime': 1352783525921.8049,
'type': 'Program'}
class InspectorEventParsingTest(unittest.TestCase):
def testParsingWithSampleData(self):
root_event = InspectorTimeline.RawEventToTimelineEvent(_SAMPLE_MESSAGE)
self.assertTrue(root_event)
decode_image_event = [
child for child in root_event.GetAllChildrenRecursive()
if child.name == 'DecodeImage'][0]
self.assertEquals(decode_image_event.args['data']['imageType'], 'PNG')
self.assertTrue(decode_image_event.duration_ms > 0)
def testParsingWithSimpleData(self):
raw_event = {'type': 'Foo',
'startTime': 1,
'endTime': 3,
'children': []}
event = InspectorTimeline.RawEventToTimelineEvent(raw_event)
self.assertEquals('Foo', event.name)
self.assertEquals(1, event.start_time_ms)
self.assertEquals(3, event.end_time_ms)
self.assertEquals(2, event.duration_ms)
self.assertEquals([], event.children)
def testParsingWithArgs(self):
raw_event = {'type': 'Foo',
'startTime': 1,
'endTime': 3,
'foo': 7,
'bar': {'x': 1}}
event = InspectorTimeline.RawEventToTimelineEvent(raw_event)
self.assertEquals('Foo', event.name)
self.assertEquals(1, event.start_time_ms)
self.assertEquals(3, event.end_time_ms)
self.assertEquals(2, event.duration_ms)
self.assertEquals([], event.children)
self.assertEquals(7, event.args['foo'])
self.assertEquals(1, event.args['bar']['x'])
def testEventsWithNoStartTimeAreDropped(self):
raw_event = {'type': 'Foo',
'endTime': 1,
'children': []}
event = InspectorTimeline.RawEventToTimelineEvent(raw_event)
self.assertEquals(None, event)
def testEventsWithNoEndTimeAreDropped(self):
raw_event = {'type': 'Foo',
                 'startTime': 1,
'children': []}
event = InspectorTimeline.RawEventToTimelineEvent(raw_event)
self.assertEquals(None, event)
class InspectorTimelineTabTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectory(os.path.join(base_dir, '..',
'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTimeline(self):
with InspectorTimeline.Recorder(self._tab):
self._tab.ExecuteJavaScript(
"""
var done = false;
window.webkitRequestAnimationFrame(function() { done = true; });
""")
self._WaitForAnimationFrame()
r = self._tab.timeline_model.GetAllOfName('FireAnimationFrame')
self.assertTrue(len(r) > 0)
self.assertTrue(r[0].duration_ms > 0)
| bsd-3-clause | 1,318,567,845,950,442,500 | 31.76 | 75 | 0.61978 | false |
kevin-intel/scikit-learn | sklearn/datasets/_kddcup99.py | 3 | 12676 | """KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
from gzip import GzipFile
import logging
import os
from os.path import dirname, exists, join
import numpy as np
import joblib
from ._base import _fetch_remote
from ._base import _convert_data_dataframe
from . import get_data_home
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
ARCHIVE = RemoteFileMetadata(
filename='kddcup99_data',
url='https://ndownloader.figshare.com/files/5976045',
checksum=('3b6c942aa0356c0ca35b7b595a26c89d'
'343652c9db428893e7494f837b274292'))
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
ARCHIVE_10_PERCENT = RemoteFileMetadata(
filename='kddcup99_10_data',
url='https://ndownloader.figshare.com/files/5976042',
checksum=('8045aca0d84e70e622d1148d7df78249'
'6f6333bf6eb979a1b0837c42a9fd9561'))
logger = logging.getLogger(__name__)
def fetch_kddcup99(*, subset=None, data_home=None, shuffle=False,
random_state=None,
percent10=True, download_if_missing=True, return_X_y=False,
as_frame=False):
"""Load the kddcup99 dataset (classification).
Download it if necessary.
================= ====================================
Classes 23
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
================= ====================================
Read more in the :ref:`User Guide <kddcup99_dataset>`.
.. versionadded:: 0.18
Parameters
----------
subset : {'SA', 'SF', 'http', 'smtp'}, default=None
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
.. versionadded:: 0.19
shuffle : bool, default=False
Whether to shuffle dataset.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and for
selection of abnormal samples if `subset='SA'`. Pass an int for
reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.20
as_frame : bool, default=False
If `True`, returns a pandas Dataframe for the ``data`` and ``target``
objects in the `Bunch` returned object; `Bunch` return object will also
have a ``frame`` member.
.. versionadded:: 0.24
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (494021, 41)
The data matrix to learn. If `as_frame=True`, `data` will be a
pandas DataFrame.
target : {ndarray, series} of shape (494021,)
            The classification target for each sample. If `as_frame=True`,
            `target` will be a pandas Series.
frame : dataframe of shape (494021, 42)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns
        target_names : list
The names of the target columns
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
kddcup99 = _fetch_brute_kddcup99(
data_home=data_home,
percent10=percent10,
download_if_missing=download_if_missing
)
data = kddcup99.data
target = kddcup99.target
feature_names = kddcup99.feature_names
target_names = kddcup99.target_names
if subset == 'SA':
s = target == b'normal.'
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == 'SF' or subset == 'http' or subset == 'smtp':
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
feature_names = feature_names[:11] + feature_names[12:]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
if subset == 'http':
s = data[:, 2] == b'http'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'smtp':
s = data[:, 2] == b'smtp'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'SF':
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[2],
feature_names[4], feature_names[5]]
if shuffle:
data, target = shuffle_method(data, target, random_state=random_state)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'kddcup99.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_kddcup99", data, target, feature_names, target_names
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr,
)
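# A minimal usage sketch of the public fetcher above (illustrative only; it
# assumes network access on the first call and the default
# ~/scikit_learn_data cache directory):
#
#     from sklearn.datasets import fetch_kddcup99
#
#     data, target = fetch_kddcup99(subset='smtp', percent10=True,
#                                   return_X_y=True)
#     # 'smtp' keeps three log-scaled features: duration, src_bytes, dst_bytes.
#     print(data.shape, target.shape)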
def _fetch_brute_kddcup99(data_home=None,
download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
        target_names : list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = "-py3"
if percent10:
kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, "samples")
targets_path = join(kddcup_dir, "targets")
available = exists(samples_path)
dt = [('duration', int),
('protocol_type', 'S4'),
('service', 'S11'),
('flag', 'S6'),
('src_bytes', int),
('dst_bytes', int),
('land', int),
('wrong_fragment', int),
('urgent', int),
('hot', int),
('num_failed_logins', int),
('logged_in', int),
('num_compromised', int),
('root_shell', int),
('su_attempted', int),
('num_root', int),
('num_file_creations', int),
('num_shells', int),
('num_access_files', int),
('num_outbound_cmds', int),
('is_host_login', int),
('is_guest_login', int),
('count', int),
('srv_count', int),
('serror_rate', float),
('srv_serror_rate', float),
('rerror_rate', float),
('srv_rerror_rate', float),
('same_srv_rate', float),
('diff_srv_rate', float),
('srv_diff_host_rate', float),
('dst_host_count', int),
('dst_host_srv_count', int),
('dst_host_same_srv_rate', float),
('dst_host_diff_srv_rate', float),
('dst_host_same_src_port_rate', float),
('dst_host_srv_diff_host_rate', float),
('dst_host_serror_rate', float),
('dst_host_srv_serror_rate', float),
('dst_host_rerror_rate', float),
('dst_host_srv_rerror_rate', float),
('labels', 'S16')]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(
"The cache for fetch_kddcup99 is invalid, please delete "
f"{str(kddcup_dir)} and run the fetch_kddcup99 again") from e
elif download_if_missing:
_mkdirp(kddcup_dir)
logger.info("Downloading %s" % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug("extracting archive")
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode='r')
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace('\n', '').split(','))
file_.close()
logger.debug('extraction done')
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
# XXX bug when compress!=0:
# (error: 'Incorrect data length while decompressing[...] the file
# could be corrupted.')
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError("Data not found and `download_if_missing` is False")
return Bunch(
data=X,
target=y,
feature_names=feature_names,
target_names=[target_names],
)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| bsd-3-clause | 3,550,723,356,904,437,000 | 32.983914 | 98 | 0.574787 | false |
wheeler-microfluidics/dmf-device-ui | dmf_device_ui/canvas.py | 1 | 51678 | # -*- coding: utf-8 -*-
from collections import OrderedDict
import itertools
import functools as ft
import logging
import threading
from cairo_helpers.surface import flatten_surfaces
from logging_helpers import _L
from pygtkhelpers.ui.views.shapes_canvas_view import GtkShapesCanvasView
from pygtkhelpers.utils import gsignal
from pygst_utils.video_view.video_sink import VideoSink
from pygst_utils.video_view import np_to_cairo
from svg_model import compute_shape_centers
from svg_model.color import hex_color_to_rgba
import cairo
import debounce
import gtk
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class Route(object):
'''
Attributes
----------
device : microdrop.dmf_device.DmfDevice
electrode_ids : list
Ordered list of **connected** electrodes ids.
Represents an actuation sequence of electrodes that would support
liquid movement between the first and last electrode.
'''
def __init__(self, device):
self.device = device
self.electrode_ids = []
def __str__(self):
return '<Route electrode_ids=%s>' % self.electrode_ids
def append(self, electrode_id):
'''
Append the specified electrode to the route.
The route is not modified (i.e., electrode is not appended) if
electrode is not connected to the last electrode in the existing route.
Parameters
----------
electrode_id : str
Electrode identifier.
'''
do_append = False
if not self.electrode_ids:
do_append = True
elif self.device.shape_indexes.shape[0] > 0:
source = self.electrode_ids[-1]
target = electrode_id
if not (source == target):
source_id, target_id = self.device.shape_indexes[[source,
target]]
try:
if self.device.adjacency_matrix[source_id, target_id]:
# Electrodes are connected, so append target to current
# route.
do_append = True
except IndexError:
logger.warning('Electrodes `%s` and `%s` are not '
'connected.', source, target)
if do_append:
self.electrode_ids.append(electrode_id)
return do_append
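# A minimal usage sketch of `Route` (illustrative only; assumes `device` is a
# loaded `microdrop.dmf_device.DmfDevice` and that the two electrode ids below
# exist and are adjacent in that device):
#
#     route = Route(device)
#     route.append('electrode000')
#     route.append('electrode001')  # ignored unless connected to the last id
#     print(route.electrode_ids)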
class DmfDeviceCanvas(GtkShapesCanvasView):
'''
Draw device layout from SVG file.
Mouse events are handled as follows:
- Click and release on the same electrode emits electrode selected signal.
- Click on one electrode, drag, and release on another electrode emits
electrode *pair* selected signal, with *source* electrode and *target*
electrode.
- Moving mouse cursor over electrode emits electrode mouse over signal.
- Moving mouse cursor out of electrode emits electrode mouse out signal.
Signals are emitted as gobject signals. See `emit` calls for payload
formats.
'''
gsignal('device-set', object)
gsignal('electrode-command', str, str, object)
gsignal('electrode-mouseout', object)
gsignal('electrode-mouseover', object)
gsignal('electrode-pair-selected', object)
gsignal('electrode-selected', object)
#: .. versionadded:: 0.13
gsignal('global-command', str, str, object)
gsignal('key-press', object)
gsignal('key-release', object)
gsignal('route-command', str, str, object)
gsignal('route-electrode-added', object)
gsignal('route-selected', object)
#: .. versionadded:: 0.11.3
gsignal('routes-set', object)
gsignal('surface-rendered', str, object)
gsignal('surfaces-reset', object)
# Video signals
gsignal('point-pair-selected', object)
gsignal('video-enabled')
gsignal('video-disabled')
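    # A hedged sketch of typical signal wiring (the `dmf_device` object and the
    # handler below are illustrative, not defined by this class):
    #
    #     canvas = DmfDeviceCanvas(connections_alpha=0.5)
    #     canvas.set_device(dmf_device)
    #     canvas.connect('electrode-selected',
    #                    lambda view, data: logger.info(data['electrode_id']))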
def __init__(self, connections_alpha=1., connections_color=1.,
transport='tcp', target_host='*', port=None, **kwargs):
# Video sink socket info.
self.socket_info = {'transport': transport,
'host': target_host,
'port': port}
# Identifier for video incoming socket check.
self.callback_id = None
self._enabled = False # Video enable
self.start_event = None # Video modify start click event
# Matched corner points between canvas and video frame. Used to
# generate map between coordinate spaces.
self.df_canvas_corners = pd.DataFrame(None, columns=['x', 'y'],
dtype=float)
self.df_frame_corners = pd.DataFrame(None, columns=['x', 'y'],
dtype=float)
# Matrix map from frame coordinates to canvas coordinates.
self.frame_to_canvas_map = None
# Matrix map from canvas coordinates to frame coordinates.
self.canvas_to_frame_map = None
# Shape of canvas (i.e., drawing area widget).
self.shape = None
self.mode = 'control'
# Read SVG polygons into dataframe, one row per polygon vertex.
df_shapes = pd.DataFrame(None, columns=['id', 'vertex_i', 'x', 'y'])
self.device = None
self.shape_i_column = 'id'
# Save alpha for drawing connections.
self.connections_alpha = connections_alpha
# Save color for drawing connections.
self.connections_color = connections_color
#: ..versionadded:: 0.12
self._dynamic_electrodes = pd.Series()
self.reset_states()
self.reset_routes()
self.connections_attrs = {}
self.last_pressed = None
self.last_hovered = None
self._route = None
self.connections_enabled = (self.connections_alpha > 0)
self.default_corners = {} # {'canvas': None, 'frame': None}
#: .. versionadded:: 0.13
#: Registered global commands
self.global_commands = OrderedDict()
# Registered electrode commands
self.electrode_commands = OrderedDict()
# Register test command
#self.register_electrode_command('ping',
#group='microdrop.device_info_plugin')
# Registered route commands
self.route_commands = OrderedDict()
super(DmfDeviceCanvas, self).__init__(df_shapes, self.shape_i_column,
**kwargs)
@property
def df_routes(self):
'''
.. versionadded:: 0.11.3
'''
return self._df_routes
@df_routes.setter
def df_routes(self, value):
'''
.. versionadded:: 0.11.3
'''
self._df_routes = value
try:
self.emit('routes-set', self._df_routes.copy())
except TypeError:
pass
def reset_canvas_corners(self):
self.df_canvas_corners = (self.default_corners
.get('canvas',
self.default_shapes_corners()))
def reset_frame_corners(self):
self.df_frame_corners = (self.default_corners
.get('frame', self.default_frame_corners()))
def default_shapes_corners(self):
if self.canvas is None:
return self.df_canvas_corners
width, height = self.canvas.source_shape
return pd.DataFrame([[0, 0], [width, 0], [width, height], [0, height]],
columns=['x', 'y'], dtype=float)
def default_frame_corners(self):
if self.video_sink.frame_shape is None:
return self.df_frame_corners
width, height = self.video_sink.frame_shape
return pd.DataFrame([[0, 0], [width, 0], [width, height], [0, height]],
columns=['x', 'y'], dtype=float)
def update_transforms(self):
from opencv_helpers.safe_cv import cv2
if (self.df_canvas_corners.shape[0] == 0 or
self.df_frame_corners.shape[0] == 0):
return
self.canvas_to_frame_map = cv2.findHomography(self.df_canvas_corners
.values,
self.df_frame_corners
.values)[0]
self.frame_to_canvas_map = cv2.findHomography(self.df_frame_corners
.values,
self.df_canvas_corners
.values)[0]
# Translate transform shape coordinate space to drawing area coordinate
# space.
transform = self.frame_to_canvas_map
if self.canvas is not None:
transform = (self.canvas.shapes_to_canvas_transform.values
.dot(transform))
self.video_sink.transform = transform
self.set_surface('registration', self.render_registration())
def create_ui(self):
'''
.. versionchanged:: 0.9
Update device registration in real-time while dragging video
control point to new position.
.. versionchanged:: 0.12
Add ``dynamic_electrode_state_shapes`` layer to show dynamic
electrode actuations.
'''
super(DmfDeviceCanvas, self).create_ui()
self.video_sink = VideoSink(*[self.socket_info[k]
for k in ['transport', 'host', 'port']])
# Initialize video sink socket.
self.video_sink.reset()
# Required to have key-press and key-release events trigger.
self.widget.set_flags(gtk.CAN_FOCUS)
self.widget.add_events(gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK)
# Create initial (empty) cairo surfaces.
surface_names = ('background', 'shapes', 'connections', 'routes',
'channel_labels', 'static_electrode_state_shapes',
'dynamic_electrode_state_shapes', 'registration')
self.df_surfaces = pd.DataFrame([[self.get_surface(), 1.]
for i in xrange(len(surface_names))],
columns=['surface', 'alpha'],
index=pd.Index(surface_names,
name='name'))
def _update_registration(event):
try:
start_event = self.start_event.copy()
self.start_event = event.copy()
self.emit('point-pair-selected', {'start_event': start_event,
'end_event': event})
except AttributeError:
# Mouse button was released, causing `self.start_event` to be
# `None` before event was handled here.
pass
# Debounce calls to `_update_registration` function to prevent too many
# calls being triggered from mouse movement events.
update_registration = debounce.Debounce(_update_registration, wait=10)
def _on_mouse_move(area, event):
# XXX Need to make a copy of the event here since the original
# event will be deallocated before the debounced
# `update_registration` function is called.
event = event.copy()
if self.mode == 'register_video' and self.start_event is not None:
update_registration(event.copy())
# Connect video registration update event to mouse movement event.
self.widget.connect("motion_notify_event", _on_mouse_move)
def reset_canvas(self, width, height):
super(DmfDeviceCanvas, self).reset_canvas(width, height)
if self.device is None or self.canvas.df_canvas_shapes.shape[0] == 0:
return
self.canvas.df_canvas_shapes =\
compute_shape_centers(self.canvas.df_canvas_shapes
[[self.shape_i_column, 'vertex_i', 'x',
'y']], self.shape_i_column)
self.canvas.df_shape_centers = (self.canvas.df_canvas_shapes
[[self.shape_i_column, 'x_center',
'y_center']].drop_duplicates()
.set_index(self.shape_i_column))
df_shape_connections = self.device.df_shape_connections
self.canvas.df_connection_centers =\
(df_shape_connections.join(self.canvas.df_shape_centers
.loc[df_shape_connections.source]
.reset_index(drop=True))
.join(self.canvas.df_shape_centers.loc[df_shape_connections
.target]
.reset_index(drop=True), lsuffix='_source',
rsuffix='_target'))
def reset_states(self):
self.electrode_states = pd.Series(name='electrode_states')
self.electrode_states.index.name = 'electrode_id'
def reset_routes(self):
self.df_routes = pd.DataFrame(None, columns=['route_i', 'electrode_i',
'transition_i'])
def set_device(self, dmf_device):
self.device = dmf_device
# Index channels by electrode ID for fast look up.
self.electrode_channels = (self.device.df_electrode_channels
.set_index('electrode_id'))
self.df_shapes = self.device.df_shapes
self.reset_routes()
self.reset_states()
x, y, width, height = self.widget.get_allocation()
if width > 0 and height > 0:
self.canvas = None
self._dirty_size = width, height
self.emit('device-set', dmf_device)
def get_labels(self):
if self.device is None:
return pd.Series(None, index=pd.Index([], name='channel'))
return (self.electrode_channels.astype(str)
.groupby(level='electrode_id', axis=0)
.agg(lambda v: ', '.join(v))['channel'])
###########################################################################
# Properties
@property
def connection_count(self):
return self.device.df_shape_connections.shape[0] if self.device else 0
@property
def shape_count(self):
return self.df_shapes[self.shape_i_column].unique().shape[0]
@property
def enabled(self):
return self._enabled
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if value in ('register_video', 'control'):
self._mode = value
###########################################################################
# ## Mutators ##
def insert_surface(self, position, name, surface, alpha=1.):
'''
Insert Cairo surface as new layer.
Args
----
position (int) : Index position to insert layer at.
name (str) : Name of layer.
        surface (cairo.ImageSurface) : Surface to render.
alpha (float) : Alpha/transparency level in the range `[0, 1]`.
'''
if name in self.df_surfaces.index:
raise NameError('Surface already exists with `name="{}"`.'
.format(name))
self.df_surfaces.loc[name] = surface, alpha
# Reorder layers such that the new surface is placed at the specified
# layer position (relative to the background surface).
surfaces_order = self.df_surfaces.index.values.tolist()
surfaces_order.remove(name)
base_index = surfaces_order.index('background') + 1
if position < 0:
position = len(surfaces_order) + position
surfaces_order.insert(base_index + position, name)
self.reorder_surfaces(surfaces_order)
def append_surface(self, name, surface, alpha=1.):
'''
Append Cairo surface as new layer on top of existing layers.
Args
----
name (str) : Name of layer.
surface (cairo.ImageSurface) : Surface to render.
alpha (float) : Alpha/transparency level in the range `[0, 1]`.
'''
self.insert_surface(position=self.df_surfaces.index.shape[0],
name=name, surface=surface, alpha=alpha)
def remove_surface(self, name):
'''
Remove layer from rendering stack and flatten remaining layers.
Args
----
name (str) : Name of layer.
'''
self.df_surfaces.drop(name, axis=0, inplace=True)
# Order of layers may have changed after removing a layer. Trigger
# refresh of surfaces.
self.reorder_surfaces(self.df_surfaces.index)
def clone_surface(self, source_name, target_name, target_position=-1,
alpha=1.):
'''
Clone surface from existing layer to a new name, inserting new surface
at specified position.
By default, new surface is appended as the top surface layer.
Args
----
source_name (str) : Name of layer to clone.
target_name (str) : Name of new layer.
'''
source_surface = self.df_surfaces.surface.ix[source_name]
source_width = source_surface.get_width()
source_height = source_surface.get_height()
source_format = source_surface.get_format()
target_surface = cairo.ImageSurface(source_format, source_width,
source_height)
target_cairo_context = cairo.Context(target_surface)
target_cairo_context.set_source_surface(source_surface, 0, 0)
target_cairo_context.paint()
self.insert_surface(target_position, target_name, target_surface,
alpha)
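    # A hedged illustration of the layer-management API above (the snapshot
    # layer name is made up for the example):
    #
    #     canvas.clone_surface('shapes', 'shapes_snapshot', alpha=0.5)
    #     canvas.set_surface_alpha('shapes_snapshot', 0.25)
    #     canvas.remove_surface('shapes_snapshot')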
def enable(self):
if self.callback_id is None:
self._enabled = True
self.set_surface('shapes', self.render_shapes())
# Add layer to which video frames will be rendered.
if 'video' in self.df_surfaces.index:
self.set_surface('video', self.render_shapes())
else:
self.df_surfaces.loc['video'] = self.render_shapes(), 1.
# Reorder layers such that the video layer is directly on top of
# the background layer.
surfaces_order = self.df_surfaces.index.values.tolist()
surfaces_order.remove('video')
surfaces_order.insert(surfaces_order.index('background') + 1,
'video')
self.reorder_surfaces(surfaces_order)
self.render()
self.callback_id = self.video_sink.connect('frame-update',
self.on_frame_update)
self.emit('video-enabled')
def disable(self):
if self.callback_id is not None:
self._enabled = False
self.set_surface('shapes', self.render_shapes())
self.video_sink.disconnect(self.callback_id)
self.callback_id = None
if 'video' in self.df_surfaces.index:
self.df_surfaces.drop('video', axis=0, inplace=True)
self.reorder_surfaces(self.df_surfaces.index)
self.emit('video-disabled')
self.on_frame_update(None, None)
###########################################################################
# ## Drawing area event handling ##
def check_dirty(self):
if self._dirty_size is not None:
width, height = self._dirty_size
self.set_shape(width, height)
transform_update_required = True
else:
transform_update_required = False
result = super(DmfDeviceCanvas, self).check_dirty()
if transform_update_required:
gtk.idle_add(self.update_transforms)
return result
def set_shape(self, width, height):
logger.debug('[set_shape]: Set drawing area shape to %sx%s', width,
height)
self.shape = width, height
# Set new target size for scaled frames from video sink.
self.video_sink.shape = width, height
self.update_transforms()
if not self._enabled:
gtk.idle_add(self.on_frame_update, None, None)
###########################################################################
# ## Drawing methods ##
def get_surfaces(self):
surface1 = cairo.ImageSurface(cairo.FORMAT_ARGB32, 320, 240)
surface1_context = cairo.Context(surface1)
surface1_context.set_source_rgba(0, 0, 1, .5)
surface1_context.rectangle(0, 0, surface1.get_width(), surface1.get_height())
surface1_context.fill()
surface2 = cairo.ImageSurface(cairo.FORMAT_ARGB32, 800, 600)
surface2_context = cairo.Context(surface2)
surface2_context.save()
surface2_context.translate(100, 200)
surface2_context.set_source_rgba(0, 1, .5, .5)
surface2_context.rectangle(0, 0, surface1.get_width(), surface1.get_height())
surface2_context.fill()
surface2_context.restore()
return [surface1, surface2]
def draw_surface(self, surface, operator=cairo.OPERATOR_OVER):
x, y, width, height = self.widget.get_allocation()
if width <= 0 and height <= 0 or self.widget.window is None:
return
cairo_context = self.widget.window.cairo_create()
cairo_context.set_operator(operator)
cairo_context.set_source_surface(surface)
cairo_context.rectangle(0, 0, width, height)
cairo_context.fill()
###########################################################################
# Render methods
def render_dynamic_electrode_state_shapes(self):
'''
Render **dynamic** states reported by the electrode controller.
**Dynamic** electrode states are only applied while a protocol is
running -- _not_ while in real-time programming mode.
See also :meth:`render_electrode_shapes()`.
.. versionadded:: 0.12
'''
df_shapes = self.canvas.df_canvas_shapes.copy()
# Only include shapes for electrodes reported as actuated.
on_electrodes = self._dynamic_electrodes[self._dynamic_electrodes > 0]
df_shapes = (df_shapes.set_index('id').loc[on_electrodes.index]
.reset_index())
return self.render_electrode_shapes(df_shapes=df_shapes,
shape_scale=0.75,
                                            # Light blue
fill=(136 / 255.,
189 / 255.,
230 / 255.))
def render_static_electrode_state_shapes(self):
'''
Render **static** states reported by the electrode controller.
**Static** electrode states are applied while a protocol is **running**
_or_ while **real-time** control is activated.
See also :meth:`render_electrode_shapes()`.
.. versionadded:: 0.12
'''
df_shapes = self.canvas.df_canvas_shapes.copy()
if self.electrode_states.shape[0]:
df_shapes['state'] = self.electrode_states.ix[df_shapes.id].values
else:
df_shapes['state'] = 0
df_shapes = df_shapes.loc[df_shapes.state > 0].dropna(subset=['state'])
return self.render_electrode_shapes(df_shapes=df_shapes)
def render_electrode_shapes(self, df_shapes=None, shape_scale=0.8,
fill=(1, 1, 1)):
'''
Render electrode state shapes.
By default, draw each electrode shape filled white.
See also :meth:`render_shapes()`.
Parameters
----------
        df_shapes : pandas.DataFrame
.. versionadded:: 0.12
'''
surface = self.get_surface()
if df_shapes is None:
if hasattr(self.canvas, 'df_canvas_shapes'):
df_shapes = self.canvas.df_canvas_shapes
else:
return surface
if 'x_center' not in df_shapes or 'y_center' not in df_shapes:
# No center points have been computed for shapes.
return surface
cairo_context = cairo.Context(surface)
df_shapes = df_shapes.copy()
# Scale shapes to leave shape edges uncovered.
df_shapes[['x', 'y']] = (df_shapes[['x_center', 'y_center']] +
df_shapes[['x_center_offset',
'y_center_offset']].values *
shape_scale)
for path_id, df_path_i in (df_shapes.groupby(self.canvas
.shape_i_columns)[['x',
'y']]):
# Use attribute lookup for `x` and `y`, since it is considerably
# faster than `get`-based lookup using columns name strings.
vertices_x = df_path_i.x.values
vertices_y = df_path_i.y.values
cairo_context.move_to(vertices_x[0], vertices_y[0])
for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]):
cairo_context.line_to(x, y)
cairo_context.close_path()
# Draw filled shape to indicate actuated electrode state.
cairo_context.set_source_rgba(*fill)
cairo_context.fill()
return surface
def render_background(self):
surface = self.get_surface()
context = cairo.Context(surface)
context.set_source_rgb(0, 0, 0)
context.paint()
return surface
def render_connections(self, indexes=None, hex_color='#fff', alpha=1.,
**kwargs):
surface = self.get_surface()
if not hasattr(self.canvas, 'df_connection_centers'):
return surface
cairo_context = cairo.Context(surface)
coords_columns = ['source', 'target',
'x_center_source', 'y_center_source',
'x_center_target', 'y_center_target']
df_connection_coords = (self.canvas.df_connection_centers
[coords_columns])
if indexes is not None:
df_connection_coords = df_connection_coords.loc[indexes].copy()
rgba = hex_color_to_rgba(hex_color, normalize_to=1.)
if rgba[-1] is None:
rgba = rgba[:-1] + (alpha, )
cairo_context.set_line_width(2.5)
for i, (target, source, x1, y1, x2, y2) in (df_connection_coords
.iterrows()):
cairo_context.move_to(x1, y1)
cairo_context.set_source_rgba(*rgba)
for k, v in kwargs.iteritems():
getattr(cairo_context, 'set_' + k)(v)
cairo_context.line_to(x2, y2)
cairo_context.stroke()
return surface
def render_shapes(self, df_shapes=None, clip=False):
'''
Render static electrode shapes (independent of actuation state).
If video is enabled, draw white outline for each electrode (no fill).
If video is disabled, draw white outline for each electrode and fill
blue.
See also :meth:`render_electrode_state_shapes()`.
'''
surface = self.get_surface()
if df_shapes is None:
if hasattr(self.canvas, 'df_canvas_shapes'):
df_shapes = self.canvas.df_canvas_shapes
else:
return surface
cairo_context = cairo.Context(surface)
for path_id, df_path_i in (df_shapes
.groupby(self.canvas
.shape_i_columns)[['x', 'y']]):
# Use attribute lookup for `x` and `y`, since it is considerably
# faster than `get`-based lookup using columns name strings.
vertices_x = df_path_i.x.values
vertices_y = df_path_i.y.values
cairo_context.move_to(vertices_x[0], vertices_y[0])
for x, y in itertools.izip(vertices_x[1:], vertices_y[1:]):
cairo_context.line_to(x, y)
cairo_context.close_path()
if self.enabled:
# Video is enabled.
# Draw white border around electrode.
line_width = 1
if path_id not in self.electrode_channels.index:
                    # Dash pattern: alternating 10-unit on/off segments.
dashes = [10, 10]
color = (1, 0, 1)
line_width *= 2
else:
dashes = []
color = (1, 1, 1)
cairo_context.set_dash(dashes)
cairo_context.set_line_width(line_width)
cairo_context.set_source_rgb(*color)
cairo_context.stroke()
else:
                # Video is disabled. Fill electrode blue.
color = ((0, 0, 1) if path_id in self.electrode_channels.index
else (1, 0, 1))
cairo_context.set_source_rgb(*color)
cairo_context.fill_preserve()
# Draw white border around electrode.
cairo_context.set_line_width(1)
cairo_context.set_source_rgba(1, 1, 1)
cairo_context.stroke()
return surface
def render_routes(self):
surface = self.get_surface()
if (not hasattr(self.device, 'df_shape_connections') or
not hasattr(self.canvas, 'df_shape_centers')):
return surface
cairo_context = cairo.Context(surface)
connections = self.device.df_shape_connections
for route_i, df_route in self.df_routes.groupby('route_i'):
source_id = df_route.electrode_i.iloc[0]
source_connections = connections.loc[(connections.source ==
source_id) |
(connections.target ==
source_id)]
# Colors from ["Show me the numbers"][1].
#
# [1]: http://blog.axc.net/its-the-colors-you-have/
# LiteOrange = rgb(251,178,88);
# MedOrange = rgb(250,164,58);
# LiteGreen = rgb(144,205,151);
# MedGreen = rgb(96,189,104);
if source_connections.shape[0] == 1:
# Electrode only has one adjacent electrode, assume reservoir.
color_rgb_255 = np.array([250, 164, 58, 255])
else:
color_rgb_255 = np.array([96, 189, 104, 255])
color = (color_rgb_255 / 255.).tolist()
self.draw_route(df_route, cairo_context, color=color,
line_width=.25)
return surface
def render_channel_labels(self, color_rgba=None):
return self.render_labels(self.get_labels(), color_rgba=color_rgba)
def render_registration(self):
'''
Render pinned points on video frame as red rectangle.
'''
surface = self.get_surface()
if self.canvas is None or self.df_canvas_corners.shape[0] == 0:
return surface
corners = self.df_canvas_corners.copy()
corners['w'] = 1
transform = self.canvas.shapes_to_canvas_transform
canvas_corners = corners.values.dot(transform.T.values).T
points_x = canvas_corners[0]
points_y = canvas_corners[1]
cairo_context = cairo.Context(surface)
cairo_context.move_to(points_x[0], points_y[0])
for x, y in zip(points_x[1:], points_y[1:]):
cairo_context.line_to(x, y)
cairo_context.line_to(points_x[0], points_y[0])
cairo_context.set_source_rgb(1, 0, 0)
cairo_context.stroke()
return surface
def set_surface(self, name, surface):
self.df_surfaces.loc[name, 'surface'] = surface
self.emit('surface-rendered', name, surface)
def set_surface_alpha(self, name, alpha):
if 'alpha' not in self.df_surfaces:
self.df_surfaces['alpha'] = 1.
if name in self.df_surfaces.index:
self.df_surfaces.loc[name, 'alpha'] = alpha
def reorder_surfaces(self, surface_names):
assert(len(surface_names) == self.df_surfaces.shape[0])
self.df_surfaces = self.df_surfaces.ix[surface_names]
self.emit('surfaces-reset', self.df_surfaces)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
def render(self):
'''
.. versionchanged:: 0.12
Add ``dynamic_electrode_state_shapes`` layer to show dynamic
electrode actuations.
'''
# Render each layer and update data frame with new content for each
# surface.
surface_names = ('background', 'shapes', 'connections', 'routes',
'channel_labels', 'static_electrode_state_shapes',
'dynamic_electrode_state_shapes', 'registration')
for k in surface_names:
self.set_surface(k, getattr(self, 'render_' + k)())
self.emit('surfaces-reset', self.df_surfaces)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
###########################################################################
# Drawing helper methods
def draw_route(self, df_route, cr, color=None, line_width=None):
'''
Draw a line between electrodes listed in a route.
Arguments
---------
- `df_route`:
* A `pandas.DataFrame` containing a column named `electrode_i`.
* For each row, `electrode_i` corresponds to the integer index of
the corresponding electrode.
- `cr`: Cairo context.
        - `color`: Either an RGB or RGBA tuple, with each color channel in the
          range [0, 1]. If `color` is `None`, a default green from the "Show
          me the numbers" palette is used.
'''
df_route_centers = (self.canvas.df_shape_centers
.ix[df_route.electrode_i][['x_center',
'y_center']])
df_endpoint_marker = (.6 * self.get_endpoint_marker(df_route_centers)
+ df_route_centers.iloc[-1].values)
# Save cairo context to restore after drawing route.
cr.save()
if color is None:
# Colors from ["Show me the numbers"][1].
#
# [1]: http://blog.axc.net/its-the-colors-you-have/
# LiteOrange = rgb(251,178,88);
# MedOrange = rgb(250,164,58);
# LiteGreen = rgb(144,205,151);
# MedGreen = rgb(96,189,104);
color_rgb_255 = np.array([96,189,104, .8 * 255])
color = (color_rgb_255 / 255.).tolist()
if len(color) < 4:
color += [1.] * (4 - len(color))
cr.set_source_rgba(*color)
cr.move_to(*df_route_centers.iloc[0])
for electrode_i, center_i in df_route_centers.iloc[1:].iterrows():
cr.line_to(*center_i)
if line_width is None:
line_width = np.sqrt((df_endpoint_marker.max().values -
df_endpoint_marker.min().values).prod()) * .1
cr.set_line_width(4)
cr.stroke()
cr.move_to(*df_endpoint_marker.iloc[0])
for electrode_i, center_i in df_endpoint_marker.iloc[1:].iterrows():
cr.line_to(*center_i)
cr.close_path()
cr.set_source_rgba(*color)
cr.fill()
# Restore cairo context after drawing route.
cr.restore()
def get_endpoint_marker(self, df_route_centers):
df_shapes = self.canvas.df_canvas_shapes
df_endpoint_electrode = df_shapes.loc[df_shapes.id ==
df_route_centers.index[-1]]
df_endpoint_bbox = (df_endpoint_electrode[['x_center_offset',
'y_center_offset']]
.describe().loc[['min', 'max']])
return pd.DataFrame([[df_endpoint_bbox.x_center_offset['min'],
df_endpoint_bbox.y_center_offset['min']],
[df_endpoint_bbox.x_center_offset['min'],
df_endpoint_bbox.y_center_offset['max']],
[df_endpoint_bbox.x_center_offset['max'],
df_endpoint_bbox.y_center_offset['max']],
[df_endpoint_bbox.x_center_offset['max'],
df_endpoint_bbox.y_center_offset['min']]],
columns=['x_center_offset', 'y_center_offset'])
###########################################################################
# ## Mouse event handling ##
def on_widget__button_press_event(self, widget, event):
'''
Called when any mouse button is pressed.
.. versionchanged:: 0.11
Do not trigger `route-electrode-added` event if `ALT` key is
pressed.
'''
if self.mode == 'register_video' and event.button == 1:
self.start_event = event.copy()
return
elif self.mode == 'control':
shape = self.canvas.find_shape(event.x, event.y)
if shape is None: return
state = event.get_state()
if event.button == 1:
# Start a new route.
self._route = Route(self.device)
self._route.append(shape)
self.last_pressed = shape
if not (state & gtk.gdk.MOD1_MASK):
# `<Alt>` key is not held down.
self.emit('route-electrode-added', shape)
def on_widget__button_release_event(self, widget, event):
'''
Called when any mouse button is released.
.. versionchanged:: 0.11.3
Always reset pending route, regardless of whether a route was
completed. This includes a) removing temporary routes from routes
table, and b) resetting the state of the current route electrode
queue. This fixes
https://github.com/sci-bots/microdrop/issues/256.
'''
event = event.copy()
if self.mode == 'register_video' and (event.button == 1 and
self.start_event is not None):
self.emit('point-pair-selected', {'start_event': self.start_event,
'end_event': event.copy()})
self.start_event = None
return
elif self.mode == 'control':
# XXX Negative `route_i` corresponds to temporary route being
# drawn. Since release of mouse button terminates route drawing,
# clear any rows corresponding to negative `route_i` values from
# the routes table.
self.df_routes = self.df_routes.loc[self.df_routes.route_i >=
0].copy()
shape = self.canvas.find_shape(event.x, event.y)
if shape is not None:
electrode_data = {'electrode_id': shape, 'event': event.copy()}
if event.button == 1:
if gtk.gdk.BUTTON1_MASK == event.get_state():
if self._route.append(shape):
self.emit('route-electrode-added', shape)
if len(self._route.electrode_ids) == 1:
# Single electrode, so select electrode.
self.emit('electrode-selected', electrode_data)
else:
# Multiple electrodes, so select route.
route = self._route
self.emit('route-selected', route)
elif (event.get_state() == (gtk.gdk.MOD1_MASK |
gtk.gdk.BUTTON1_MASK) and
self.last_pressed != shape):
# `<Alt>` key was held down.
self.emit('electrode-pair-selected',
{'source_id': self.last_pressed,
'target_id': shape, 'event': event.copy()})
self.last_pressed = None
elif event.button == 3:
# Create right-click pop-up menu.
menu = self.create_context_menu(event, shape)
# Display menu popup
menu.popup(None, None, None, event.button, event.time)
# Clear route.
self._route = None
def create_context_menu(self, event, shape):
'''
Parameters
----------
event : gtk.gdk.Event
GTK mouse click event.
shape : str
Electrode shape identifier (e.g., `"electrode028"`).
Returns
-------
gtk.Menu
Context menu.
.. versionchanged:: 0.13
- Deprecate hard-coded commands (e.g., clear electrodes, clear
routes).
- Add anonymous global commands section at head of menu (i.e.,
commands not specific to an electrode or route).
- Add "Electrode" and "Route(s)" sub-menus.
'''
routes = self.df_routes.loc[self.df_routes.electrode_i == shape,
'route_i'].astype(int).unique().tolist()
def _connect_callback(menu_item, command_signal, group, command, data):
callback_called = threading.Event()
def _callback(signal, widget, *args):
if callback_called.is_set():
return
callback_called.set()
_L().debug('`%s`: %s %s %s', signal, group, command, data)
gtk.idle_add(self.emit, command_signal, group, command, data)
menu_item.connect('activate', ft.partial(_callback, 'activate'))
menu_item.connect('button-press-event',
ft.partial(_callback, 'button-press-event'))
if group is not None:
menu_item.set_tooltip_text(group)
menu = gtk.Menu()
# Add menu items/groups for registered global commands.
if self.global_commands:
data = {'event': event.copy()}
command_signal = 'global-command'
for group, commands in self.global_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
# Add menu items/groups for registered electrode commands.
if self.electrode_commands:
separator = gtk.SeparatorMenuItem()
menu.append(separator)
# Add electrode sub-menu.
menu_e = gtk.Menu()
menu_head_e = gtk.MenuItem('_Electrode')
menu_head_e.set_submenu(menu_e)
menu_head_e.set_use_underline(True)
menu.append(menu_head_e)
command_signal = 'electrode-command'
data = {'electrode_id': shape, 'event': event.copy()}
for group, commands in self.electrode_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu_e.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
# Add menu items/groups for registered route commands.
if routes and self.route_commands:
# TODO: Refactor electrode/route command menu code to reduce code
# duplication (i.e., DRY).
separator = gtk.SeparatorMenuItem()
menu.append(separator)
# Add route sub-menu.
menu_r = gtk.Menu()
menu_head_r = gtk.MenuItem('_Route(s)')
menu_head_r.set_submenu(menu_r)
menu_head_r.set_use_underline(True)
menu.append(menu_head_r)
command_signal = 'route-command'
data = {'route_ids': routes, 'event': event.copy()}
for group, commands in self.route_commands.iteritems():
for command, title in commands.iteritems():
menu_item_j = gtk.MenuItem(title)
menu_r.append(menu_item_j)
_connect_callback(menu_item_j, command_signal, group,
command, data)
menu.show_all()
return menu
def on_widget__motion_notify_event(self, widget, event):
'''
Called when mouse pointer is moved within drawing area.
.. versionchanged:: 0.11
Do not trigger `route-electrode-added` event if `ALT` key is
pressed.
'''
if self.canvas is None:
# Canvas has not been initialized. Nothing to do.
return
elif event.is_hint:
pointer = event.window.get_pointer()
x, y, mod_type = pointer
else:
x = event.x
y = event.y
shape = self.canvas.find_shape(x, y)
# Grab focus to [enable notification on key press/release events][1].
#
# [1]: http://mailman.daa.com.au/cgi-bin/pipermail/pygtk/2003-August/005770.html
self.widget.grab_focus()
if shape != self.last_hovered:
if self.last_hovered is not None:
# Leaving shape
self.emit('electrode-mouseout', {'electrode_id':
self.last_hovered,
'event': event.copy()})
self.last_hovered = None
elif shape is not None:
# Entering shape
self.last_hovered = shape
if self._route is not None:
if self._route.append(shape) and not (event.get_state() &
gtk.gdk.MOD1_MASK):
# `<Alt>` key was not held down.
self.emit('route-electrode-added', shape)
self.emit('electrode-mouseover', {'electrode_id':
self.last_hovered,
'event': event.copy()})
def on_widget__key_press_event(self, widget, event):
'''
Called when key is pressed when widget has focus.
'''
self.emit('key-press', {'event': event.copy()})
def on_widget__key_release_event(self, widget, event):
'''
Called when key is released when widget has focus.
'''
self.emit('key-release', {'event': event.copy()})
###########################################################################
# ## Slave signal handling ##
def on_video_sink__frame_shape_changed(self, slave, old_shape, new_shape):
# Video frame is a new shape.
if old_shape is not None:
# Switched video resolution, so scale existing corners to maintain
# video registration.
old_shape = pd.Series(old_shape, dtype=float, index=['width',
'height'])
new_shape = pd.Series(new_shape, dtype=float, index=['width',
'height'])
old_aspect_ratio = old_shape.width / old_shape.height
new_aspect_ratio = new_shape.width / new_shape.height
if old_aspect_ratio != new_aspect_ratio:
# The aspect ratio has changed. The registration will have the
# proper rotational orientation, but the scale will be off and
# will require manual adjustment.
logger.warning('Aspect ratio does not match previous frame. '
'Manual adjustment of registration is required.')
corners_scale = new_shape / old_shape
df_frame_corners = self.df_frame_corners.copy()
df_frame_corners.y = old_shape.height - df_frame_corners.y
df_frame_corners *= corners_scale.values
df_frame_corners.y = new_shape.height - df_frame_corners.y
self.df_frame_corners = df_frame_corners
else:
# No existing frame shape, so nothing to scale from.
self.reset_frame_corners()
self.update_transforms()
def on_frame_update(self, slave, np_frame):
if self.widget.window is None:
return
if np_frame is None or not self._enabled:
if 'video' in self.df_surfaces.index:
self.df_surfaces.drop('video', axis=0, inplace=True)
self.reorder_surfaces(self.df_surfaces.index)
else:
cr_warped, np_warped_view = np_to_cairo(np_frame)
self.set_surface('video', cr_warped)
self.cairo_surface = flatten_surfaces(self.df_surfaces)
# Execute a few gtk main loop iterations to improve responsiveness when
# using high video frame rates.
#
# N.B., Without doing this, for example, some mouse over events may be
# missed, leading to problems drawing routes, etc.
for i in xrange(5):
if not gtk.events_pending():
break
gtk.main_iteration_do()
self.draw()
###########################################################################
# ## Electrode operation registration ##
def register_global_command(self, command, title=None, group=None):
'''
.. versionadded:: 0.13
Register global command (i.e., not specific to electrode or route).
Add global command to context menu.
'''
commands = self.global_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
def register_electrode_command(self, command, title=None, group=None):
'''
Register electrode command.
Add electrode plugin command to context menu.
'''
commands = self.electrode_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
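    # A hedged illustration of command registration (the group and command
    # names are made up; real commands are supplied by plugins):
    #
    #     canvas.register_global_command('clear_all_states',
    #                                    group='example.plugin')
    #     canvas.register_electrode_command('toggle_state',
    #                                       group='example.plugin')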
###########################################################################
# ## Route operation registration ##
def register_route_command(self, command, title=None, group=None):
'''
Register route command.
Add route plugin command to context menu.
'''
commands = self.route_commands.setdefault(group, OrderedDict())
if title is None:
title = (command[:1].upper() + command[1:]).replace('_', ' ')
commands[command] = title
| lgpl-2.1 | 1,564,422,671,550,628,900 | 40.079491 | 88 | 0.531251 | false |
hobson/pug-dj | pug/dj/miner/models.py | 1 | 13381 | import datetime
from django.db import models
#from django_hstore import hstore
from jsonfield import JSONField
from pug.nlp.db import representation
# FIXME: simplify circular import/dependencies with miner app
#from pug.dj.miner import explore
from model_mixin import DateMixin
class Connection(models.Model):
"The username, password, IP Address or URL required to access a database"
_IMPORTANT_FIELDS = ('pk', 'uri', 'user')
ip = models.CharField(max_length=15, null=True)
uri = models.CharField(max_length=256, null=True)
fqdn = models.CharField(max_length=128, null=True)
user = models.CharField(max_length=128, null=True)
password = models.CharField(max_length=128, null=True)
port = models.IntegerField(null=False)
def __unicode__(self):
return representation(self)
class AggregatedResults(DateMixin):
"""Storage a results json string that was returned by any restful web-based service
DateMixin adds the fields 'updated' and 'created'.
"""
name = models.CharField(max_length=2000, default='', blank=False)
slug = models.CharField(max_length=2000, default='', blank=False)
uri = models.URLField(max_length=2000, help_text='Base service URI without the GET API query')
get_dict = JSONField(
help_text='Complete GET Request URI')
filter_dict = JSONField(
help_text='The query `filter()` portion of the GET Request URI formatted in a form acceptable as a `queryset.filter(**filter_dict)`')
exclude_dict = JSONField(
help_text='The query `exclude()` portion of the GET Request URI formatted in a form evaluated as a `for k, v in exclude_dict.items(): queryset = queryset.exclude({k,v});`')
results = JSONField(
help_text="The dictionary of data used to display the Queries summary table at the top of the Quick Table with aggregate statistics 'mean' (lag), 'num_matches', 'num_returns', 'num_sales', 'effective_return_rate', 'word_match_rate', 'mura_match_rate', 'nprb_match_rate', 'last_update', 'num_mura_match', 'num_word_match', 'num_nprb_match'")
class Database(models.Model):
"""Metadata about a Database (postgres or Microsoft SQL "USE" argument)"""
_IMPORTANT_FIELDS = ('pk', 'name', 'date')
name = models.CharField(max_length=128, null=False, default='')
date = models.DateTimeField(help_text='Timestamp when the metadata was calculated', auto_now_add=True, null=False) # default=datetime.datetime.now, <-- mutually exclusive withe auto_now_add
connection = models.ForeignKey(Connection, null=True, default=None)
__unicode__ = representation
class Table(models.Model):
"""Metadata about a Database table and its Django model"""
_IMPORTANT_FIELDS = ('pk', 'django_model', 'db_table', 'count')
app = models.CharField(max_length=256, default='', null=False, blank=True)
database = models.ForeignKey(Database, default=None)
db_table = models.CharField(max_length=256, null=True)
django_model = models.CharField(max_length=256, null=True, default=None)
primary_key = models.OneToOneField('Field', null=True, default=None)
count = models.IntegerField(null=True, default=None)
__unicode__ = representation
class ChangeLog(models.Model):
'''Log of hash of `.values()` of records in any database.table (app.model)
Used to track changes to tables across databases.
Facilitates mirroring across databases.
'''
model = models.CharField(max_length=255, default='', null=False, blank=True)
app = models.CharField(max_length=255, default='', null=False, blank=True)
primary_key = models.IntegerField(default=None, null=True)
values_hash = models.IntegerField(db_index=True, help_text='Integer hash of a tuple of all of the fields, hash(tuple(record.values_list())), for the source data record.', default=None, null=True, blank=True)
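    # A hedged sketch of how a row hash could be recorded (the app name is
    # illustrative; `record` is any saved Django model instance):
    #
    #     values = type(record).objects.filter(pk=record.pk).values_list()[0]
    #     ChangeLog.objects.create(model=type(record).__name__,
    #                              app='call_center', primary_key=record.pk,
    #                              values_hash=hash(values))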
class Type(models.Model):
FIND_DJANGO_TYPE = {
'Integer': 'IntegerField',
'long': 'IntegerField',
'LONG': 'IntegerField',
'int': 'IntegerField',
'INT': 'IntegerField',
'float': 'FloatField',
'Float': 'FloatField',
'double': 'FloatField',
'Double': 'FloatField',
'char': 'CharField',
'str': 'CharField',
'CHAR': 'CharField',
'STR': 'CharField',
'string': 'CharField',
'STRING': 'CharField',
'text': 'TextField',
'TEXT': 'TextField',
        '23': '',
        '1043': '',
        '21': '',
        '25': '',
        '701': '',
        '1184': '',
        '1700': '',
'boolean': 'NullBooleanField',
'decimal': 'DecimalField',
'Decimal': 'DecimalField',
'DECIMAL': 'DecimalField',
'VARCHAR': 'CharField',
'NCHAR': 'CharField',
'NVARCHAR': 'CharField',
'SMALLINT': 'IntegerField',
'REAL': 'FloatField',
'DOUBLE PRECISION': 'FloatField',
'NUMERIC': 'FloatField',
'numeric': 'FloatField',
'NUMBER': 'FloatField',
'number': 'FloatField',
'DATE': 'DateField',
'TIME': 'TimeField',
'datetime': 'DateTimeField',
'Datetime': 'DateTimeField',
'TIMESTAMP': 'DateTimeField',
'TIMESTAMPTZ': 'DateTimeField',
}
CHOICES_NATIVE_TYPE = (
('image', 'A Microsoft binary image'),
)
CHOICES_ANSI_TYPE = (
        ('CHAR', 'Fixed-width *n*-character string, padded with spaces as needed'),
('VARCHAR', 'Variable-width string with a maximum size of *n* characters'),
('NCHAR', 'Fixed width string supporting an international character set'),
('NVARCHAR', 'Variable-width string supporting an international character set'),
('BIT', 'A fixed-length array of *n* bits'),
('BIT VARYING', 'An array of up to *n* bits'),
('INTEGER', 'An integer'),
('SMALLINT', 'A reduced-precision integer'),
('FLOAT', 'A floating-point number'),
('REAL', 'A floating-point number'),
('DOUBLE PRECISION', 'A floating-point number with greater precision'),
        ('NUMERIC', 'A number with arbitrary *precision* and *scale*, e.g. 123.45 has a *precision* of 5 and a *scale* of 2'),
        ('DECIMAL', 'A number with arbitrary *precision* and *scale*, e.g. 123.45 has a *precision* of 5 and a *scale* of 2'),
('DATE', 'A date value, e.g. 1970-12-25'),
('TIME', 'A time value, typically with precision of 1 "tick" or 100 nanoseconds, e.g. 06:01:02'),
('TIMESTAMP', 'A naive date and time value (without timezone information), typically with precision of 1 "tick" or 100 nanoseconds, e.g. 1970-12-25 06:01:02'),
('TIMESTAMPTZ', 'A date and time value with timezone, typically with precision of 1 "tick" or 100 nanoseconds, e.g. 1970-12-25 06:01:02'),
)
CHOICES_DJANGO_TYPE = (
(None, 'Null'),
('FloatField', 'FloatField'),
('ForeignKey', 'ForeignKey'), # inspectdb produces this
('CharField', 'CharField'), # inspectdb produces this
('TextField', 'TextField'), # inspectdb produces this
('IntegerField', 'IntegerField'),
('NullBooleanField', 'NullBooleanField'), # inspectdb produces this
('BooleanField', 'BooleanField'),
('DecimalField', 'DecimalField'),
('DateTimeField', 'DateTimeField'), # inspectdb produces this
('DateField', 'DateField'),
('TextField', 'TextField'), # inspectdb produces this
('DecimalField', 'DecimalField'), # inspectdb produces this
)
django_type = models.CharField(choices=CHOICES_DJANGO_TYPE, default=None, max_length=20, null=True)
ansi_type = models.CharField(choices=CHOICES_ANSI_TYPE, max_length=20, null=True)
__unicode__ = representation
class Field(models.Model):
"""Metadata about a Database field and its Django Field"""
_IMPORTANT_FIELDS = ('pk', 'db_column', 'db_table', 'type', 'fraction_distinct')
# objects = hstore.HStoreManager()
table_stats = models.ForeignKey(Table)
django_field = models.CharField(max_length=255, null=False, default='', blank=True)
max_length = models.IntegerField(null=True)
blank = models.BooleanField(default=False)
choices = models.TextField(null=True)
django_type = models.ForeignKey(Type, null=True, default=None)
type = models.CharField(max_length=32, null=False, blank=True, default='')
scale = models.IntegerField(null=True)
db_column = models.CharField(max_length=255, null=False, default='', blank=True)
display_size = models.IntegerField(null=True)
min = models.TextField(help_text='Python string representation (repr) of the minimum value', null=True) # repr() of minimum value
    max = models.TextField(help_text='Python string representation (repr) of the maximum value', null=True) # repr() of maximum value
shortest = models.TextField(help_text='Shortest string among the field values', null=True)
longest = models.TextField(help_text='Longest string among the field values', null=True)
num_distinct = models.IntegerField(help_text="count of distinct (different) discrete values within the column",
null=True, default=None)
num_null = models.IntegerField(null=True, default=None)
precision = models.IntegerField(null=True, default=None)
fraction_distinct = models.FloatField(help_text="num_distinct / float((count - num_null) or 1)",
null=True, default=None)
internal_size = models.IntegerField(null=True, default=None)
null_ok = models.NullBooleanField(default=None)
primary_key = models.NullBooleanField(default=None)
relative = models.ForeignKey('Field', help_text='A modeled foreign key or one-to-one relationship within the django model.', null=True, related_name='relative_source')
relative_type = models.CharField(choices=(('ForeignKey', 'ForeignKey'), ('OneToOneField', 'OneToOneField'), ('ManyToManyField', 'ManyToManyField')), max_length=20)
peer = models.ManyToManyField('Field', through='Correlation', help_text='A field statistically related to this one in some way other than as a foreign key')
# most_frequent = hstore.DictionaryField(db_index=True, default=None, null=True)
__unicode__ = representation
class Correlation(models.Model):
"Graph edges (connections) between fields. Can be across tables and databases."
source = models.ForeignKey(Field, related_name='source_correlation')
target = models.ForeignKey(Field, related_name='target_correlation')
correlation = models.FloatField(null=True)
mutual_information = models.FloatField(null=True)
shared_distinct_values = models.IntegerField(help_text='For nonreal, discrete-valued fields (strings, dates), the number of unique values that are shared between the two fields')
shared_values = models.IntegerField(help_text='For nonreal, discrete-valued fields (strings, dates), the number of values that are shared between the two fields, including duplicate occurrences of the same value')
    shared_distinct_words = models.IntegerField(help_text='For strings, the number of unique words that are shared between all the strings in each field')
shared_tokens = models.IntegerField(help_text='For strings, the number of unique tokens (words) that are shared between the two fields, including duplicate occurrences of the same value')
__unicode__ = representation
def import_meta(db_meta, db_name, db_date=None, verbosity=1):
db_obj, db_created = Database.objects.get_or_create(name=db_name, date=datetime.datetime.now())
for django_model, table_meta in db_meta.iteritems():
pk = table_meta['Meta'].get('primary_key', None)
if pk:
del(table_meta['Meta']['primary_key'])
table_obj, table_created = Table.objects.get_or_create(database=db_obj, django_model=django_model, **table_meta['Meta'])
for django_field, field_meta in table_meta.iteritems():
if django_field == "Meta":
# The table "Meta" has already been imported when Table object was created
continue
if verbosity > 1:
print django_field
if 'name' in field_meta and field_meta['name'] == django_field:
del(field_meta['name'])
if 'most_frequent' in field_meta:
field_meta['most_frequent'] = dict((str(k), '%016d' % v) for (k, v) in field_meta['most_frequent'])
#print field_meta['most_frequent']
del(field_meta['most_frequent']) # DatabaseError: can't adapt type 'HStoreDict'
field_obj, field_created = Field.objects.get_or_create(table_stats=table_obj, django_field=django_field, **field_meta)
if pk and pk in table_meta:
field_obj = Field.objects.get(table_stats=table_obj, django_field=pk, **table_meta[pk])
table_obj.django_field = field_obj
table_obj.save()
# def explore_app(app_name='call_center', verbosity=1):
# db_meta = explore.get_db_meta(app_name, verbosity=verbosity)
# try:
# print '&'*100
# print db_meta
# print '&'*100
# return import_meta(db_meta, db_name=app_name)
# except:
# return db_meta
| mit | 215,834,773,485,895,000 | 49.116105 | 348 | 0.653613 | false |
devilry/devilry-django | devilry/devilry_admin/views/dashboard/overview.py | 1 | 6086 | # -*- coding: utf-8 -*-
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_cradmin.devilry_listbuilder.period import AdminItemValue
from django.db import models
from itertools import groupby
from django.utils.translation import gettext, gettext_lazy
from django.views.generic import TemplateView
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers import listbuilder
from devilry.devilry_admin.listbuilder import admindashboard_subject_listbuilder
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Period, Subject
from devilry.devilry_account.models import SubjectPermissionGroup, PeriodPermissionGroup
from devilry.devilry_cradmin.devilry_listfilter.utils import WithResultValueRenderable, RowListWithMatchResults
class SubjectItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
"""
An item frame for the list of subjects in the Administrator Dashboard Overview
"""
valuealias = 'subject'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='subject_redirect',
roleid=self.subject.id,
viewname=crapp.INDEXVIEW_NAME
)
def get_extra_css_classes_list(self):
return ['devilry-admin-dashboard-overview-subjectitemframe']
class OrderSubjectFilter(listfilter.django.single.select.AbstractOrderBy):
def get_ordering_options(self):
return [
('', { # This will be the default sort order
'label': gettext_lazy('Short Name'),
'order_by': ['short_name'],
}),
('short_name_descending', {
'label': gettext_lazy('Short Name (descending)'),
'order_by': ['-short_name'],
}),
]
class SubjectListMatchResultRenderable(WithResultValueRenderable):
def get_object_name_singular(self, num_matches):
return gettext_lazy('course')
def get_object_name_plural(self, num_matches):
return gettext_lazy('courses')
class RowListBuilder(RowListWithMatchResults):
match_result_value_renderable = SubjectListMatchResultRenderable
class OverviewSubjectListView(listbuilderview.FilterListMixin, listbuilderview.View):
model = coremodels.Subject
template_name = 'devilry_admin/dashboard/overview.django.html'
listbuilder_class = RowListBuilder
frame_renderer_class = SubjectItemFrame
value_renderer_class = devilry_listbuilder.subject.AdminItemValue
paginate_by = 50
def get_pageheading(self):
return gettext("Administrator dashboard")
def get_pagetitle(self):
return self.get_pageheading()
def __get_all_subjects_where_user_is_subjectadmin(self):
return Subject.objects.filter_user_is_admin(user=self.request.user) \
.order_by('long_name') \
.distinct()
def __get_all_periods_where_user_is_subjectadmin_or_periodadmin(self):
groups = []
periods = Period.objects.filter_user_is_admin(user=self.request.user) \
.select_related('parentnode') \
.order_by('short_name', 'parentnode__long_name') \
.distinct()
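        # itertools.groupby only merges consecutive items, so the order_by('short_name')
        # above is what guarantees each group collects every period sharing that short name.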
for key, items in groupby(periods, lambda period: period.short_name):
groups.append(list(items))
return groups
def add_filterlist_items(self, filterlist):
"""
Add the filters to the filterlist.
"""
filterlist.append(listfilter.django.single.textinput.Search(
slug='search',
label='Search',
label_is_screenreader_only=True,
modelfields=['long_name']))
filterlist.append(OrderSubjectFilter(
slug='short_name', label=gettext_lazy('Short name')))
def get_filterlist_url(self, filters_string):
"""
This is used by the filterlist to create URLs.
"""
return self.request.cradmin_app.reverse_appurl(
'filter', kwargs={'filters_string': filters_string})
def get_unfiltered_queryset_for_role(self, site):
"""
Create the queryset, and apply the filters from the filterlist.
"""
        # Return Subjects where the user can be admin on the Subject and/or admin on a Period within the Subject
queryset = coremodels.Subject.objects\
.filter_user_is_admin_for_any_periods_within_subject(self.request.user)\
.prefetch_active_period_objects()
# Set unfiltered count on self.
self.num_total = queryset.count()
return queryset
def get_context_data(self, **kwargs):
context = super(OverviewSubjectListView, self).get_context_data(**kwargs)
context['subjects_where_user_is_subjectadmin'] = \
self.__get_all_subjects_where_user_is_subjectadmin()
context['periods_where_user_is_subjectadmin_or_periodadmin'] = \
self.__get_all_periods_where_user_is_subjectadmin_or_periodadmin()
return context
#
# Add support for showing results on the top of the list.
#
def get_listbuilder_list_kwargs(self):
kwargs = super(OverviewSubjectListView, self).get_listbuilder_list_kwargs()
kwargs['num_matches'] = self.num_matches or 0
kwargs['num_total'] = self.num_total or 0
kwargs['page'] = self.request.GET.get('page', 1)
return kwargs
def get_queryset_for_role(self, role):
queryset = super(OverviewSubjectListView, self).get_queryset_for_role(role=role)
# Set filtered count on self.
self.num_matches = queryset.count()
return queryset
class App(crapp.App):
appurls = [
crapp.Url(r'^$', OverviewSubjectListView.as_view(), name=crapp.INDEXVIEW_NAME),
crapp.Url(
r'^filter/(?P<filters_string>.+)?$',
OverviewSubjectListView.as_view(),
name='filter'),
]
| bsd-3-clause | -6,426,557,289,801,527,000 | 36.801242 | 111 | 0.670884 | false |
jeetsukumaran/Ginkgo | ginkgopy/ginkgo/ginkgogrid.py | 1 | 5281 | #! /usr/bin/env python
import random
import sys
import os
from ginkgo import argparse
##############################################################################\\
# Grid
class Grid(object):
def __init__(self, **kwargs):
self.ncols = kwargs.get("ncols", None)
self.nrows = kwargs.get("nrows", None)
self.value_type = kwargs.get("value_type", int)
self.values = None
self.matrix = None
if 'values' in kwargs:
self.values = kwargs['values']
elif 'pop_func' in kwargs:
self.populate(kwargs['pop_func'])
elif 'filepath' in kwargs:
self.read(open(kwargs['filepath'], "rU"))
elif 'stream' in kwargs:
self.read(kwargs['stream'])
else:
self.values = {}
self._max_formatted_value_len = None
def __str__(self):
return self.as_string(include_header=True)
def populate(self, func):
self.values = {}
for x in range(self.ncols):
self.values[x] = {}
for y in range(self.nrows):
self.values[x][y] = func(x, y)
def read(self, src):
self.values = []
for line in src:
line = line.replace('\n', '').strip()
parts = line.split(' ',1)
kw = parts[0].lower()
if kw == 'ncols':
assert len(parts) == 2
self.ncols = int(parts[1])
continue
elif kw == 'nrows':
assert len(parts) == 2
self.nrows = int(parts[1])
continue
elif kw in ['xllcorner', 'yllcorner', 'cellsize', 'nodata_value']:
continue
else:
parts = line.split(' ')
self.values.extend([self.value_type(i) for i in parts])
break
assert self.ncols > 0
assert self.nrows > 0
for line in src:
line = line.replace('\n', '').strip()
parts = line.split(' ')
self.values.extend([self.value_type(i) for i in parts])
return self.matrix_from_values()
def matrix_from_values(self):
assert len(self.values) == self.ncols * self.nrows
self.matrix = []
for r in range(self.nrows):
self.matrix.append([])
for c in range(self.ncols):
self.matrix[r].append(self.values[(r * self.ncols) + c])
assert len(self.matrix[r]) == self.ncols
assert len(self.matrix) == self.nrows
return self.matrix
def formatted_value_matrix(self, cell_width=None):
fv = {}
fv_lens = []
for x in range(self.ncols):
fv[x] = {}
for y in range(self.nrows):
v = self.values[x][y]
if isinstance(v, float):
fv[x][y] = "{0:>.4}".format(v)
else:
fv[x][y] = "{0:>}".format(v)
fv_lens.append(len(fv[x][y]))
if cell_width is None:
self._max_formatted_value_len = max(fv_lens)
else:
self._max_formatted_value_len = cell_width
return fv
def ascii_grid_header(self):
return ("""ncols {0}
nrows {1}
xllcorner 0.0
yllcorner 0.0
cellsize 50.0
NODATA_value -9999""").format(self.ncols, self.nrows)
def as_string(self, include_header=True, cell_width=None):
rows = []
if include_header:
            rows.append(self.ascii_grid_header())
fv = self.formatted_value_matrix(cell_width=cell_width)
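        # Values are written in 5x5 blocks: a blank line every 5 rows and an extra
        # space every 5 columns, purely to make large grids easier to read by eye.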
for y in range(self.nrows):
if y % 5 == 0:
rows.append("")
row = []
for x in range(self.ncols):
# leader = ("{0:{1}}".format(" ", self._max_formatted_value_len)) if (x and (x % 5 == 0)) else ""
leader = " " if (x and (x % 5 == 0)) else ""
v = fv[x][y]
row.append("{2}{0:>{1}}".format(v, self._max_formatted_value_len, leader))
rows.append(" ".join(row))
#rows.append("")
return "\n".join(rows)
###############################################################################\\
# Occurrences
class Occurrences(Grid):
def __init__(self, filepath=None):
Grid.__init__(self)
self.filepath = None
if filepath is not None:
self.read(open(filepath, "rU"))
def __str__(self):
s = []
for r in range(self.nrows):
s.append(" ".join(["{0:>3}".format(self.matrix[r][c]) for c in range(self.ncols)]))
return "\n".join(s)
###############################################################################\\
# Input Grid Generation
def random_gaussian_grid(ncols, nrows, mean=0, sd=1):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.gauss(mean, sd))
def random_uniform_real_grid(ncols, nrows, a, b):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.uniform(a, b))
def random_uniform_int_grid(ncols, nrows, a, b):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: random.randint(a, b))
def fixed_value_grid(ncols, nrows, val):
return Grid(ncols=ncols, nrows=nrows, pop_func=lambda x, y: val)
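# Illustrative usage (not part of the original module; the sizes and bounds below
# are arbitrary example values):
#   grid = random_uniform_int_grid(ncols=10, nrows=8, a=0, b=99)
#   print(grid.as_string(include_header=True))
# which prints an ESRI ASCII-grid style header (from ascii_grid_header) followed by
# the cell values laid out in 5x5 blocks.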
| gpl-3.0 | 8,019,885,483,017,476,000 | 32.636943 | 112 | 0.492142 | false |
dtroyer/python-openstackclient | openstackclient/tests/unit/object/v1/test_object_all.py | 1 | 8188 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from osc_lib import exceptions
from requests_mock.contrib import fixture
import six
from openstackclient.object.v1 import object as object_cmds
from openstackclient.tests.unit.object.v1 import fakes as object_fakes
class TestObjectAll(object_fakes.TestObjectv1):
def setUp(self):
super(TestObjectAll, self).setUp()
self.requests_mock = self.useFixture(fixture.Fixture())
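        # The fixture above intercepts every HTTP request the client issues, so each
        # test registers the URI and canned response it expects before calling take_action().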
class TestObjectCreate(TestObjectAll):
def setUp(self):
super(TestObjectCreate, self).setUp()
# Get the command object to test
self.cmd = object_cmds.CreateObject(self.app, None)
def test_multiple_object_create_with_object_name(self):
arglist = [
object_fakes.container_name,
object_fakes.object_name_1,
object_fakes.object_name_2,
'--name', object_fakes.object_upload_name,
]
verifylist = [
('container', object_fakes.container_name),
('objects', [object_fakes.object_name_1,
object_fakes.object_name_2]),
('name', object_fakes.object_upload_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError,
self.cmd.take_action,
parsed_args)
class TestObjectList(TestObjectAll):
columns = ('Name',)
def setUp(self):
super(TestObjectList, self).setUp()
# Get the command object to test
self.cmd = object_cmds.ListObject(self.app, None)
def test_object_list_objects_no_options(self):
return_body = [
copy.deepcopy(object_fakes.OBJECT),
copy.deepcopy(object_fakes.OBJECT_2),
]
self.requests_mock.register_uri(
'GET',
object_fakes.ENDPOINT +
'/' +
object_fakes.container_name +
'?format=json',
json=return_body,
status_code=200,
)
arglist = [
object_fakes.container_name,
]
verifylist = [
('container', object_fakes.container_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        # Lister.take_action() returns a (columns, data) tuple
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
datalist = [
(object_fakes.object_name_1, ),
(object_fakes.object_name_2, ),
]
self.assertEqual(datalist, list(data))
def test_object_list_objects_prefix(self):
return_body = [
copy.deepcopy(object_fakes.OBJECT_2),
]
self.requests_mock.register_uri(
'GET',
object_fakes.ENDPOINT +
'/' +
object_fakes.container_name_2 +
'?prefix=floppy&format=json',
json=return_body,
status_code=200,
)
arglist = [
'--prefix', 'floppy',
object_fakes.container_name_2,
]
verifylist = [
('prefix', 'floppy'),
('container', object_fakes.container_name_2),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
datalist = (
(object_fakes.object_name_2, ),
)
self.assertEqual(datalist, tuple(data))
class TestObjectShow(TestObjectAll):
def setUp(self):
super(TestObjectShow, self).setUp()
# Get the command object to test
self.cmd = object_cmds.ShowObject(self.app, None)
def test_object_show(self):
headers = {
'content-type': 'text/plain',
'content-length': '20',
'last-modified': 'yesterday',
'etag': '4c4e39a763d58392724bccf76a58783a',
'x-container-meta-owner': object_fakes.ACCOUNT_ID,
'x-object-manifest': 'manifest',
}
self.requests_mock.register_uri(
'HEAD',
'/'.join([
object_fakes.ENDPOINT,
object_fakes.container_name,
object_fakes.object_name_1,
]),
headers=headers,
status_code=200,
)
arglist = [
object_fakes.container_name,
object_fakes.object_name_1,
]
verifylist = [
('container', object_fakes.container_name),
('object', object_fakes.object_name_1),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
collist = (
'account',
'container',
'content-length',
'content-type',
'etag',
'last-modified',
'object',
'x-object-manifest',
)
self.assertEqual(collist, columns)
datalist = (
object_fakes.ACCOUNT_ID,
object_fakes.container_name,
'20',
'text/plain',
'4c4e39a763d58392724bccf76a58783a',
'yesterday',
object_fakes.object_name_1,
'manifest',
)
self.assertEqual(datalist, data)
class TestObjectSave(TestObjectAll):
def setUp(self):
super(TestObjectSave, self).setUp()
# Get the command object to test
self.cmd = object_cmds.SaveObject(self.app, None)
def test_save_to_stdout(self):
self.requests_mock.register_uri(
'GET',
object_fakes.ENDPOINT +
'/' +
object_fakes.container_name +
'/' +
object_fakes.object_name_1,
status_code=200,
content=object_fakes.object_1_content
)
arglist = [
object_fakes.container_name,
object_fakes.object_name_1,
'--file',
'-'
]
verifylist = [
('container', object_fakes.container_name),
('object', object_fakes.object_name_1),
('file', '-'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
class FakeStdout(six.BytesIO):
def __init__(self):
six.BytesIO.__init__(self)
self.context_manager_calls = []
def __enter__(self):
self.context_manager_calls.append('__enter__')
return self
def __exit__(self, *a):
self.context_manager_calls.append('__exit__')
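        # SaveObject reopens stdout's file descriptor in binary mode via os.fdopen,
        # so the test patches sys.stdout (for fileno) and os.fdopen (to capture the bytes).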
with mock.patch('sys.stdout') as fake_stdout, mock.patch(
'os.fdopen', return_value=FakeStdout()) as fake_fdopen:
fake_stdout.fileno.return_value = 123
self.cmd.take_action(parsed_args)
self.assertEqual(fake_fdopen.return_value.getvalue(),
object_fakes.object_1_content)
self.assertEqual(fake_fdopen.mock_calls, [mock.call(123, 'wb')])
self.assertEqual(fake_fdopen.return_value.context_manager_calls,
['__enter__', '__exit__'])
| apache-2.0 | 6,344,649,810,711,815,000 | 29.898113 | 79 | 0.559966 | false |
mattvonrocketstein/smash | smashlib/ipy3x/nbconvert/preprocessors/execute.py | 1 | 3693 | """Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from IPython.utils.traitlets import List, Unicode
from IPython.nbformat.v4 import output_from_msg
from .base import Preprocessor
from IPython.utils.traitlets import Integer
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, config=True,
help="The time to wait (in seconds) for output from executions."
)
extra_arguments = List(Unicode)
def preprocess(self, nb, resources):
from IPython.kernel import run_kernel
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
self.log.info("Executing notebook with kernel: %s" % kernel_name)
with run_kernel(kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
stderr=open(os.devnull, 'w')) as kc:
self.kc = kc
nb, resources = super(
ExecutePreprocessor, self).preprocess(nb, resources)
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
try:
outputs = self.run_cell(
self.kc.shell_channel, self.kc.iopub_channel, cell)
except Exception as e:
self.log.error("failed to run cell: " + repr(e))
self.log.error(str(cell.source))
raise
cell.outputs = outputs
return cell, resources
def run_cell(self, shell, iopub, cell):
msg_id = shell.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
msg = shell.get_msg(timeout=self.timeout)
except Empty:
self.log.error("Timeout waiting for execute reply")
raise
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
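        # Drain the IOPub channel until the kernel reports the 'idle' state,
        # collecting every output message that belongs to this execution.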
while True:
try:
msg = iopub.get_msg(timeout=self.timeout)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
| mit | -6,775,581,182,042,542,000 | 31.394737 | 86 | 0.541836 | false |
pettazz/pygooglevoice | examples/parse_sms.py | 1 | 1581 | #
# SMS test via Google Voice
#
# John Nagle
# [email protected]
#
from googlevoice import Voice
import BeautifulSoup
def extractsms(htmlsms):
"""
extractsms -- extract SMS messages from BeautifulSoup
tree of Google Voice SMS HTML.
Output is a list of dictionaries, one per message.
"""
msgitems = [] # accum message items here
# Extract all conversations by searching for a DIV with an ID at top level.
tree = BeautifulSoup.BeautifulSoup(htmlsms) # parse HTML into tree
conversations = tree.findAll("div", attrs={"id": True}, recursive=False)
for conversation in conversations:
# For each conversation, extract each row, which is one SMS message.
rows = conversation.findAll(attrs={"class": "gc-message-sms-row"})
for row in rows: # for all rows
# For each row, which is one message, extract all the fields.
# tag this message with conversation ID
msgitem = {"id": conversation["id"]}
spans = row.findAll("span", attrs={"class": True}, recursive=False)
for span in spans: # for all spans in row
cl = span["class"].replace('gc-message-sms-', '')
# put text in dict
msgitem[cl] = (" ".join(span.findAll(text=True))).strip()
msgitems.append(msgitem) # add msg dictionary to list
return msgitems
def run():
voice = Voice()
voice.login()
voice.sms()
for msg in extractsms(voice.sms.html):
print(msg)
__name__ == '__main__' and run()
| bsd-3-clause | 4,728,614,197,272,323,000 | 32.638298 | 79 | 0.611638 | false |
AlphaSmartDog/DeepLearningNotes | Note-6 A3CNet/Note 6 simple ACNet/ACNet_adjust.py | 1 | 3662 | import random
import tensorflow as tf
from FCNet import FCNet
LOSS_V = 100
ENTROPY_BETA = 0.05
_EPSILON = 1e-6
L2_P = 1e-5
L2_V = 1e-2
actor_learning_rate = 1e-3
critic_learning_rate = 1e-3
class ACNet(object):
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.action_space = list(range(self.action_size))
self.inputs = tf.placeholder(tf.float32, [None, state_size], 'inputs')
        self.actions = tf.placeholder(tf.int32, [None], 'action')  # which action was taken
self.a_t = tf.one_hot(self.actions, self.action_size, name='action_taken')
        self.targets = tf.placeholder(tf.float32, [None], 'discounted_reward')
# not immediate but n step discounted
self.R = tf.expand_dims(self.targets, axis=1)
# build network
self.actor = FCNet('actor')
self.critic = FCNet('critic')
# policy and deterministic policy
self.P = tf.nn.softmax(self.actor(self.inputs, self.action_size))
self.DP = tf.reduce_sum(self.P * self.a_t, axis=1, keep_dims=True)
# choose action one step, action probability
self.AP = tf.squeeze(self.P, axis=0)
self.log_DP = tf.log(self.DP + _EPSILON)
# value and advantage
self.V = self.critic(self.inputs, 1) # value predicted
self.A = self.R - self.V
# loss
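        # stop_gradient keeps the advantage fixed during the policy update, so the
        # actor loss does not backpropagate into the critic's value estimate.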
self.loss_policy = -tf.reduce_sum(self.log_DP * tf.stop_gradient(self.A))
self.loss_value = LOSS_V * tf.nn.l2_loss(self.A)
self.loss_entropy = ENTROPY_BETA * tf.reduce_sum(self.P * tf.log(self.P + _EPSILON))
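        # sum(P * log P) is the negative entropy, so minimizing this term (scaled by
        # ENTROPY_BETA) increases policy entropy and encourages exploration.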
# optimizer
#self.actor_optimizer = tf.train.AdamOptimizer(
# actor_learning_rate).minimize(self.loss_policy + self.loss_entropy)
#self.critic_optimizer = tf.train.AdamOptimizer(
# critic_learning_rate).minimize(self.loss_value)
self.l2_policy = L2_P * tf.add_n(self.actor.get_regularizers())
self.l2_value = L2_V * tf.add_n(self.critic.get_regularizers())
self.actor_optimizer = tf.train.AdamOptimizer(
actor_learning_rate).minimize(
self.loss_policy + self.l2_policy + self.loss_entropy)
self.critic_optimizer = tf.train.AdamOptimizer(
critic_learning_rate).minimize(
self.loss_value + self.l2_value)
# session
self.sess = tf.Session()
self.init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init_op)
self.default_graph = tf.get_default_graph()
self.default_graph.finalize()
def predict_value(self, state):
return self.sess.run(self.V, {self.inputs: state})
def predict_policy(self, state):
return self.sess.run(self.P, {self.inputs: state})
def predict_action(self, state):
policy = self.sess.run(self.AP, {self.inputs: state})
return random.choices(self.action_space, policy)[0]
def train_actor(self, states, actions, targets):
self.sess.run(self.actor_optimizer,
{self.inputs: states, self.actions: actions, self.targets: targets})
def train_critic(self, states, targets):
self.sess.run(self.critic_optimizer,
{self.inputs: states, self.targets: targets})
def get_loss(self, states, actions, targets):
fetches = [self.loss_policy, self.loss_entropy, self.l2_policy,
self.loss_value, self.l2_value]
feed_dict = {self.inputs: states, self.actions: actions, self.targets: targets}
return self.sess.run(fetches, feed_dict) | mit | -7,544,644,809,307,700,000 | 39.7 | 100 | 0.629164 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_operations.py | 1 | 4744 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Network Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Network/operations'} # type: ignore
| mit | -2,921,255,939,064,702,000 | 42.522936 | 133 | 0.640388 | false |
Azure/azure-sdk-for-python | sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/operations/_event_channels_operations.py | 1 | 20000 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EventChannelsOperations(object):
"""EventChannelsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventgrid.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
partner_namespace_name, # type: str
event_channel_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.EventChannel"
"""Get an event channel.
Get properties of an event channel.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_namespace_name: Name of the partner namespace.
:type partner_namespace_name: str
:param event_channel_name: Name of the event channel.
:type event_channel_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventChannel, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.EventChannel
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventChannel"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-15-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'partnerNamespaceName': self._serialize.url("partner_namespace_name", partner_namespace_name, 'str'),
'eventChannelName': self._serialize.url("event_channel_name", event_channel_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventChannel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerNamespaces/{partnerNamespaceName}/eventChannels/{eventChannelName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
partner_namespace_name, # type: str
event_channel_name, # type: str
event_channel_info, # type: "_models.EventChannel"
**kwargs # type: Any
):
# type: (...) -> "_models.EventChannel"
"""Create an event channel.
Asynchronously creates a new event channel with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_namespace_name: Name of the partner namespace.
:type partner_namespace_name: str
:param event_channel_name: Name of the event channel.
:type event_channel_name: str
:param event_channel_info: EventChannel information.
:type event_channel_info: ~azure.mgmt.eventgrid.models.EventChannel
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventChannel, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.EventChannel
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventChannel"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-15-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'partnerNamespaceName': self._serialize.url("partner_namespace_name", partner_namespace_name, 'str'),
'eventChannelName': self._serialize.url("event_channel_name", event_channel_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(event_channel_info, 'EventChannel')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventChannel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerNamespaces/{partnerNamespaceName}/eventChannels/{eventChannelName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
partner_namespace_name, # type: str
event_channel_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-15-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'partnerNamespaceName': self._serialize.url("partner_namespace_name", partner_namespace_name, 'str'),
'eventChannelName': self._serialize.url("event_channel_name", event_channel_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerNamespaces/{partnerNamespaceName}/eventChannels/{eventChannelName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
partner_namespace_name, # type: str
event_channel_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete an event channel.
Delete existing event channel.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_namespace_name: Name of the partner namespace.
:type partner_namespace_name: str
:param event_channel_name: Name of the event channel.
:type event_channel_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
partner_namespace_name=partner_namespace_name,
event_channel_name=event_channel_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'partnerNamespaceName': self._serialize.url("partner_namespace_name", partner_namespace_name, 'str'),
'eventChannelName': self._serialize.url("event_channel_name", event_channel_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerNamespaces/{partnerNamespaceName}/eventChannels/{eventChannelName}'} # type: ignore
def list_by_partner_namespace(
self,
resource_group_name, # type: str
partner_namespace_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EventChannelsListResult"]
"""List event channels.
List all the event channels in a partner namespace.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_namespace_name: Name of the partner namespace.
:type partner_namespace_name: str
:param filter: The query used to filter the search results using OData syntax. Filtering is
permitted on the 'name' property only and with limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventChannelsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.EventChannelsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventChannelsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-15-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_partner_namespace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'partnerNamespaceName': self._serialize.url("partner_namespace_name", partner_namespace_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('EventChannelsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_partner_namespace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerNamespaces/{partnerNamespaceName}/eventChannels'} # type: ignore
| mit | -8,665,553,303,715,228,000 | 49.377834 | 229 | 0.64735 | false |
datamicroscopes/lda | test/test_reuters.py | 1 | 4285 | import os
import numpy as np
from nose.plugins.attrib import attr
from nose.tools import assert_almost_equal, assert_dict_equal
from nose.tools import assert_list_equal
from microscopes.lda import model, runner
from microscopes.lda.definition import model_definition
from microscopes.common.rng import rng
from microscopes.lda import utils
# Based on test_lda_reuters.py in ariddell's lda
# https://github.com/ariddell/lda/blob/57f721b05ffbdec5cb11c2533f72aa1f9e6ed12d/lda/tests/test_lda_reuters.py
class TestLDANewsReuters():
@classmethod
def _load_docs(cls):
test_dir = os.path.dirname(__file__)
reuters_ldac_fn = os.path.join(test_dir, 'data', 'reuters.ldac')
with open(reuters_ldac_fn, 'r') as f:
cls.docs = utils.docs_from_ldac(f)
cls.V = utils.num_terms(cls.docs)
cls.N = len(cls.docs)
@classmethod
def setup_class(cls):
cls._load_docs()
cls.niters = 100 if os.environ.get('TRAVIS') else 2
cls.defn = model_definition(cls.N, cls.V)
cls.seed = 12345
cls.prng = rng(seed=cls.seed)
cls.latent = model.initialize(cls.defn, cls.docs, cls.prng)
cls.r = runner.runner(cls.defn, cls.docs, cls.latent)
cls.original_perplexity = cls.latent.perplexity()
cls.r.run(cls.prng, cls.niters)
cls.doc_topic = cls.latent.topic_distribution_by_document()
def test_lda_news(self):
assert len(self.doc_topic) == len(self.docs)
@attr('slow')
def test_lda_monotone(self):
# run additional iterations, verify improvement in log likelihood
self.r.run(self.prng, self.niters)
assert self.latent.perplexity() < self.original_perplexity
def test_lda_zero_iter(self):
# compare to model with 0 iterations
prng2 = rng(seed=54321)
latent2 = model.initialize(self.defn, self.docs, prng2)
assert latent2 is not None
r2 = runner.runner(self.defn, self.docs, latent2)
assert r2 is not None
doc_topic2 = latent2.topic_distribution_by_document()
assert doc_topic2 is not None
assert latent2.perplexity() > self.latent.perplexity()
@attr('slow')
def test_lda_random_seed(self):
# ensure that randomness is contained in rng
# by running model twice with same seed
niters = 10
# model 1
prng1 = rng(seed=54321)
latent1 = model.initialize(self.defn, self.docs, prng1)
runner1 = runner.runner(self.defn, self.docs, latent1)
runner1.run(prng1, niters)
# model2
prng2 = rng(seed=54321)
latent2 = model.initialize(self.defn, self.docs, prng2)
runner2 = runner.runner(self.defn, self.docs, latent2)
runner2.run(prng2, niters)
assert_list_equal(latent1.topic_distribution_by_document(),
latent2.topic_distribution_by_document())
for d1, d2 in zip(latent1.word_distribution_by_topic(),
latent2.word_distribution_by_topic()):
assert_dict_equal(d1, d2)
def test_lda_attributes(self):
assert np.array(self.doc_topic).shape == (self.N, self.latent.ntopics())
assert len(self.latent.word_distribution_by_topic()) == self.latent.ntopics()
for dist in self.latent.word_distribution_by_topic():
assert len(dist) == self.V
# check distributions sum to one
for dist in self.latent.word_distribution_by_topic():
assert_almost_equal(sum(dist.values()), 1)
for dist in self.latent.topic_distribution_by_document():
assert_almost_equal(sum(dist), 1)
def test_lda_1transform_basic(self):
n_docs = 3
n_topics = self.latent.ntopics()
docs_test = self.docs[0:n_docs]
doc_topic_test = np.array(self.latent.predict(docs_test, self.prng))
assert doc_topic_test.shape == (n_docs, n_topics)
np.testing.assert_almost_equal(doc_topic_test.sum(axis=1), np.ones(n_docs))
# one document
docs_test = self.docs[0]
doc_topic_test = np.array(self.latent.predict(docs_test, self.prng))
doc_topic_test.shape = (1, n_topics)
np.testing.assert_array_almost_equal(doc_topic_test.sum(axis=1), np.ones(1))
| bsd-3-clause | -5,277,405,691,397,815,000 | 37.258929 | 109 | 0.641774 | false |
bowenliu16/deepchem | deepchem/data/tests/test_datasets.py | 1 | 14450 | """
Tests for dataset creation
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import unittest
import tempfile
import os
import shutil
import numpy as np
import deepchem as dc
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
def test_sparsify_and_densify(self):
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features(self):
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
    # Test case where 2*n_samples < batch_size (data must be tiled more than twice)
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches(self):
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
    # Test case where 2*n_samples < batch_size (data must be tiled more than twice)
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(
batch_size, X_b, y_b, w_b, ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names(self):
"""Test that get_task_names returns correct task_names"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = dc.data.tests.load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted(["task0",
"task1", "task2", "task3", "task4", "task5", "task6", "task7", "task8",
"task9", "task10", "task11", "task12", "task13", "task14", "task15",
"task16"])
def test_get_data_shape(self):
"""Test that get_data_shape returns currect data shape"""
solubility_dataset = dc.data.tests.load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = dc.data.tests.load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len(self):
"""Test that len(dataset) works."""
solubility_dataset = dc.data.tests.load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard(self):
"""Test that resharding the dataset works."""
solubility_dataset = dc.data.tests.load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_select(self):
"""Test that dataset select works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.ones((num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
indices = [0, 4, 5, 8]
select_dataset = dataset.select(indices)
X_sel, y_sel, w_sel, ids_sel = (select_dataset.X, select_dataset.y,
select_dataset.w, select_dataset.ids)
np.testing.assert_array_equal(X[indices], X_sel)
np.testing.assert_array_equal(y[indices], y_sel)
np.testing.assert_array_equal(w[indices], w_sel)
np.testing.assert_array_equal(ids[indices], ids_sel)
def test_get_shape(self):
"""Test that get_shape works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_iterbatches(self):
"""Test that iterating over batches of data works."""
solubility_dataset = dc.data.tests.load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy(self):
"""Test that iterating over samples in a NumpyDataset works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
for i, (sx, sy, sw, sid) in enumerate(dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_itersamples_disk(self):
"""Test that iterating over samples in a DiskDataset works."""
solubility_dataset = dc.data.tests.load_solubility_data()
X = solubility_dataset.X
y = solubility_dataset.y
w = solubility_dataset.w
ids = solubility_dataset.ids
for i, (sx, sy, sw, sid) in enumerate(solubility_dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_transform_numpy(self):
"""Test that the transform() method works for NumpyDatasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Transform it
def fn(x, y, w):
return (2*x, 1.5*y, w)
transformed = dataset.transform(fn)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2*X, transformed.X)
np.testing.assert_array_equal(1.5*y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_transform_disk(self):
"""Test that the transform() method works for DiskDatasets."""
dataset = dc.data.tests.load_solubility_data()
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
# Transform it
def fn(x, y, w):
return (2*x, 1.5*y, w)
transformed = dataset.transform(fn)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2*X, transformed.X)
np.testing.assert_array_equal(1.5*y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_to_numpy(self):
"""Test that transformation to numpy arrays is sensible."""
solubility_dataset = dc.data.tests.load_solubility_data()
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
N_samples = len(solubility_dataset)
N_tasks = len(tasks)
assert X.shape == (N_samples,) + data_shape
assert y.shape == (N_samples, N_tasks)
assert w.shape == (N_samples, N_tasks)
assert ids.shape == (N_samples,)
def test_consistent_ordering(self):
"""Test that ordering of labels is consistent over time."""
solubility_dataset = dc.data.tests.load_solubility_data()
ids1 = solubility_dataset.ids
ids2 = solubility_dataset.ids
assert np.array_equal(ids1, ids2)
def test_get_statistics(self):
"""Test statistics computation of this dataset."""
solubility_dataset = dc.data.tests.load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
X_means, y_means = np.mean(X, axis=0), np.mean(y, axis=0)
X_stds, y_stds = np.std(X, axis=0), np.std(y, axis=0)
comp_X_means, comp_X_stds, comp_y_means, comp_y_stds = \
solubility_dataset.get_statistics()
np.testing.assert_allclose(comp_X_means, X_means)
np.testing.assert_allclose(comp_y_means, y_means)
np.testing.assert_allclose(comp_X_stds, X_stds)
np.testing.assert_allclose(comp_y_stds, y_stds)
| gpl-3.0 | -8,183,733,697,165,437,000 | 36.72846 | 79 | 0.637024 | false |
n6g7/django_markdown | django_markdown/widgets.py | 1 | 1852 | """ Widgets for django-markdown. """
import os
from django import forms
from django.contrib.admin.widgets import AdminTextareaWidget
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.safestring import mark_safe
from . import settings
from .utils import editor_js_initialization
class MarkdownWidget(forms.Textarea):
""" Widget for a textarea.
Takes two additional optional keyword arguments:
``markdown_set_name``
Name for current set. Default: value of MARKDOWN_SET_NAME setting.
``markdown_skin``
Name for current skin. Default: value of MARKDOWN_EDITOR_SKIN setting.
"""
def __init__(self, attrs=None):
super(MarkdownWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
""" Render widget.
:returns: The rendered HTML string
"""
html = super(MarkdownWidget, self).render(name, value, attrs)
attrs = self.build_attrs(attrs)
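# editor_js_initialization (imported from .utils) presumably returns the <script>
# snippet that attaches the markItUp!-based markdown editor to this textarea via
# its DOM id; the snippet is appended to the textarea markup rendered above.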
html += editor_js_initialization("#%s" % attrs['id'])
return mark_safe(html)
class Media:
css = {
'screen': (
staticfiles_storage.url(os.path.join('django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN, 'style.css')),
staticfiles_storage.url(os.path.join(settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css'))
)
}
js = (
staticfiles_storage.url(os.path.join('django_markdown', 'jquery.init.js')),
staticfiles_storage.url(os.path.join('django_markdown', 'jquery.markitup.js')),
staticfiles_storage.url(os.path.join(settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'set.js'))
)
class AdminMarkdownWidget(MarkdownWidget, AdminTextareaWidget):
""" Support markdown widget in Django Admin. """
pass
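# Usage sketch: a minimal Django form using this widget might look like the
# following, assuming the standard django.forms API and that the package is
# importable as ``django_markdown``; MarkdownWidget itself only takes the usual
# ``attrs`` argument here.
#
#     from django import forms
#     from django_markdown.widgets import MarkdownWidget
#
#     class PostForm(forms.Form):
#         body = forms.CharField(widget=MarkdownWidget())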
| lgpl-3.0 | 4,647,823,019,649,413,000 | 30.389831 | 126 | 0.654428 | false |
xanthics/gw2craft-python3 | auto_gen/Armorsmith.py | 1 | 86598 | # -*- coding: utf-8 -*-
# Created: 2018-01-04T19:46:58 PST
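# Layout of the auto-generated table below (inferred from the data itself, not
# from upstream documentation): each key appears to be the item id of something
# an armorsmith can craft, 'min'/'max' look like the crafting-level window for
# which the recipe is relevant, and 'recipe' maps ingredient item ids to the
# quantity required.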
recipes = {
9420: {'min': 0, 'max': 40, 'recipe': {19679: 10}},
9421: {'min': 75, 'max': 115, 'recipe': {13010: 1, 19683: 10}},
9422: {'min': 150, 'max': 190, 'recipe': {13006: 1, 19688: 10}},
9423: {'min': 225, 'max': 265, 'recipe': {13007: 1, 19681: 10}},
9429: {'min': 25, 'max': 65, 'recipe': {19679: 10, 24284: 3}},
9430: {'min': 25, 'max': 65, 'recipe': {19679: 10, 24272: 3}},
9451: {'min': 100, 'max': 140, 'recipe': {13010: 1, 19683: 10, 24285: 3}},
9454: {'min': 100, 'max': 140, 'recipe': {13010: 1, 19683: 10, 24273: 3}},
9459: {'min': 250, 'max': 290, 'recipe': {13007: 1, 19681: 10, 24287: 3}},
9462: {'min': 250, 'max': 290, 'recipe': {13007: 1, 19681: 10, 24275: 3}},
9466: {'min': 175, 'max': 215, 'recipe': {13006: 1, 19688: 10, 24274: 3}},
9468: {'min': 175, 'max': 215, 'recipe': {13006: 1, 19688: 10, 24286: 3}},
9586: {'min': 300, 'max': 340, 'recipe': {13008: 1, 19684: 10}},
9588: {'min': 325, 'max': 365, 'recipe': {13008: 1, 19684: 10, 24288: 3}},
9589: {'min': 325, 'max': 365, 'recipe': {13008: 1, 19684: 10, 24276: 3}},
9591: {'min': 400, 'max': 440, 'recipe': {13009: 1, 19685: 10}},
9593: {'min': 400, 'max': 440, 'recipe': {13009: 1, 19685: 10, 24289: 3}},
9594: {'min': 400, 'max': 440, 'recipe': {13009: 1, 19685: 10, 24277: 3}},
10262: {'min': 0, 'max': 40, 'recipe': {13105: 1, 13106: 1, 19795: 1}},
10263: {'min': 0, 'max': 40, 'recipe': {13105: 1, 13106: 1, 19798: 1}},
10264: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19799: 1}},
10265: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19802: 1}},
10266: {'min': 25, 'max': 65, 'recipe': {13105: 1, 13106: 1, 19796: 1}},
10267: {'min': 25, 'max': 65, 'recipe': {13105: 1, 13106: 1, 19797: 1}},
10268: {'min': 0, 'max': 40, 'recipe': {13103: 1, 13108: 1, 19795: 1}},
10269: {'min': 0, 'max': 40, 'recipe': {13103: 1, 13108: 1, 19798: 1}},
10270: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19799: 1}},
10271: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19802: 1}},
10272: {'min': 25, 'max': 65, 'recipe': {13103: 1, 13108: 1, 19796: 1}},
10273: {'min': 25, 'max': 65, 'recipe': {13103: 1, 13108: 1, 19797: 1}},
10274: {'min': 0, 'max': 40, 'recipe': {13107: 1, 13110: 1, 19795: 1}},
10275: {'min': 0, 'max': 40, 'recipe': {13107: 1, 13110: 1, 19798: 1}},
10276: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19799: 1}},
10277: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19802: 1}},
10278: {'min': 25, 'max': 65, 'recipe': {13107: 1, 13110: 1, 19796: 1}},
10279: {'min': 25, 'max': 65, 'recipe': {13107: 1, 13110: 1, 19797: 1}},
10280: {'min': 0, 'max': 40, 'recipe': {13099: 1, 13100: 1, 19795: 1}},
10281: {'min': 0, 'max': 40, 'recipe': {13099: 1, 13100: 1, 19798: 1}},
10282: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19799: 1}},
10283: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19802: 1}},
10284: {'min': 25, 'max': 65, 'recipe': {13099: 1, 13100: 1, 19796: 1}},
10285: {'min': 25, 'max': 65, 'recipe': {13099: 1, 13100: 1, 19797: 1}},
10286: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19803: 1}},
10287: {'min': 75, 'max': 115, 'recipe': {13111: 1, 13114: 1, 19804: 1}},
10289: {'min': 100, 'max': 140, 'recipe': {13111: 1, 13114: 1, 19808: 1}},
10291: {'min': 100, 'max': 140, 'recipe': {13111: 1, 13114: 1, 19809: 1}},
10292: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19810: 1}},
10293: {'min': 100, 'max': 140, 'recipe': {13111: 1, 13114: 1, 19874: 1}},
10294: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19811: 1}},
10295: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19803: 1}},
10296: {'min': 75, 'max': 115, 'recipe': {13115: 1, 13116: 1, 19804: 1}},
10298: {'min': 100, 'max': 140, 'recipe': {13115: 1, 13116: 1, 19808: 1}},
10300: {'min': 100, 'max': 140, 'recipe': {13115: 1, 13116: 1, 19809: 1}},
10301: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19810: 1}},
10302: {'min': 100, 'max': 140, 'recipe': {13115: 1, 13116: 1, 19874: 1}},
10303: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19811: 1}},
10304: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19803: 1}},
10305: {'min': 75, 'max': 115, 'recipe': {13112: 1, 13117: 1, 19804: 1}},
10307: {'min': 100, 'max': 140, 'recipe': {13112: 1, 13117: 1, 19808: 1}},
10309: {'min': 100, 'max': 140, 'recipe': {13112: 1, 13117: 1, 19809: 1}},
10310: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19810: 1}},
10311: {'min': 100, 'max': 140, 'recipe': {13112: 1, 13117: 1, 19874: 1}},
10312: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19811: 1}},
10313: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19803: 1}},
10314: {'min': 75, 'max': 115, 'recipe': {13120: 1, 13121: 1, 19804: 1}},
10316: {'min': 100, 'max': 140, 'recipe': {13120: 1, 13121: 1, 19808: 1}},
10318: {'min': 100, 'max': 140, 'recipe': {13120: 1, 13121: 1, 19809: 1}},
10319: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19810: 1}},
10320: {'min': 100, 'max': 140, 'recipe': {13120: 1, 13121: 1, 19874: 1}},
10321: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19811: 1}},
10323: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19815: 1}},
10324: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19819: 1}},
10325: {'min': 175, 'max': 215, 'recipe': {13123: 1, 13125: 1, 19812: 1}},
10326: {'min': 150, 'max': 190, 'recipe': {13123: 1, 13125: 1, 19867: 1}},
10327: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19816: 1}},
10329: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19868: 1}},
10330: {'min': 175, 'max': 215, 'recipe': {13123: 1, 13125: 1, 19813: 1}},
10331: {'min': 175, 'max': 215, 'recipe': {13123: 1, 13125: 1, 19814: 1}},
10332: {'min': 150, 'max': 190, 'recipe': {13123: 1, 13125: 1, 19818: 1}},
10333: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19821: 1}},
10334: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19815: 1}},
10335: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19819: 1}},
10336: {'min': 175, 'max': 215, 'recipe': {13126: 1, 13127: 1, 19812: 1}},
10338: {'min': 150, 'max': 190, 'recipe': {13126: 1, 13127: 1, 19867: 1}},
10339: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19816: 1}},
10341: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19868: 1}},
10342: {'min': 175, 'max': 215, 'recipe': {13126: 1, 13127: 1, 19813: 1}},
10343: {'min': 175, 'max': 215, 'recipe': {13126: 1, 13127: 1, 19814: 1}},
10344: {'min': 150, 'max': 190, 'recipe': {13126: 1, 13127: 1, 19818: 1}},
10345: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19821: 1}},
10346: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19815: 1}},
10347: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19819: 1}},
10348: {'min': 175, 'max': 215, 'recipe': {13128: 1, 13129: 1, 19812: 1}},
10350: {'min': 150, 'max': 190, 'recipe': {13128: 1, 13129: 1, 19867: 1}},
10351: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19816: 1}},
10353: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19868: 1}},
10354: {'min': 175, 'max': 215, 'recipe': {13128: 1, 13129: 1, 19813: 1}},
10355: {'min': 175, 'max': 215, 'recipe': {13128: 1, 13129: 1, 19814: 1}},
10356: {'min': 150, 'max': 190, 'recipe': {13128: 1, 13129: 1, 19818: 1}},
10357: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19821: 1}},
10358: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19815: 1}},
10359: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19819: 1}},
10360: {'min': 175, 'max': 215, 'recipe': {13132: 1, 13133: 1, 19812: 1}},
10362: {'min': 150, 'max': 190, 'recipe': {13132: 1, 13133: 1, 19867: 1}},
10363: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19816: 1}},
10365: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19868: 1}},
10366: {'min': 175, 'max': 215, 'recipe': {13132: 1, 13133: 1, 19813: 1}},
10367: {'min': 175, 'max': 215, 'recipe': {13132: 1, 13133: 1, 19814: 1}},
10368: {'min': 150, 'max': 190, 'recipe': {13132: 1, 13133: 1, 19818: 1}},
10369: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19821: 1}},
10370: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19815: 1}},
10371: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19819: 1}},
10372: {'min': 175, 'max': 215, 'recipe': {13124: 1, 13134: 1, 19812: 1}},
10374: {'min': 150, 'max': 190, 'recipe': {13124: 1, 13134: 1, 19867: 1}},
10375: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19816: 1}},
10377: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19868: 1}},
10378: {'min': 175, 'max': 215, 'recipe': {13124: 1, 13134: 1, 19813: 1}},
10379: {'min': 175, 'max': 215, 'recipe': {13124: 1, 13134: 1, 19814: 1}},
10380: {'min': 150, 'max': 190, 'recipe': {13124: 1, 13134: 1, 19818: 1}},
10381: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19821: 1}},
10382: {'min': 25, 'max': 65, 'recipe': {13103: 1, 13108: 1, 19805: 1}},
10383: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19800: 1}},
10384: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19801: 1}},
10385: {'min': 25, 'max': 65, 'recipe': {13105: 1, 13106: 1, 19805: 1}},
10386: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19800: 1}},
10387: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19801: 1}},
10388: {'min': 25, 'max': 65, 'recipe': {13107: 1, 13110: 1, 19805: 1}},
10389: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19800: 1}},
10390: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19801: 1}},
10391: {'min': 25, 'max': 65, 'recipe': {13099: 1, 13100: 1, 19805: 1}},
10392: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19800: 1}},
10393: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19801: 1}},
10394: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19795: 1}},
10395: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19798: 1}},
10396: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19799: 1}},
10397: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19802: 1}},
10398: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19796: 1}},
10399: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19797: 1}},
10400: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19805: 1}},
10401: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19800: 1}},
10402: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19801: 1}},
10404: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19803: 1}},
10405: {'min': 100, 'max': 140, 'recipe': {13118: 1, 13119: 1, 19808: 1}},
10407: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19810: 1}},
10408: {'min': 75, 'max': 115, 'recipe': {13118: 1, 13119: 1, 19804: 1}},
10409: {'min': 100, 'max': 140, 'recipe': {13118: 1, 13119: 1, 19809: 1}},
10410: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19811: 1}},
10411: {'min': 100, 'max': 140, 'recipe': {13118: 1, 13119: 1, 19874: 1}},
10413: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19815: 1}},
10414: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19819: 1}},
10415: {'min': 150, 'max': 190, 'recipe': {13130: 1, 13131: 1, 19867: 1}},
10416: {'min': 175, 'max': 215, 'recipe': {13130: 1, 13131: 1, 19812: 1}},
10417: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19868: 1}},
10418: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19816: 1}},
10420: {'min': 175, 'max': 215, 'recipe': {13130: 1, 13131: 1, 19813: 1}},
10421: {'min': 150, 'max': 190, 'recipe': {13130: 1, 13131: 1, 19818: 1}},
10422: {'min': 175, 'max': 215, 'recipe': {13130: 1, 13131: 1, 19814: 1}},
10423: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19821: 1}},
10424: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19870: 1}},
10425: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19795: 1}},
10426: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19798: 1}},
10427: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19805: 1}},
10428: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19796: 1}},
10429: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19797: 1}},
10430: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19871: 1}},
10431: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19869: 1}},
10432: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19799: 1}},
10433: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19800: 1}},
10434: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19801: 1}},
10435: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 19802: 1}},
10436: {'min': 75, 'max': 115, 'recipe': {13113: 1, 13122: 1, 19872: 1}},
10438: {'min': 75, 'max': 115, 'recipe': {13113: 1, 13122: 1, 19804: 1}},
10439: {'min': 100, 'max': 140, 'recipe': {13113: 1, 13122: 1, 19808: 1}},
10440: {'min': 100, 'max': 140, 'recipe': {13113: 1, 13122: 1, 19809: 1}},
10441: {'min': 100, 'max': 140, 'recipe': {13113: 1, 13122: 1, 19874: 1}},
10442: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19873: 1}},
10444: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19803: 1}},
10445: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19810: 1}},
10446: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19811: 1}},
10447: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19875: 1}},
10448: {'min': 0, 'max': 40, 'recipe': {13099: 1, 13100: 1, 19870: 1}},
10449: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19869: 1}},
10450: {'min': 50, 'max': 90, 'recipe': {13099: 1, 13100: 1, 19871: 1}},
10451: {'min': 75, 'max': 115, 'recipe': {13120: 1, 13121: 1, 19872: 1}},
10452: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19875: 1}},
10453: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19873: 1}},
10454: {'min': 0, 'max': 40, 'recipe': {13103: 1, 13108: 1, 19870: 1}},
10455: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19869: 1}},
10456: {'min': 50, 'max': 90, 'recipe': {13103: 1, 13108: 1, 19871: 1}},
10457: {'min': 75, 'max': 115, 'recipe': {13111: 1, 13114: 1, 19872: 1}},
10458: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19875: 1}},
10459: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19873: 1}},
10460: {'min': 0, 'max': 40, 'recipe': {13107: 1, 13110: 1, 19870: 1}},
10461: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19869: 1}},
10462: {'min': 50, 'max': 90, 'recipe': {13107: 1, 13110: 1, 19871: 1}},
10463: {'min': 75, 'max': 115, 'recipe': {13112: 1, 13117: 1, 19872: 1}},
10464: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19875: 1}},
10465: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19873: 1}},
10466: {'min': 0, 'max': 40, 'recipe': {13105: 1, 13106: 1, 19870: 1}},
10467: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19869: 1}},
10468: {'min': 50, 'max': 90, 'recipe': {13105: 1, 13106: 1, 19871: 1}},
10469: {'min': 75, 'max': 115, 'recipe': {13115: 1, 13116: 1, 19872: 1}},
10470: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19875: 1}},
10471: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19873: 1}},
10472: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 19870: 1}},
10473: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19869: 1}},
10474: {'min': 50, 'max': 90, 'recipe': {13101: 1, 13102: 1, 19871: 1}},
10475: {'min': 75, 'max': 115, 'recipe': {13118: 1, 13119: 1, 19872: 1}},
10476: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19875: 1}},
10477: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19873: 1}},
10478: {'min': 225, 'max': 265, 'recipe': {13136: 1, 13146: 1, 19906: 1}},
10479: {'min': 225, 'max': 265, 'recipe': {13136: 1, 13146: 1, 19904: 1}},
10480: {'min': 225, 'max': 265, 'recipe': {13136: 1, 13146: 1, 19905: 1}},
10481: {'min': 250, 'max': 290, 'recipe': {13136: 1, 13146: 1, 19852: 1}},
10482: {'min': 250, 'max': 290, 'recipe': {13136: 1, 13146: 1, 19850: 1}},
10483: {'min': 250, 'max': 290, 'recipe': {13136: 1, 13146: 1, 19851: 1}},
10485: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19849: 1}},
10486: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19877: 1}},
10487: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19909: 1}},
10488: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19907: 1}},
10489: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19908: 1}},
10490: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19879: 1}},
10492: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19859: 1}},
10493: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19858: 1}},
10494: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19856: 1}},
10495: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19860: 1}},
10497: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 19861: 1}},
10498: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 19866: 1}},
10499: {'min': 300, 'max': 340, 'recipe': {13148: 1, 13158: 1, 19862: 1}},
10500: {'min': 300, 'max': 340, 'recipe': {13148: 1, 13158: 1, 19865: 1}},
10501: {'min': 300, 'max': 340, 'recipe': {13148: 1, 13158: 1, 19864: 1}},
10502: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19855: 1}},
10503: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 19863: 1}},
10505: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19857: 1}},
10506: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19910: 1}},
10507: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19911: 1}},
10509: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19913: 1}},
10510: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19914: 1}},
10511: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19915: 1}},
10512: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19916: 1}},
10513: {'min': 225, 'max': 265, 'recipe': {13144: 1, 13145: 1, 19906: 1}},
10514: {'min': 225, 'max': 265, 'recipe': {13144: 1, 13145: 1, 19904: 1}},
10515: {'min': 225, 'max': 265, 'recipe': {13144: 1, 13145: 1, 19905: 1}},
10516: {'min': 250, 'max': 290, 'recipe': {13144: 1, 13145: 1, 19852: 1}},
10517: {'min': 250, 'max': 290, 'recipe': {13144: 1, 13145: 1, 19850: 1}},
10518: {'min': 250, 'max': 290, 'recipe': {13144: 1, 13145: 1, 19851: 1}},
10520: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19849: 1}},
10521: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19877: 1}},
10522: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19909: 1}},
10523: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19907: 1}},
10524: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19908: 1}},
10525: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19879: 1}},
10527: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19859: 1}},
10528: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19858: 1}},
10529: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19856: 1}},
10530: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19860: 1}},
10532: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 19861: 1}},
10533: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 19866: 1}},
10534: {'min': 300, 'max': 340, 'recipe': {13156: 1, 13157: 1, 19862: 1}},
10535: {'min': 300, 'max': 340, 'recipe': {13156: 1, 13157: 1, 19865: 1}},
10536: {'min': 300, 'max': 340, 'recipe': {13156: 1, 13157: 1, 19864: 1}},
10537: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19855: 1}},
10538: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 19863: 1}},
10540: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19857: 1}},
10541: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19910: 1}},
10542: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19911: 1}},
10544: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19913: 1}},
10545: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19914: 1}},
10546: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19915: 1}},
10547: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19916: 1}},
10548: {'min': 225, 'max': 265, 'recipe': {13135: 1, 13137: 1, 19906: 1}},
10549: {'min': 225, 'max': 265, 'recipe': {13135: 1, 13137: 1, 19904: 1}},
10550: {'min': 225, 'max': 265, 'recipe': {13135: 1, 13137: 1, 19905: 1}},
10551: {'min': 250, 'max': 290, 'recipe': {13135: 1, 13137: 1, 19852: 1}},
10552: {'min': 250, 'max': 290, 'recipe': {13135: 1, 13137: 1, 19850: 1}},
10553: {'min': 250, 'max': 290, 'recipe': {13135: 1, 13137: 1, 19851: 1}},
10555: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19849: 1}},
10556: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19877: 1}},
10557: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19909: 1}},
10558: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19907: 1}},
10559: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19908: 1}},
10560: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19879: 1}},
10562: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19859: 1}},
10563: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19858: 1}},
10564: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19856: 1}},
10565: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19860: 1}},
10567: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 19861: 1}},
10568: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 19866: 1}},
10569: {'min': 300, 'max': 340, 'recipe': {13147: 1, 13149: 1, 19862: 1}},
10570: {'min': 300, 'max': 340, 'recipe': {13147: 1, 13149: 1, 19865: 1}},
10571: {'min': 300, 'max': 340, 'recipe': {13147: 1, 13149: 1, 19864: 1}},
10572: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19855: 1}},
10573: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 19863: 1}},
10575: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19857: 1}},
10576: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19910: 1}},
10577: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19911: 1}},
10579: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19913: 1}},
10580: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19914: 1}},
10581: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19915: 1}},
10582: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19916: 1}},
10583: {'min': 225, 'max': 265, 'recipe': {13140: 1, 13141: 1, 19906: 1}},
10584: {'min': 225, 'max': 265, 'recipe': {13140: 1, 13141: 1, 19904: 1}},
10585: {'min': 225, 'max': 265, 'recipe': {13140: 1, 13141: 1, 19905: 1}},
10586: {'min': 250, 'max': 290, 'recipe': {13140: 1, 13141: 1, 19852: 1}},
10587: {'min': 250, 'max': 290, 'recipe': {13140: 1, 13141: 1, 19850: 1}},
10588: {'min': 250, 'max': 290, 'recipe': {13140: 1, 13141: 1, 19851: 1}},
10590: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19849: 1}},
10591: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19877: 1}},
10592: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19909: 1}},
10593: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19907: 1}},
10594: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19908: 1}},
10595: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19879: 1}},
10597: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19859: 1}},
10598: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19858: 1}},
10599: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19856: 1}},
10600: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19860: 1}},
10602: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 19861: 1}},
10603: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 19866: 1}},
10604: {'min': 300, 'max': 340, 'recipe': {13152: 1, 13153: 1, 19862: 1}},
10605: {'min': 300, 'max': 340, 'recipe': {13152: 1, 13153: 1, 19865: 1}},
10606: {'min': 300, 'max': 340, 'recipe': {13152: 1, 13153: 1, 19864: 1}},
10607: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19855: 1}},
10608: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 19863: 1}},
10610: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19857: 1}},
10611: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19910: 1}},
10612: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19911: 1}},
10614: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19913: 1}},
10615: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19914: 1}},
10616: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19915: 1}},
10617: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19916: 1}},
10618: {'min': 225, 'max': 265, 'recipe': {13138: 1, 13139: 1, 19906: 1}},
10619: {'min': 225, 'max': 265, 'recipe': {13138: 1, 13139: 1, 19904: 1}},
10620: {'min': 225, 'max': 265, 'recipe': {13138: 1, 13139: 1, 19905: 1}},
10621: {'min': 250, 'max': 290, 'recipe': {13138: 1, 13139: 1, 19852: 1}},
10622: {'min': 250, 'max': 290, 'recipe': {13138: 1, 13139: 1, 19850: 1}},
10623: {'min': 250, 'max': 290, 'recipe': {13138: 1, 13139: 1, 19851: 1}},
10625: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19849: 1}},
10626: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19877: 1}},
10627: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19909: 1}},
10628: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19907: 1}},
10629: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19908: 1}},
10630: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19879: 1}},
10632: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19910: 1}},
10633: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19911: 1}},
10635: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19913: 1}},
10636: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19914: 1}},
10637: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19915: 1}},
10638: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19916: 1}},
10639: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19859: 1}},
10640: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19858: 1}},
10641: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19856: 1}},
10642: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19860: 1}},
10644: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 19861: 1}},
10645: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 19866: 1}},
10646: {'min': 300, 'max': 340, 'recipe': {13150: 1, 13151: 1, 19862: 1}},
10647: {'min': 300, 'max': 340, 'recipe': {13150: 1, 13151: 1, 19865: 1}},
10648: {'min': 300, 'max': 340, 'recipe': {13150: 1, 13151: 1, 19864: 1}},
10649: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19855: 1}},
10650: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 19863: 1}},
10652: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19857: 1}},
10653: {'min': 225, 'max': 265, 'recipe': {13142: 1, 13143: 1, 19906: 1}},
10654: {'min': 225, 'max': 265, 'recipe': {13142: 1, 13143: 1, 19904: 1}},
10655: {'min': 225, 'max': 265, 'recipe': {13142: 1, 13143: 1, 19905: 1}},
10656: {'min': 250, 'max': 290, 'recipe': {13142: 1, 13143: 1, 19852: 1}},
10657: {'min': 250, 'max': 290, 'recipe': {13142: 1, 13143: 1, 19850: 1}},
10658: {'min': 250, 'max': 290, 'recipe': {13142: 1, 13143: 1, 19851: 1}},
10660: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19849: 1}},
10661: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19877: 1}},
10662: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19909: 1}},
10663: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19907: 1}},
10664: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19908: 1}},
10665: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19879: 1}},
10667: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19859: 1}},
10668: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19858: 1}},
10669: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19856: 1}},
10670: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19860: 1}},
10672: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 19861: 1}},
10673: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 19866: 1}},
10674: {'min': 300, 'max': 340, 'recipe': {13154: 1, 13155: 1, 19862: 1}},
10675: {'min': 300, 'max': 340, 'recipe': {13154: 1, 13155: 1, 19865: 1}},
10676: {'min': 300, 'max': 340, 'recipe': {13154: 1, 13155: 1, 19864: 1}},
10677: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19855: 1}},
10678: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 19863: 1}},
10680: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19857: 1}},
10681: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19910: 1}},
10682: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19911: 1}},
10684: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19913: 1}},
10685: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19914: 1}},
10686: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19915: 1}},
10687: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19916: 1}},
10688: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19880: 1}},
10689: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19881: 1}},
10691: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19883: 1}},
10692: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19884: 1}},
10693: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19885: 1}},
10694: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19886: 1}},
10695: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19880: 1}},
10696: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19881: 1}},
10698: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19883: 1}},
10699: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19884: 1}},
10700: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19885: 1}},
10701: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19886: 1}},
10702: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19880: 1}},
10703: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19881: 1}},
10705: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19883: 1}},
10706: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19884: 1}},
10707: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19885: 1}},
10708: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19886: 1}},
10709: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19880: 1}},
10710: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19881: 1}},
10712: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19883: 1}},
10713: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19884: 1}},
10714: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19885: 1}},
10715: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19886: 1}},
10716: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19880: 1}},
10717: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19881: 1}},
10719: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19883: 1}},
10720: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19884: 1}},
10721: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19885: 1}},
10722: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19886: 1}},
10723: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19880: 1}},
10724: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19881: 1}},
10726: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19883: 1}},
10727: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19884: 1}},
10728: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19885: 1}},
10729: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19886: 1}},
10730: {'min': 0, 'max': 40, 'recipe': {13099: 1, 13100: 1, 24290: 1}},
10731: {'min': 0, 'max': 40, 'recipe': {13103: 1, 13108: 1, 24290: 1}},
10732: {'min': 0, 'max': 40, 'recipe': {13107: 1, 13110: 1, 24290: 1}},
10733: {'min': 0, 'max': 40, 'recipe': {13105: 1, 13106: 1, 24290: 1}},
10734: {'min': 100, 'max': 140, 'recipe': {13113: 1, 13122: 1, 19930: 1}},
10735: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19931: 1}},
10736: {'min': 175, 'max': 215, 'recipe': {13124: 1, 13134: 1, 19932: 1}},
10737: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19933: 1}},
10738: {'min': 100, 'max': 140, 'recipe': {13120: 1, 13121: 1, 19930: 1}},
10739: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19931: 1}},
10740: {'min': 175, 'max': 215, 'recipe': {13132: 1, 13133: 1, 19932: 1}},
10741: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19933: 1}},
10742: {'min': 100, 'max': 140, 'recipe': {13111: 1, 13114: 1, 19930: 1}},
10743: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19931: 1}},
10744: {'min': 175, 'max': 215, 'recipe': {13123: 1, 13125: 1, 19932: 1}},
10745: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19933: 1}},
10746: {'min': 100, 'max': 140, 'recipe': {13112: 1, 13117: 1, 19930: 1}},
10747: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19931: 1}},
10748: {'min': 175, 'max': 215, 'recipe': {13128: 1, 13129: 1, 19932: 1}},
10749: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19933: 1}},
10750: {'min': 100, 'max': 140, 'recipe': {13115: 1, 13116: 1, 19930: 1}},
10751: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19931: 1}},
10752: {'min': 175, 'max': 215, 'recipe': {13126: 1, 13127: 1, 19932: 1}},
10753: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19933: 1}},
10754: {'min': 100, 'max': 140, 'recipe': {13118: 1, 13119: 1, 19930: 1}},
10755: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19931: 1}},
10756: {'min': 175, 'max': 215, 'recipe': {13130: 1, 13131: 1, 19932: 1}},
10757: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19933: 1}},
10758: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19955: 1}},
10759: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19956: 1}},
10760: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19957: 1}},
10762: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19959: 1}},
10763: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19960: 1}},
10764: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19961: 1}},
10765: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19962: 1}},
10766: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19963: 1}},
10767: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19964: 1}},
10769: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19966: 1}},
10770: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19967: 1}},
10771: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19968: 1}},
10772: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19969: 1}},
10773: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19970: 1}},
10775: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19972: 1}},
10776: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19973: 1}},
10777: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19974: 1}},
10778: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19975: 1}},
10779: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19955: 1}},
10780: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19956: 1}},
10781: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19957: 1}},
10783: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19959: 1}},
10784: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19960: 1}},
10785: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19961: 1}},
10786: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19962: 1}},
10787: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19963: 1}},
10788: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19964: 1}},
10790: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19966: 1}},
10791: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19967: 1}},
10792: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19968: 1}},
10793: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19969: 1}},
10794: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19970: 1}},
10796: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19972: 1}},
10797: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19973: 1}},
10798: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19974: 1}},
10799: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19975: 1}},
10800: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19955: 1}},
10801: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19956: 1}},
10802: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19957: 1}},
10804: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19959: 1}},
10805: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19960: 1}},
10806: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19961: 1}},
10807: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19962: 1}},
10808: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19963: 1}},
10809: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19964: 1}},
10811: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19966: 1}},
10812: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19967: 1}},
10813: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19968: 1}},
10814: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19969: 1}},
10815: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19970: 1}},
10817: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19972: 1}},
10818: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19973: 1}},
10819: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19974: 1}},
10820: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19975: 1}},
10821: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19955: 1}},
10822: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19956: 1}},
10823: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19957: 1}},
10825: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19959: 1}},
10826: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19960: 1}},
10827: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19961: 1}},
10828: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19962: 1}},
10829: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19963: 1}},
10830: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19964: 1}},
10832: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19966: 1}},
10833: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19967: 1}},
10834: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19968: 1}},
10835: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19969: 1}},
10836: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19970: 1}},
10838: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19972: 1}},
10839: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19973: 1}},
10840: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19974: 1}},
10841: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19975: 1}},
10842: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19955: 1}},
10843: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19956: 1}},
10844: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19957: 1}},
10846: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19959: 1}},
10847: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19960: 1}},
10848: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19961: 1}},
10849: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19962: 1}},
10850: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19963: 1}},
10851: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19964: 1}},
10853: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19966: 1}},
10854: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19967: 1}},
10855: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19968: 1}},
10856: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19969: 1}},
10857: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19970: 1}},
10859: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19972: 1}},
10860: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19973: 1}},
10861: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19974: 1}},
10862: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19975: 1}},
10863: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19955: 1}},
10864: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19956: 1}},
10865: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19957: 1}},
10867: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19959: 1}},
10868: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19960: 1}},
10869: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19961: 1}},
10870: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19962: 1}},
10871: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19963: 1}},
10872: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19964: 1}},
10874: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19966: 1}},
10875: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19967: 1}},
10876: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19968: 1}},
10877: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19969: 1}},
10878: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19970: 1}},
10880: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19972: 1}},
10881: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19973: 1}},
10882: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19974: 1}},
10883: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19975: 1}},
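# The entries from here on (13xxx component ids, then 19xxx refinement and
# insignia ids) appear to be the intermediate parts referenced as ingredients by
# the finished-armor recipes above; they use the same {'min', 'max', 'recipe'} shape.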
13099: {'min': 0, 'max': 25, 'recipe': {19720: 1, 19792: 3}},
13100: {'min': 0, 'max': 25, 'recipe': {19679: 4}},
13101: {'min': 25, 'max': 50, 'recipe': {19720: 2, 19792: 1}},
13102: {'min': 25, 'max': 50, 'recipe': {19679: 1}},
13103: {'min': 0, 'max': 25, 'recipe': {19720: 1, 19792: 1}},
13104: {'min': 50, 'max': 75, 'recipe': {19720: 1, 19792: 1}},
13105: {'min': 0, 'max': 25, 'recipe': {19720: 1, 19792: 3}},
13106: {'min': 0, 'max': 25, 'recipe': {19679: 5}},
13107: {'min': 0, 'max': 25, 'recipe': {19720: 1, 19792: 1}},
13108: {'min': 0, 'max': 25, 'recipe': {19679: 2}},
13109: {'min': 50, 'max': 75, 'recipe': {19679: 2}},
13110: {'min': 0, 'max': 25, 'recipe': {19679: 2}},
13111: {'min': 75, 'max': 100, 'recipe': {19740: 1, 19789: 1}},
13112: {'min': 75, 'max': 100, 'recipe': {19683: 2}},
13113: {'min': 75, 'max': 100, 'recipe': {19683: 2}},
13114: {'min': 75, 'max': 100, 'recipe': {19683: 2}},
13115: {'min': 75, 'max': 100, 'recipe': {19740: 1, 19789: 3}},
13116: {'min': 75, 'max': 100, 'recipe': {19683: 5}},
13117: {'min': 75, 'max': 100, 'recipe': {19740: 1, 19789: 1}},
13118: {'min': 75, 'max': 100, 'recipe': {19740: 2, 19789: 1}},
13119: {'min': 75, 'max': 100, 'recipe': {19683: 1}},
13120: {'min': 75, 'max': 100, 'recipe': {19740: 1, 19789: 3}},
13121: {'min': 75, 'max': 100, 'recipe': {19683: 4}},
13122: {'min': 75, 'max': 100, 'recipe': {19740: 1, 19789: 1}},
13123: {'min': 150, 'max': 175, 'recipe': {19688: 2}},
13124: {'min': 150, 'max': 175, 'recipe': {19688: 2}},
13125: {'min': 150, 'max': 175, 'recipe': {19742: 1, 19794: 1}},
13126: {'min': 150, 'max': 175, 'recipe': {19742: 1, 19794: 3}},
13127: {'min': 150, 'max': 175, 'recipe': {19688: 5}},
13128: {'min': 150, 'max': 175, 'recipe': {19742: 1, 19794: 1}},
13129: {'min': 150, 'max': 175, 'recipe': {19688: 2}},
13130: {'min': 150, 'max': 175, 'recipe': {19742: 2, 19794: 1}},
13131: {'min': 150, 'max': 175, 'recipe': {19688: 1}},
13132: {'min': 150, 'max': 175, 'recipe': {19742: 1, 19794: 3}},
13133: {'min': 150, 'max': 175, 'recipe': {19688: 4}},
13134: {'min': 150, 'max': 175, 'recipe': {19742: 1, 19794: 1}},
13135: {'min': 225, 'max': 250, 'recipe': {19681: 2}},
13136: {'min': 225, 'max': 250, 'recipe': {19681: 2}},
13137: {'min': 225, 'max': 250, 'recipe': {19744: 1, 19793: 1}},
13138: {'min': 225, 'max': 250, 'recipe': {19744: 1, 19793: 3}},
13139: {'min': 225, 'max': 250, 'recipe': {19681: 5}},
13140: {'min': 225, 'max': 250, 'recipe': {19744: 1, 19793: 1}},
13141: {'min': 225, 'max': 250, 'recipe': {19681: 2}},
13142: {'min': 225, 'max': 250, 'recipe': {19744: 2, 19793: 1}},
13143: {'min': 225, 'max': 250, 'recipe': {19681: 1}},
13144: {'min': 225, 'max': 250, 'recipe': {19744: 1, 19793: 3}},
13145: {'min': 225, 'max': 250, 'recipe': {19681: 4}},
13146: {'min': 225, 'max': 250, 'recipe': {19744: 1, 19793: 1}},
13147: {'min': 300, 'max': 325, 'recipe': {19684: 2}},
13148: {'min': 300, 'max': 325, 'recipe': {19684: 2}},
13149: {'min': 300, 'max': 325, 'recipe': {19747: 1, 19791: 1}},
13150: {'min': 300, 'max': 325, 'recipe': {19747: 1, 19791: 3}},
13151: {'min': 300, 'max': 325, 'recipe': {19684: 5}},
13152: {'min': 300, 'max': 325, 'recipe': {19747: 1, 19791: 1}},
13153: {'min': 300, 'max': 325, 'recipe': {19684: 2}},
13154: {'min': 300, 'max': 325, 'recipe': {19747: 2, 19791: 1}},
13155: {'min': 300, 'max': 325, 'recipe': {19684: 1}},
13156: {'min': 300, 'max': 325, 'recipe': {19747: 1, 19791: 3}},
13157: {'min': 300, 'max': 325, 'recipe': {19684: 4}},
13158: {'min': 300, 'max': 325, 'recipe': {19747: 1, 19791: 1}},
13159: {'min': 400, 'max': 425, 'recipe': {19685: 2}},
13160: {'min': 400, 'max': 425, 'recipe': {19685: 2}},
13161: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1}},
13162: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3}},
13163: {'min': 400, 'max': 425, 'recipe': {19685: 5}},
13164: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1}},
13165: {'min': 400, 'max': 425, 'recipe': {19685: 2}},
13166: {'min': 400, 'max': 425, 'recipe': {19746: 2, 19790: 1}},
13167: {'min': 400, 'max': 425, 'recipe': {19685: 1}},
13168: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3}},
13169: {'min': 400, 'max': 425, 'recipe': {19685: 4}},
13170: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1}},
19679: {'min': 0, 'max': 25, 'recipe': {19697: 10, 19704: 1}},
19681: {'min': 225, 'max': 250, 'recipe': {19702: 2, 19924: 1}},
19683: {'min': 75, 'max': 100, 'recipe': {19699: 3}},
19684: {'min': 300, 'max': 325, 'recipe': {19700: 2}},
19685: {'min': 400, 'max': 425, 'recipe': {19701: 2}},
19688: {'min': 150, 'max': 175, 'recipe': {19699: 3, 19750: 1}},
19720: {'min': 0, 'max': 25, 'recipe': {19718: 2}},
19733: {'min': 75, 'max': 100, 'recipe': {19728: 2}},
19734: {'min': 150, 'max': 175, 'recipe': {19730: 2}},
19735: {'min': 300, 'max': 325, 'recipe': {19729: 4}},
19736: {'min': 225, 'max': 250, 'recipe': {19731: 2}},
19737: {'min': 400, 'max': 425, 'recipe': {19732: 3}},
19738: {'min': 0, 'max': 25, 'recipe': {19719: 2}},
19740: {'min': 75, 'max': 100, 'recipe': {19739: 2}},
19742: {'min': 150, 'max': 175, 'recipe': {19741: 2}},
19744: {'min': 225, 'max': 250, 'recipe': {19743: 2}},
19746: {'min': 400, 'max': 425, 'recipe': {19745: 2}},
19747: {'min': 300, 'max': 325, 'recipe': {19748: 3}},
19795: {'min': 0, 'max': 25, 'recipe': {19720: 1, 24290: 3}},
19796: {'min': 25, 'max': 50, 'recipe': {19720: 1, 24346: 3}},
19797: {'min': 25, 'max': 50, 'recipe': {19720: 1, 24284: 3}},
19798: {'min': 0, 'max': 25, 'recipe': {19720: 1, 24342: 3}},
19799: {'min': 50, 'max': 75, 'recipe': {24290: 8, 71307: 1}},
19800: {'min': 50, 'max': 75, 'recipe': {24346: 8, 71307: 1}},
19801: {'min': 50, 'max': 75, 'recipe': {24284: 8, 71307: 1}},
19802: {'min': 50, 'max': 75, 'recipe': {24342: 8, 71307: 1}},
19803: {'min': 125, 'max': 150, 'recipe': {24343: 8, 71225: 1}},
19804: {'min': 75, 'max': 100, 'recipe': {19740: 1, 24343: 3}},
19805: {'min': 25, 'max': 50, 'recipe': {19720: 1, 24296: 3}},
19806: {'min': 75, 'max': 100, 'recipe': {19740: 1, 24347: 3}},
19807: {'min': 125, 'max': 150, 'recipe': {24347: 8, 71225: 1}},
19808: {'min': 100, 'max': 125, 'recipe': {19740: 1, 24291: 3}},
19809: {'min': 100, 'max': 125, 'recipe': {19740: 1, 24353: 3}},
19810: {'min': 125, 'max': 150, 'recipe': {24291: 8, 71225: 1}},
19811: {'min': 125, 'max': 150, 'recipe': {24353: 8, 71225: 1}},
19812: {'min': 175, 'max': 200, 'recipe': {19742: 1, 24354: 3}},
19813: {'min': 175, 'max': 200, 'recipe': {19742: 1, 24292: 3}},
19814: {'min': 175, 'max': 200, 'recipe': {19742: 1, 24286: 3}},
19815: {'min': 200, 'max': 225, 'recipe': {24292: 8, 72752: 1}},
19816: {'min': 200, 'max': 225, 'recipe': {24354: 8, 72752: 1}},
19817: {'min': 150, 'max': 175, 'recipe': {19742: 1, 24348: 3}},
19818: {'min': 150, 'max': 175, 'recipe': {19742: 1, 24344: 3}},
19819: {'min': 200, 'max': 225, 'recipe': {24286: 8, 72752: 1}},
19820: {'min': 200, 'max': 225, 'recipe': {24348: 8, 72752: 1}},
19821: {'min': 200, 'max': 225, 'recipe': {24344: 8, 72752: 1}},
19849: {'min': 275, 'max': 300, 'recipe': {24293: 8, 76216: 1}},
19850: {'min': 250, 'max': 275, 'recipe': {19744: 1, 24293: 3}},
19851: {'min': 250, 'max': 275, 'recipe': {19744: 1, 24363: 3}},
19852: {'min': 250, 'max': 275, 'recipe': {19744: 1, 24281: 3}},
19853: {'min': 325, 'max': 350, 'recipe': {19747: 1, 24350: 3}},
19854: {'min': 350, 'max': 375, 'recipe': {24350: 8, 72194: 1}},
19855: {'min': 350, 'max': 375, 'recipe': {24294: 8, 72194: 1}},
19856: {'min': 350, 'max': 375, 'recipe': {24356: 8, 72194: 1}},
19857: {'min': 350, 'max': 375, 'recipe': {24282: 8, 72194: 1}},
19858: {'min': 350, 'max': 375, 'recipe': {24288: 8, 72194: 1}},
19859: {'min': 350, 'max': 375, 'recipe': {24341: 8, 72194: 1}},
19860: {'min': 350, 'max': 375, 'recipe': {24299: 8, 72194: 1}},
19861: {'min': 325, 'max': 350, 'recipe': {19747: 1, 24294: 3}},
19862: {'min': 300, 'max': 325, 'recipe': {19747: 1, 24356: 3}},
19863: {'min': 325, 'max': 350, 'recipe': {19747: 1, 24282: 3}},
19864: {'min': 300, 'max': 325, 'recipe': {19747: 1, 24288: 3}},
19865: {'min': 300, 'max': 325, 'recipe': {19747: 1, 24341: 3}},
19866: {'min': 325, 'max': 350, 'recipe': {19747: 1, 24299: 3}},
19867: {'min': 150, 'max': 175, 'recipe': {19742: 1, 24280: 3}},
19868: {'min': 200, 'max': 225, 'recipe': {24280: 8, 72752: 1}},
19869: {'min': 50, 'max': 75, 'recipe': {24296: 8, 71307: 1}},
19870: {'min': 0, 'max': 25, 'recipe': {19720: 1, 24278: 3}},
19871: {'min': 50, 'max': 75, 'recipe': {24278: 8, 71307: 1}},
19872: {'min': 75, 'max': 100, 'recipe': {19740: 1, 24279: 3}},
19873: {'min': 125, 'max': 150, 'recipe': {24279: 8, 71225: 1}},
19874: {'min': 100, 'max': 125, 'recipe': {19740: 1, 24285: 3}},
19875: {'min': 125, 'max': 150, 'recipe': {24285: 8, 71225: 1}},
19876: {'min': 250, 'max': 275, 'recipe': {19744: 1, 24349: 3}},
19877: {'min': 275, 'max': 300, 'recipe': {24363: 8, 76216: 1}},
19878: {'min': 275, 'max': 300, 'recipe': {24349: 8, 76216: 1}},
19879: {'min': 275, 'max': 300, 'recipe': {24281: 8, 76216: 1}},
19880: {'min': 375, 'max': 400, 'recipe': {24341: 15, 72194: 2}},
19881: {'min': 375, 'max': 400, 'recipe': {24299: 15, 72194: 2}},
19882: {'min': 375, 'max': 400, 'recipe': {24350: 15, 72194: 2}},
19883: {'min': 375, 'max': 400, 'recipe': {24294: 15, 72194: 2}},
19884: {'min': 375, 'max': 400, 'recipe': {24282: 15, 72194: 2}},
19885: {'min': 375, 'max': 400, 'recipe': {24288: 15, 72194: 2}},
19886: {'min': 375, 'max': 400, 'recipe': {24356: 15, 72194: 2}},
19904: {'min': 225, 'max': 250, 'recipe': {19744: 1, 24345: 3}},
19905: {'min': 225, 'max': 250, 'recipe': {19744: 1, 24287: 3}},
19906: {'min': 225, 'max': 250, 'recipe': {19744: 1, 24355: 3}},
19907: {'min': 275, 'max': 300, 'recipe': {24345: 8, 76216: 1}},
19908: {'min': 275, 'max': 300, 'recipe': {24287: 8, 76216: 1}},
19909: {'min': 275, 'max': 300, 'recipe': {24355: 8, 76216: 1}},
19910: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24358: 5, 76614: 5}},
19911: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24300: 5, 76614: 5}},
19912: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24351: 5, 76614: 5}},
19913: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24295: 5, 76614: 5}},
19914: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24283: 5, 76614: 5}},
19915: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24289: 5, 76614: 5}},
19916: {'min': 400, 'max': 425, 'recipe': {19721: 5, 24357: 5, 76614: 5}},
19930: {'min': 100, 'max': 125, 'recipe': {19740: 1, 24297: 3}},
19931: {'min': 125, 'max': 150, 'recipe': {24297: 8, 71225: 1}},
19932: {'min': 175, 'max': 200, 'recipe': {19742: 1, 24298: 3}},
19933: {'min': 200, 'max': 225, 'recipe': {24298: 8, 72752: 1}},
19955: {'min': 125, 'max': 150, 'recipe': {24279: 15, 71225: 2}},
19956: {'min': 125, 'max': 150, 'recipe': {24297: 15, 71225: 2}},
19957: {'min': 125, 'max': 150, 'recipe': {24353: 15, 71225: 2}},
19958: {'min': 125, 'max': 150, 'recipe': {24347: 15, 71225: 2}},
19959: {'min': 125, 'max': 150, 'recipe': {24291: 15, 71225: 2}},
19960: {'min': 125, 'max': 150, 'recipe': {24343: 15, 71225: 2}},
19961: {'min': 125, 'max': 150, 'recipe': {24285: 15, 71225: 2}},
19962: {'min': 200, 'max': 225, 'recipe': {24280: 15, 72752: 2}},
19963: {'min': 200, 'max': 225, 'recipe': {24298: 15, 72752: 2}},
19964: {'min': 200, 'max': 225, 'recipe': {24354: 15, 72752: 2}},
19965: {'min': 200, 'max': 225, 'recipe': {24348: 15, 72752: 2}},
19966: {'min': 200, 'max': 225, 'recipe': {24292: 15, 72752: 2}},
19967: {'min': 200, 'max': 225, 'recipe': {24344: 15, 72752: 2}},
19968: {'min': 200, 'max': 225, 'recipe': {24286: 15, 72752: 2}},
19969: {'min': 275, 'max': 300, 'recipe': {24345: 15, 76216: 2}},
19970: {'min': 275, 'max': 300, 'recipe': {24363: 15, 76216: 2}},
19971: {'min': 275, 'max': 300, 'recipe': {24349: 15, 76216: 2}},
19972: {'min': 275, 'max': 300, 'recipe': {24293: 15, 76216: 2}},
19973: {'min': 275, 'max': 300, 'recipe': {24355: 15, 76216: 2}},
19974: {'min': 275, 'max': 300, 'recipe': {24281: 15, 76216: 2}},
19975: {'min': 275, 'max': 300, 'recipe': {24287: 15, 76216: 2}},
24742: {'min': 175, 'max': 200, 'recipe': {19688: 1, 24307: 1}},
24743: {'min': 375, 'max': 400, 'recipe': {19684: 1, 24309: 1}},
24744: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24310: 1}},
24745: {'min': 175, 'max': 200, 'recipe': {19688: 1, 24312: 1}},
24746: {'min': 375, 'max': 400, 'recipe': {19684: 1, 24314: 1}},
24747: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24315: 1}},
24760: {'min': 150, 'max': 175, 'recipe': {19688: 1, 24332: 1}},
24761: {'min': 350, 'max': 375, 'recipe': {19684: 1, 24334: 1}},
24762: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24335: 1}},
24792: {'min': 150, 'max': 175, 'recipe': {19688: 1, 24337: 1}},
24793: {'min': 350, 'max': 375, 'recipe': {19684: 1, 24339: 1}},
24794: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24340: 1}},
24819: {'min': 100, 'max': 125, 'recipe': {19683: 1, 24285: 1, 24307: 1}},
24820: {'min': 300, 'max': 325, 'recipe': {19684: 1, 24288: 1, 24309: 1}},
24821: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24289: 1, 24310: 1}},
24822: {'min': 100, 'max': 125, 'recipe': {19683: 1, 24302: 1}},
24823: {'min': 300, 'max': 325, 'recipe': {19684: 1, 24304: 1}},
24824: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24305: 1}},
24825: {'min': 125, 'max': 150, 'recipe': {19683: 1, 24291: 1, 24312: 1}},
24826: {'min': 325, 'max': 350, 'recipe': {19684: 1, 24294: 1, 24314: 1}},
24827: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24295: 1, 24315: 1}},
24831: {'min': 125, 'max': 150, 'recipe': {19683: 1, 24327: 1}},
24832: {'min': 325, 'max': 350, 'recipe': {19684: 1, 24329: 1}},
24833: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24330: 1}},
24869: {'min': 10, 'max': 35, 'recipe': {19679: 2, 19720: 1}},
36042: {'min': 75, 'max': 100, 'recipe': {19683: 1, 24307: 2, 36041: 75, 36059: 1}},
36043: {'min': 275, 'max': 300, 'recipe': {19681: 1, 24308: 2, 36041: 150, 36059: 10}},
36044: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24310: 1, 36041: 200, 36059: 25}},
36732: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 37177: 1}},
36746: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 37176: 1}},
36749: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 37175: 1}},
36756: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 37174: 1}},
36765: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 37177: 1}},
36774: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 37176: 1}},
36785: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 37175: 1}},
36789: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 37174: 1}},
36802: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 37177: 1}},
36806: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 37176: 1}},
36815: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 37175: 1}},
36819: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 37174: 1}},
36827: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 37177: 1}},
36839: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 37176: 1}},
36843: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 37175: 1}},
36850: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 37174: 1}},
36857: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 37177: 1}},
36860: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 37176: 1}},
36870: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 37175: 1}},
36872: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 37174: 1}},
36878: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 37177: 1}},
36887: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 37176: 1}},
36890: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 37175: 1}},
36897: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 37174: 1}},
37174: {'min': 325, 'max': 350, 'recipe': {19747: 1, 37897: 5}},
37175: {'min': 350, 'max': 375, 'recipe': {37897: 10, 72194: 1}},
37176: {'min': 375, 'max': 400, 'recipe': {37897: 20, 72194: 2}},
37177: {'min': 400, 'max': 425, 'recipe': {19721: 5, 37897: 30, 76614: 5}},
38162: {'min': 400, 'max': 425, 'recipe': {19721: 5, 76614: 5, 86601: 100}},
38163: {'min': 150, 'max': 175, 'recipe': {19742: 1, 86601: 35}},
38164: {'min': 75, 'max': 100, 'recipe': {19740: 1, 86601: 25}},
38165: {'min': 25, 'max': 50, 'recipe': {19720: 1, 86601: 15}},
38166: {'min': 375, 'max': 400, 'recipe': {72194: 2, 86601: 65}},
38167: {'min': 275, 'max': 300, 'recipe': {76216: 1, 86601: 45}},
38168: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 38162: 1}},
38169: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 38166: 1}},
38170: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 38167: 1}},
38171: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 38163: 1}},
38172: {'min': 75, 'max': 115, 'recipe': {13113: 1, 13122: 1, 38164: 1}},
38173: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 38162: 1}},
38174: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 38166: 1}},
38175: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 38167: 1}},
38176: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 38163: 1}},
38177: {'min': 75, 'max': 115, 'recipe': {13120: 1, 13121: 1, 38164: 1}},
38178: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 38162: 1}},
38179: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 38166: 1}},
38180: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 38167: 1}},
38181: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 38163: 1}},
38182: {'min': 75, 'max': 115, 'recipe': {13118: 1, 13119: 1, 38164: 1}},
38183: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 38162: 1}},
38184: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 38166: 1}},
38185: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 38167: 1}},
38186: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 38163: 1}},
38187: {'min': 75, 'max': 115, 'recipe': {13112: 1, 13117: 1, 38164: 1}},
38188: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 38162: 1}},
38189: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 38166: 1}},
38190: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 38167: 1}},
38191: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 38163: 1}},
38192: {'min': 75, 'max': 115, 'recipe': {13115: 1, 13116: 1, 38164: 1}},
38193: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 38162: 1}},
38194: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 38166: 1}},
38195: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 38167: 1}},
38196: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 38163: 1}},
38197: {'min': 75, 'max': 115, 'recipe': {13111: 1, 13114: 1, 38164: 1}},
38198: {'min': 25, 'max': 65, 'recipe': {13103: 1, 13108: 1, 38165: 1}},
38199: {'min': 25, 'max': 65, 'recipe': {13105: 1, 13106: 1, 38165: 1}},
38200: {'min': 25, 'max': 65, 'recipe': {13107: 1, 13110: 1, 38165: 1}},
38201: {'min': 25, 'max': 65, 'recipe': {13101: 1, 13102: 1, 38165: 1}},
38202: {'min': 25, 'max': 65, 'recipe': {13099: 1, 13100: 1, 38165: 1}},
38203: {'min': 50, 'max': 90, 'recipe': {13104: 1, 13109: 1, 38165: 1}},
38204: {'min': 100, 'max': 125, 'recipe': {19683: 1, 24317: 1, 86601: 2}},
38205: {'min': 300, 'max': 325, 'recipe': {19684: 1, 24319: 1, 86601: 16}},
38206: {'min': 400, 'max': 425, 'recipe': {19685: 1, 24320: 1, 86601: 32}},
41374: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 41553: 1}},
41375: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 41554: 1}},
41376: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 41555: 1}},
41377: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 41556: 1}},
41378: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 41553: 1}},
41379: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 41554: 1}},
41380: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 41555: 1}},
41381: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 41556: 1}},
41382: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 41553: 1}},
41383: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 41554: 1}},
41384: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 41555: 1}},
41385: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 41556: 1}},
41386: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 41553: 1}},
41387: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 41554: 1}},
41388: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 41555: 1}},
41389: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 41556: 1}},
41390: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 41553: 1}},
41391: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 41554: 1}},
41392: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 41555: 1}},
41393: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 41556: 1}},
41394: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 41553: 1}},
41395: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 41554: 1}},
41396: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 41555: 1}},
41397: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 41556: 1}},
45598: {'min': 75, 'max': 115, 'recipe': {13113: 1, 13122: 1, 19806: 1}},
45599: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19807: 1}},
45600: {'min': 125, 'max': 165, 'recipe': {13113: 1, 13122: 1, 19958: 1}},
45601: {'min': 150, 'max': 190, 'recipe': {13124: 1, 13134: 1, 19817: 1}},
45602: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19820: 1}},
45603: {'min': 200, 'max': 240, 'recipe': {13124: 1, 13134: 1, 19965: 1}},
45604: {'min': 250, 'max': 290, 'recipe': {13136: 1, 13146: 1, 19876: 1}},
45605: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19878: 1}},
45606: {'min': 275, 'max': 315, 'recipe': {13136: 1, 13146: 1, 19971: 1}},
45607: {'min': 325, 'max': 365, 'recipe': {13148: 1, 13158: 1, 19853: 1}},
45608: {'min': 350, 'max': 390, 'recipe': {13148: 1, 13158: 1, 19854: 1}},
45609: {'min': 375, 'max': 415, 'recipe': {13148: 1, 13158: 1, 19882: 1}},
45610: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19912: 1}},
45611: {'min': 75, 'max': 115, 'recipe': {13120: 1, 13121: 1, 19806: 1}},
45612: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19807: 1}},
45613: {'min': 125, 'max': 165, 'recipe': {13120: 1, 13121: 1, 19958: 1}},
45614: {'min': 150, 'max': 190, 'recipe': {13132: 1, 13133: 1, 19817: 1}},
45615: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19820: 1}},
45616: {'min': 200, 'max': 240, 'recipe': {13132: 1, 13133: 1, 19965: 1}},
45617: {'min': 250, 'max': 290, 'recipe': {13144: 1, 13145: 1, 19876: 1}},
45618: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19878: 1}},
45619: {'min': 275, 'max': 315, 'recipe': {13144: 1, 13145: 1, 19971: 1}},
45620: {'min': 325, 'max': 365, 'recipe': {13156: 1, 13157: 1, 19853: 1}},
45621: {'min': 350, 'max': 390, 'recipe': {13156: 1, 13157: 1, 19854: 1}},
45622: {'min': 375, 'max': 415, 'recipe': {13156: 1, 13157: 1, 19882: 1}},
45623: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19912: 1}},
45624: {'min': 75, 'max': 115, 'recipe': {13111: 1, 13114: 1, 19806: 1}},
45625: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19807: 1}},
45626: {'min': 125, 'max': 165, 'recipe': {13111: 1, 13114: 1, 19958: 1}},
45627: {'min': 150, 'max': 190, 'recipe': {13123: 1, 13125: 1, 19817: 1}},
45628: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19820: 1}},
45629: {'min': 200, 'max': 240, 'recipe': {13123: 1, 13125: 1, 19965: 1}},
45630: {'min': 250, 'max': 290, 'recipe': {13135: 1, 13137: 1, 19876: 1}},
45631: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19878: 1}},
45632: {'min': 275, 'max': 315, 'recipe': {13135: 1, 13137: 1, 19971: 1}},
45633: {'min': 325, 'max': 365, 'recipe': {13147: 1, 13149: 1, 19853: 1}},
45634: {'min': 350, 'max': 390, 'recipe': {13147: 1, 13149: 1, 19854: 1}},
45635: {'min': 375, 'max': 415, 'recipe': {13147: 1, 13149: 1, 19882: 1}},
45636: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19912: 1}},
45637: {'min': 75, 'max': 115, 'recipe': {13112: 1, 13117: 1, 19806: 1}},
45638: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19807: 1}},
45639: {'min': 125, 'max': 165, 'recipe': {13112: 1, 13117: 1, 19958: 1}},
45640: {'min': 150, 'max': 190, 'recipe': {13128: 1, 13129: 1, 19817: 1}},
45641: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19820: 1}},
45642: {'min': 200, 'max': 240, 'recipe': {13128: 1, 13129: 1, 19965: 1}},
45643: {'min': 250, 'max': 290, 'recipe': {13140: 1, 13141: 1, 19876: 1}},
45644: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19878: 1}},
45645: {'min': 275, 'max': 315, 'recipe': {13140: 1, 13141: 1, 19971: 1}},
45646: {'min': 325, 'max': 365, 'recipe': {13152: 1, 13153: 1, 19853: 1}},
45647: {'min': 350, 'max': 390, 'recipe': {13152: 1, 13153: 1, 19854: 1}},
45648: {'min': 375, 'max': 415, 'recipe': {13152: 1, 13153: 1, 19882: 1}},
45649: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19912: 1}},
45650: {'min': 75, 'max': 115, 'recipe': {13115: 1, 13116: 1, 19806: 1}},
45651: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19807: 1}},
45652: {'min': 125, 'max': 165, 'recipe': {13115: 1, 13116: 1, 19958: 1}},
45653: {'min': 150, 'max': 190, 'recipe': {13126: 1, 13127: 1, 19817: 1}},
45654: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19820: 1}},
45655: {'min': 200, 'max': 240, 'recipe': {13126: 1, 13127: 1, 19965: 1}},
45656: {'min': 250, 'max': 290, 'recipe': {13138: 1, 13139: 1, 19876: 1}},
45657: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19878: 1}},
45658: {'min': 275, 'max': 315, 'recipe': {13138: 1, 13139: 1, 19971: 1}},
45659: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19912: 1}},
45660: {'min': 325, 'max': 365, 'recipe': {13150: 1, 13151: 1, 19853: 1}},
45661: {'min': 350, 'max': 390, 'recipe': {13150: 1, 13151: 1, 19854: 1}},
45662: {'min': 375, 'max': 415, 'recipe': {13150: 1, 13151: 1, 19882: 1}},
45663: {'min': 75, 'max': 115, 'recipe': {13118: 1, 13119: 1, 19806: 1}},
45664: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19807: 1}},
45665: {'min': 125, 'max': 165, 'recipe': {13118: 1, 13119: 1, 19958: 1}},
45666: {'min': 150, 'max': 190, 'recipe': {13130: 1, 13131: 1, 19817: 1}},
45667: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19820: 1}},
45668: {'min': 200, 'max': 240, 'recipe': {13130: 1, 13131: 1, 19965: 1}},
45669: {'min': 250, 'max': 290, 'recipe': {13142: 1, 13143: 1, 19876: 1}},
45670: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19878: 1}},
45671: {'min': 275, 'max': 315, 'recipe': {13142: 1, 13143: 1, 19971: 1}},
45672: {'min': 325, 'max': 365, 'recipe': {13154: 1, 13155: 1, 19853: 1}},
45673: {'min': 350, 'max': 390, 'recipe': {13154: 1, 13155: 1, 19854: 1}},
45674: {'min': 375, 'max': 415, 'recipe': {13154: 1, 13155: 1, 19882: 1}},
45675: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19912: 1}},
46730: {'min': 450, 'max': 475, 'recipe': {19925: 2, 46731: 100, 46747: 10}},
46732: {'min': 450, 'max': 475, 'recipe': {19925: 2, 46733: 100, 46747: 10}},
46734: {'min': 450, 'max': 475, 'recipe': {19925: 2, 46735: 100, 46747: 10}},
46738: {'min': 450, 'max': 475, 'recipe': {19681: 20, 19683: 20, 19688: 10, 46742: 1}},
46739: {'min': 450, 'max': 475, 'recipe': {19733: 20, 19734: 10, 19736: 20, 46745: 1}},
46740: {'min': 450, 'max': 475, 'recipe': {19721: 1, 19747: 100, 19790: 25}},
46741: {'min': 450, 'max': 475, 'recipe': {19740: 20, 19742: 10, 19744: 20, 46740: 1}},
46742: {'min': 450, 'max': 475, 'recipe': {19684: 50, 19721: 1, 46747: 10}},
46745: {'min': 450, 'max': 475, 'recipe': {19721: 1, 19735: 50, 46747: 10}},
48580: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46710: 1}},
48581: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 49522: 1}},
48582: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46711: 1}},
48583: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46712: 1}},
48584: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46713: 1}},
48585: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46709: 1}},
48586: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 46708: 1}},
48587: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46710: 1}},
48588: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 49522: 1}},
48589: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46711: 1}},
48590: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46712: 1}},
48591: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46713: 1}},
48592: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46709: 1}},
48593: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 46708: 1}},
48594: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46710: 1}},
48595: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 49522: 1}},
48596: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46711: 1}},
48597: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46712: 1}},
48598: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46713: 1}},
48599: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46709: 1}},
48600: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 46708: 1}},
48601: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46710: 1}},
48602: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 49522: 1}},
48603: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46711: 1}},
48604: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46712: 1}},
48605: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46713: 1}},
48606: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46709: 1}},
48607: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 46708: 1}},
48608: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46710: 1}},
48609: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 49522: 1}},
48610: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46711: 1}},
48611: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46712: 1}},
48612: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46713: 1}},
48613: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46709: 1}},
48614: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 46708: 1}},
48615: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46710: 1}},
48616: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 49522: 1}},
48617: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46711: 1}},
48618: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46712: 1}},
48619: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46713: 1}},
48620: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46709: 1}},
48621: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 46708: 1}},
48907: {'min': 400, 'max': 425, 'recipe': {19685: 1, 19721: 1, 19924: 40, 48884: 50}},
49733: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 49866: 1}},
49782: {'min': 400, 'max': 425, 'recipe': {44941: 500}},
49783: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 49866: 1}},
49784: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 49866: 1}},
49785: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 49866: 1}},
49786: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 49866: 1}},
49787: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 49866: 1}},
49866: {'min': 400, 'max': 425, 'recipe': {19721: 5, 49782: 1, 76614: 5}},
50140: {'min': 400, 'max': 650, 'recipe': {13147: 1, 13149: 1, 19721: 4, 50363: 1}},
50141: {'min': 425, 'max': 675, 'recipe': {13166: 1, 13167: 1, 19721: 15, 50367: 1}},
50142: {'min': 400, 'max': 650, 'recipe': {13147: 1, 13149: 1, 19721: 4, 50364: 1}},
50143: {'min': 400, 'max': 650, 'recipe': {13150: 1, 13151: 1, 19721: 12, 50366: 1}},
50144: {'min': 375, 'max': 625, 'recipe': {13138: 1, 13139: 1, 19721: 8, 50362: 1}},
50145: {'min': 400, 'max': 650, 'recipe': {13152: 1, 13153: 1, 19721: 5, 50366: 1}},
50146: {'min': 375, 'max': 625, 'recipe': {13140: 1, 13141: 1, 19721: 3, 50362: 1}},
50147: {'min': 400, 'max': 650, 'recipe': {13154: 1, 13155: 1, 19721: 10, 50366: 1}},
50148: {'min': 375, 'max': 625, 'recipe': {13142: 1, 13143: 1, 19721: 5, 50362: 1}},
50149: {'min': 375, 'max': 625, 'recipe': {13144: 1, 13145: 1, 19721: 5, 50362: 1}},
50150: {'min': 425, 'max': 675, 'recipe': {13159: 1, 13161: 1, 19721: 10, 50368: 1}},
50151: {'min': 400, 'max': 650, 'recipe': {13156: 1, 13157: 1, 19721: 10, 50366: 1}},
50152: {'min': 375, 'max': 625, 'recipe': {13136: 1, 13146: 1, 19721: 3, 50362: 1}},
50153: {'min': 400, 'max': 650, 'recipe': {13148: 1, 13158: 1, 19721: 5, 50366: 1}},
50154: {'min': 425, 'max': 675, 'recipe': {13162: 1, 13163: 1, 19721: 25, 50368: 1}},
50155: {'min': 425, 'max': 675, 'recipe': {13159: 1, 13161: 1, 19721: 10, 50367: 1}},
50156: {'min': 350, 'max': 600, 'recipe': {13123: 1, 13125: 1, 19721: 2, 50358: 1}},
50157: {'min': 425, 'max': 675, 'recipe': {13162: 1, 13163: 1, 19721: 20, 50367: 1}},
50158: {'min': 350, 'max': 600, 'recipe': {13126: 1, 13127: 1, 19721: 5, 50358: 1}},
50159: {'min': 350, 'max': 600, 'recipe': {13128: 1, 13129: 1, 19721: 2, 50358: 1}},
50160: {'min': 350, 'max': 600, 'recipe': {13130: 1, 13131: 1, 19721: 2, 50358: 1}},
50161: {'min': 350, 'max': 600, 'recipe': {13132: 1, 13133: 1, 19721: 2, 50358: 1}},
50162: {'min': 350, 'max': 600, 'recipe': {13124: 1, 13134: 1, 19721: 2, 50358: 1}},
50163: {'min': 425, 'max': 675, 'recipe': {13164: 1, 13165: 1, 19721: 10, 50367: 1}},
50164: {'min': 375, 'max': 625, 'recipe': {13135: 1, 13137: 1, 19721: 2, 50360: 1}},
50165: {'min': 425, 'max': 675, 'recipe': {13164: 1, 13165: 1, 19721: 10, 50368: 1}},
50166: {'min': 400, 'max': 650, 'recipe': {13150: 1, 13151: 1, 19721: 10, 50363: 1}},
50167: {'min': 400, 'max': 650, 'recipe': {13152: 1, 13153: 1, 19721: 4, 50363: 1}},
50168: {'min': 400, 'max': 650, 'recipe': {13154: 1, 13155: 1, 19721: 5, 50363: 1}},
50169: {'min': 400, 'max': 650, 'recipe': {13156: 1, 13157: 1, 19721: 5, 50363: 1}},
50170: {'min': 400, 'max': 650, 'recipe': {13148: 1, 13158: 1, 19721: 4, 50363: 1}},
50171: {'min': 425, 'max': 675, 'recipe': {13166: 1, 13167: 1, 19721: 20, 50368: 1}},
50172: {'min': 425, 'max': 675, 'recipe': {13168: 1, 13169: 1, 19721: 15, 50367: 1}},
50173: {'min': 375, 'max': 625, 'recipe': {13135: 1, 13137: 1, 19721: 3, 50362: 1}},
50174: {'min': 400, 'max': 650, 'recipe': {13147: 1, 13149: 1, 19721: 5, 50366: 1}},
50175: {'min': 375, 'max': 625, 'recipe': {13138: 1, 13139: 1, 19721: 6, 50360: 1}},
50176: {'min': 400, 'max': 650, 'recipe': {13150: 1, 13151: 1, 19721: 10, 50364: 1}},
50177: {'min': 425, 'max': 675, 'recipe': {13168: 1, 13169: 1, 19721: 20, 50368: 1}},
50178: {'min': 400, 'max': 650, 'recipe': {13152: 1, 13153: 1, 19721: 4, 50364: 1}},
50179: {'min': 375, 'max': 625, 'recipe': {13140: 1, 13141: 1, 19721: 2, 50360: 1}},
50180: {'min': 375, 'max': 625, 'recipe': {13142: 1, 13143: 1, 19721: 4, 50360: 1}},
50181: {'min': 400, 'max': 650, 'recipe': {13154: 1, 13155: 1, 19721: 8, 50364: 1}},
50182: {'min': 400, 'max': 650, 'recipe': {13156: 1, 13157: 1, 19721: 8, 50364: 1}},
50183: {'min': 375, 'max': 625, 'recipe': {13144: 1, 13145: 1, 19721: 4, 50360: 1}},
50184: {'min': 375, 'max': 625, 'recipe': {13136: 1, 13146: 1, 19721: 2, 50360: 1}},
50185: {'min': 400, 'max': 650, 'recipe': {13148: 1, 13158: 1, 19721: 4, 50364: 1}},
50186: {'min': 425, 'max': 675, 'recipe': {13160: 1, 13170: 1, 19721: 10, 50367: 1}},
50187: {'min': 425, 'max': 675, 'recipe': {13160: 1, 13170: 1, 19721: 10, 50368: 1}},
50358: {'min': 350, 'max': 375, 'recipe': {19721: 1, 24274: 8, 72752: 1}},
50360: {'min': 375, 'max': 400, 'recipe': {19721: 2, 24275: 8, 76216: 1}},
50362: {'min': 375, 'max': 400, 'recipe': {19721: 2, 24275: 10, 76216: 2}},
50363: {'min': 400, 'max': 425, 'recipe': {19721: 3, 24276: 8, 72194: 1}},
50364: {'min': 400, 'max': 425, 'recipe': {19721: 3, 24276: 12, 72194: 2}},
50366: {'min': 400, 'max': 425, 'recipe': {19721: 3, 24276: 15, 72194: 3}},
50367: {'min': 425, 'max': 450, 'recipe': {19721: 5, 24277: 3, 76614: 3}},
50368: {'min': 425, 'max': 450, 'recipe': {19721: 5, 24277: 5, 76614: 5}},
62886: {'min': 0, 'max': 40, 'recipe': {62942: 1, 62961: 1}},
62887: {'min': 275, 'max': 315, 'recipe': {62891: 1, 62964: 1}},
62888: {'min': 75, 'max': 115, 'recipe': {62886: 1, 62962: 1}},
62889: {'min': 400, 'max': 650, 'recipe': {19721: 3, 62917: 1, 62966: 1}},
62890: {'min': 400, 'max': 650, 'recipe': {19721: 3, 62917: 1, 62966: 1}},
62891: {'min': 150, 'max': 190, 'recipe': {62888: 1, 62963: 1}},
62917: {'min': 300, 'max': 550, 'recipe': {19721: 1, 62887: 1, 62965: 1}},
62961: {'min': 0, 'max': 25, 'recipe': {19679: 5}},
62962: {'min': 75, 'max': 100, 'recipe': {19683: 5}},
62963: {'min': 150, 'max': 175, 'recipe': {19688: 5}},
62964: {'min': 275, 'max': 300, 'recipe': {19681: 5}},
62965: {'min': 300, 'max': 325, 'recipe': {19684: 5}},
62966: {'min': 400, 'max': 425, 'recipe': {19685: 5}},
66616: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 66623: 1, 66625: 1}},
66650: {'min': 400, 'max': 425, 'recipe': {43773: 5, 66637: 25}},
67339: {'min': 400, 'max': 425, 'recipe': {19746: 1, 24310: 1, 24358: 1}},
67342: {'min': 400, 'max': 425, 'recipe': {19746: 1, 24330: 1, 43773: 10}},
67344: {'min': 400, 'max': 425, 'recipe': {19746: 1, 24295: 1, 24330: 1}},
67912: {'min': 400, 'max': 425, 'recipe': {19746: 1, 24310: 1, 67832: 1}},
69370: {'min': 400, 'max': 425, 'recipe': {19685: 1, 68942: 1, 74328: 5}},
70612: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 73034: 5}},
70891: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3, 73034: 5}},
71225: {'min': 125, 'max': 150, 'recipe': {19733: 10, 19740: 4, 19789: 25}},
71307: {'min': 50, 'max': 75, 'recipe': {19720: 4, 19738: 10, 19792: 25}},
71350: {'min': 400, 'max': 425, 'recipe': {19685: 2, 73034: 5}},
71879: {'min': 400, 'max': 425, 'recipe': {19746: 2, 19790: 1, 73034: 5}},
72060: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 73034: 5}},
72194: {'min': 350, 'max': 375, 'recipe': {19735: 10, 19747: 4, 19791: 25}},
72195: {'min': 400, 'max': 425, 'recipe': {19685: 1, 73034: 5}},
72752: {'min': 200, 'max': 225, 'recipe': {19734: 10, 19742: 4, 19794: 25}},
73034: {'min': 0, 'max': 25, 'recipe': {74090: 20, 76839: 1, 77256: 5}},
73056: {'min': 400, 'max': 425, 'recipe': {19685: 5, 73034: 5}},
73665: {'min': 400, 'max': 425, 'recipe': {19685: 4, 73034: 5}},
74341: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 73034: 5}},
74525: {'min': 400, 'max': 440, 'recipe': {13009: 1, 19685: 10, 19737: 12, 24277: 3}},
75134: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3, 73034: 5}},
75169: {'min': 400, 'max': 425, 'recipe': {19685: 2, 73034: 5}},
75606: {'min': 400, 'max': 425, 'recipe': {19685: 2, 73034: 5}},
76216: {'min': 275, 'max': 300, 'recipe': {19736: 10, 19744: 4, 19793: 25}},
76614: {'min': 400, 'max': 425, 'recipe': {19737: 10, 19746: 4, 19790: 25}},
82156: {'min': 400, 'max': 650, 'recipe': {73056: 1, 75134: 1, 82882: 1}},
82329: {'min': 425, 'max': 675, 'recipe': {70612: 1, 75606: 1, 82882: 1}},
82332: {'min': 400, 'max': 650, 'recipe': {82415: 1, 82882: 1, 83474: 1}},
82383: {'min': 400, 'max': 650, 'recipe': {70891: 1, 73665: 1, 83264: 1}},
82415: {'min': 400, 'max': 425, 'recipe': {19685: 1, 82796: 2}},
82551: {'min': 400, 'max': 425, 'recipe': {19685: 2, 82796: 1}},
82571: {'min': 400, 'max': 650, 'recipe': {71350: 1, 72060: 1, 82882: 1}},
82633: {'min': 400, 'max': 425, 'recipe': {24812: 1, 68942: 1, 82582: 10}},
82678: {'min': 400, 'max': 425, 'recipe': {24330: 1, 83103: 10, 83284: 3, 83757: 10}},
82749: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 82796: 2}},
82754: {'min': 425, 'max': 675, 'recipe': {82551: 1, 82882: 1, 83883: 1}},
82791: {'min': 400, 'max': 425, 'recipe': {24818: 1, 68942: 1, 83757: 50}},
82796: {'min': 400, 'max': 425, 'recipe': {73034: 1, 82582: 20, 82678: 1}},
82822: {'min': 400, 'max': 650, 'recipe': {74341: 1, 75169: 1, 83264: 1}},
82882: {'min': 400, 'max': 425, 'recipe': {19721: 5, 76614: 5, 82678: 3, 83284: 45}},
82885: {'min': 400, 'max': 650, 'recipe': {82882: 1, 83813: 1, 84061: 1}},
82990: {'min': 400, 'max': 650, 'recipe': {71350: 1, 72060: 1, 83264: 1}},
83005: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3, 82796: 4}},
83264: {'min': 400, 'max': 425, 'recipe': {19721: 5, 76614: 5, 82678: 1, 83103: 100}},
83336: {'min': 400, 'max': 650, 'recipe': {73056: 1, 75134: 1, 83264: 1}},
83338: {'min': 400, 'max': 425, 'recipe': {24824: 1, 68942: 1, 83103: 50}},
83342: {'min': 400, 'max': 425, 'recipe': {19685: 2, 82796: 1}},
83391: {'min': 400, 'max': 650, 'recipe': {71879: 1, 72195: 1, 82882: 1}},
83423: {'min': 400, 'max': 425, 'recipe': {24800: 1, 68942: 1, 83103: 50}},
83440: {'min': 400, 'max': 650, 'recipe': {82882: 1, 83005: 1, 84197: 1}},
83474: {'min': 400, 'max': 425, 'recipe': {19746: 2, 19790: 1, 82796: 3}},
83502: {'min': 400, 'max': 425, 'recipe': {68942: 1, 69370: 1, 83103: 50}},
83663: {'min': 400, 'max': 425, 'recipe': {24806: 1, 68942: 1, 83757: 50}},
83792: {'min': 400, 'max': 650, 'recipe': {82749: 1, 82882: 1, 83342: 1}},
83813: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 82796: 3}},
83883: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 1, 82796: 2}},
83964: {'min': 400, 'max': 425, 'recipe': {24815: 1, 68942: 1, 82678: 1}},
83972: {'min': 400, 'max': 650, 'recipe': {70891: 1, 73665: 1, 82882: 1}},
84061: {'min': 400, 'max': 425, 'recipe': {19685: 2, 82796: 2}},
84127: {'min': 400, 'max': 425, 'recipe': {24803: 1, 68942: 1, 82678: 1}},
84131: {'min': 425, 'max': 675, 'recipe': {70612: 1, 75606: 1, 83264: 1}},
84197: {'min': 400, 'max': 425, 'recipe': {19685: 5, 82796: 3}},
84210: {'min': 400, 'max': 425, 'recipe': {19685: 4, 82796: 2}},
84383: {'min': 400, 'max': 650, 'recipe': {71879: 1, 72195: 1, 83264: 1}},
84509: {'min': 400, 'max': 650, 'recipe': {74341: 1, 75169: 1, 82882: 1}},
84539: {'min': 400, 'max': 425, 'recipe': {19746: 1, 19790: 3, 82796: 3}},
84745: {'min': 400, 'max': 650, 'recipe': {82882: 1, 84210: 1, 84539: 1}},
84749: {'min': 400, 'max': 425, 'recipe': {24821: 1, 68942: 1, 83103: 50}},
85379: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 19721: 5, 85405: 1}},
85405: {'min': 400, 'max': 425, 'recipe': {48805: 1, 48806: 1, 48807: 1, 76614: 5}},
85411: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 19721: 5, 85405: 1}},
85415: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 19721: 5, 85405: 1}},
85425: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 19721: 5, 85405: 1}},
85445: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 19721: 5, 85405: 1}},
85498: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 19721: 5, 85405: 1}},
85713: {'min': 400, 'max': 425, 'recipe': {19721: 1, 19746: 1, 24320: 1, 86069: 15}},
86403: {'min': 400, 'max': 650, 'recipe': {13160: 1, 13170: 1, 86585: 1}},
86428: {'min': 400, 'max': 650, 'recipe': {13159: 1, 13161: 1, 86585: 1}},
86508: {'min': 400, 'max': 650, 'recipe': {13168: 1, 13169: 1, 86585: 1}},
86513: {'min': 400, 'max': 650, 'recipe': {13162: 1, 13163: 1, 86585: 1}},
86563: {'min': 400, 'max': 650, 'recipe': {13164: 1, 13165: 1, 86585: 1}},
86585: {'min': 400, 'max': 425, 'recipe': {19721: 5, 76614: 5, 86601: 100}},
86659: {'min': 400, 'max': 650, 'recipe': {13166: 1, 13167: 1, 86585: 1}},
}
| mit | 8,320,170,496,389,430,000 | 72.637755 | 88 | 0.53686 | false |
pp-mo/iris-grib | iris_grib/tests/unit/load_rules/test_grib1_convert.py | 1 | 5593 | # (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of iris-grib.
#
# iris-grib is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iris-grib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with iris-grib. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris_grib.load_rules.grib1_convert`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris_grib.tests first so that some things can be initialised before
# importing anything else
import iris_grib.tests as tests
import cf_units
import gribapi
import mock
import iris
from iris.exceptions import TranslationError
from iris.fileformats.rules import Reference
from iris_grib import GribWrapper
from iris_grib.load_rules import grib1_convert
from iris_grib.tests.unit import TestField
class TestBadEdition(tests.IrisGribTest):
def test(self):
message = mock.Mock(edition=2)
emsg = 'GRIB edition 2 is not supported'
with self.assertRaisesRegexp(TranslationError, emsg):
grib1_convert(message)
class TestBoundedTime(TestField):
@staticmethod
def is_forecast_period(coord):
return (coord.standard_name == 'forecast_period' and
coord.units == 'hours')
@staticmethod
def is_time(coord):
return (coord.standard_name == 'time' and
coord.units == 'hours since epoch')
def assert_bounded_message(self, **kwargs):
attributes = {'productDefinitionTemplateNumber': 0,
'edition': 1, '_forecastTime': 15,
'_forecastTimeUnit': 'hours',
'phenomenon_bounds': lambda u: (80, 120),
'_phenomenonDateTime': -1,
'table2Version': 9999}
attributes.update(kwargs)
message = mock.Mock(**attributes)
self._test_for_coord(message, grib1_convert, self.is_forecast_period,
expected_points=[35],
expected_bounds=[[15, 55]])
self._test_for_coord(message, grib1_convert, self.is_time,
expected_points=[100],
expected_bounds=[[80, 120]])
def test_time_range_indicator_2(self):
self.assert_bounded_message(timeRangeIndicator=2)
def test_time_range_indicator_3(self):
self.assert_bounded_message(timeRangeIndicator=3)
def test_time_range_indicator_4(self):
self.assert_bounded_message(timeRangeIndicator=4)
def test_time_range_indicator_5(self):
self.assert_bounded_message(timeRangeIndicator=5)
def test_time_range_indicator_51(self):
self.assert_bounded_message(timeRangeIndicator=51)
def test_time_range_indicator_113(self):
self.assert_bounded_message(timeRangeIndicator=113)
def test_time_range_indicator_114(self):
self.assert_bounded_message(timeRangeIndicator=114)
def test_time_range_indicator_115(self):
self.assert_bounded_message(timeRangeIndicator=115)
def test_time_range_indicator_116(self):
self.assert_bounded_message(timeRangeIndicator=116)
def test_time_range_indicator_117(self):
self.assert_bounded_message(timeRangeIndicator=117)
def test_time_range_indicator_118(self):
self.assert_bounded_message(timeRangeIndicator=118)
def test_time_range_indicator_123(self):
self.assert_bounded_message(timeRangeIndicator=123)
def test_time_range_indicator_124(self):
self.assert_bounded_message(timeRangeIndicator=124)
def test_time_range_indicator_125(self):
self.assert_bounded_message(timeRangeIndicator=125)
class Test_GribLevels(tests.IrisTest):
def test_grib1_hybrid_height(self):
gm = gribapi.grib_new_from_samples('regular_gg_ml_grib1')
gw = GribWrapper(gm)
results = grib1_convert(gw)
factory, = results[0]
self.assertEqual(factory.factory_class,
iris.aux_factory.HybridPressureFactory)
delta, sigma, ref = factory.args
self.assertEqual(delta, {'long_name': 'level_pressure'})
self.assertEqual(sigma, {'long_name': 'sigma'})
self.assertEqual(ref, Reference(name='surface_pressure'))
ml_ref = iris.coords.CoordDefn('model_level_number', None, None,
cf_units.Unit('1'),
{'positive': 'up'}, None)
lp_ref = iris.coords.CoordDefn(None, 'level_pressure', None,
cf_units.Unit('Pa'),
{}, None)
s_ref = iris.coords.CoordDefn(None, 'sigma', None,
cf_units.Unit('1'),
{}, None)
aux_coord_defns = [coord._as_defn() for coord, dim in results[8]]
self.assertIn(ml_ref, aux_coord_defns)
self.assertIn(lp_ref, aux_coord_defns)
self.assertIn(s_ref, aux_coord_defns)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 6,769,606,754,175,757,000 | 36.790541 | 77 | 0.638119 | false |
Tristan79/ComicStreamer | comicstreamerlib/options.py | 1 | 4608 | import sys
import getopt
import platform
import os
import traceback
import csversion
try:
import argparse
except:
pass
class Options:
help_text = """
Usage: {0} [OPTION]... [FOLDER LIST]
A digital comic media server.
The FOLDER_LIST is a list of folders that will be scanned recursively
for comics to add to the database (persisted)
-p, --port [PORT] The port the server should listen on. (persisted)
-b, --bind [IP] Bind server traffic to ip (persisted)
-w, --webroot [WEBROOT] Webroot for reverse proxy (persisted)
-u, --user-dir [FOLDER] Set path for user folder
-r, --reset Purge the existing database and quit
-d, --debug More verbose console output
-q, --quiet No console output
--nomonitor Don't start the folder scanner/monitor
--nobrowser Don't launch a web browser
-v, --version Display version
-h, --help Display this message
Example:
comicstreamer -p 32502 --config-file ~/comicstreamer/comics.conf
"""
# -c, --config-file [FILE] Config file not implemented
def __init__(self):
self.port = None
self.folder_list = None
self.reset = False
self.no_monitor = False
self.debug = True
self.quiet = False
self.launch_client = True
self.reset_and_run = False
self.webroot = None
self.user_dir = None
self.bind = None
self.extract_last_page = False
def display_msg_and_quit( self, msg, code, show_help=False ):
appname = os.path.basename(sys.argv[0])
if msg is not None:
print( msg )
if show_help:
print self.help_text.format(appname)
else:
print "For more help, run with '--help'"
sys.exit(code)
def parseCmdLineArgs(self,remove=True):
if platform.system() == "Darwin" and hasattr(sys, "frozen") and sys.frozen == 1:
# remove the PSN ("process serial number") argument from OS/X
input_args = [a for a in sys.argv[1:] if "-psn_0_" not in a ]
else:
input_args = sys.argv[1:]
# parse command line options
try: #will never know why the ":" is below... "dp:hrqwuvb"
opts, args = getopt.getopt( input_args,
"lhp:w:vrdqb:u:c:",
[ "help", "port=", "webroot=", "version", "reset", "debug", "quiet",
"nomonitor", "nobrowser", "bind=", "user-dir=","config-file=",
"_resetdb_and_run", #private
] )
except getopt.GetoptError as err:
self.display_msg_and_quit( str(err), 2 )
# process options
for o, a in opts:
if o in ("-r", "--reset"):
self.reset = True
if o in ("-d", "--debug"):
self.debug = True
if o in ("-q", "--quiet"):
self.quiet = True
if o in ("-h", "--help"):
self.display_msg_and_quit( None, 0, show_help=True )
if o in ("-p", "--port"):
try:
self.port = int(a)
except:
pass
if o in ("-w", "--webroot"):
self.webroot = a
if o == "-l":
self.extract_last_page = True
if o in ("-b", "--bind"):
self.bind = a
if o == "--nomonitor":
self.no_monitor = True
if o == "--nobrowser":
self.launch_client = False
if o in ("-v","--version"):
print "ComicStreamer {0}: ".format(csversion.version)
sys.exit(0)
if o == "--_resetdb_and_run":
self.reset_and_run = True
if o in ("-u","--user-dir"):
self.user_dir = a
#if o in ("-c","--config-file"):
# self.config_file = a
filename_encoding = sys.getfilesystemencoding()
if len(args) > 0:
#self.folder_list = [os.path.normpath(a.decode(filename_encoding)) for a in args]
self.folder_list = [os.path.abspath(os.path.normpath(unicode(a.decode(filename_encoding)))) for a in args]
# remove certain private flags from args
if remove:
try:
sys.argv.remove("--_resetdb_and_run")
except:
pass
| apache-2.0 | 4,990,370,780,975,231,000 | 33.646617 | 118 | 0.496962 | false |
zpiman/MathIA | square.py | 1 | 1854 | import numpy as np
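# Plots a square wave together with partial sums of its Fourier series
# f(x) = 1/2 + sum over odd n of 2/(n*pi) * sin(n*pi*x):
# figure 1 compares the wave with its DC term alone, figure 2 adds the first
# sine term, and figure 3 shows the partial sums up to n=3 and up to n=99.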
import matplotlib.pyplot as plt
x = np.arange(0, 8, 0.01)
myb = []
for i in x:
if np.floor(i)%2 == 0:
myb.append(1)
else:
myb.append(0)
y = np.array(myb)
plt.subplot(121)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
myb = []
for i in x:
myb.append(0.5)
yl = np.array(myb)
plt.subplot(122)
plt.plot(x,yl)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
plt.subplot(121)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
ys = yl + 2/np.pi*np.sin(x*np.pi)
plt.subplot(122)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
plt.subplot(131)
plt.plot(x,y)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
ys = np.zeros_like(ys)
ys += yl
for i in range(1,4,2):
ys += 2/(np.pi*i)*np.sin(x*np.pi*i)
plt.subplot(132)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
for i in range(5,100,2):
ys += 2/(np.pi*i)*np.sin(x*np.pi*i)
plt.subplot(133)
plt.plot(x,ys)
plt.xticks([2,4,6],("-T","0","T"))
plt.tick_params(axis='x', labelsize="15")
plt.ylim([-0.5,1.5])
plt.grid(b=True, which="major", color='black', linestyle='--')
plt.show()
| apache-2.0 | 3,362,402,061,348,584,000 | 20.068182 | 62 | 0.59493 | false |
hsoft/send2trash | setup.py | 1 | 1335 | from setuptools import setup
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Desktop Environment :: File Managers",
]
LONG_DESCRIPTION = (
open("README.rst", "rt").read() + "\n\n" + open("CHANGES.rst", "rt").read()
)
setup(
name="Send2Trash",
version="1.6.0b1",
author="Andrew Senetar",
author_email="[email protected]",
packages=["send2trash"],
scripts=[],
test_suite="tests",
url="https://github.com/arsenetar/send2trash",
license="BSD License",
description="Send file to trash natively under Mac OS X, Windows and Linux.",
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
extras_require={"win32": ["pywin32"]},
project_urls={"Bug Reports": "https://github.com/arsenetar/send2trash/issues"},
)
| bsd-3-clause | -5,560,323,544,130,696,000 | 33.230769 | 83 | 0.6397 | false |
muffato/pyEnsemblRest | template/genome.py | 1 | 1247 |
#__GENERATED_OBJECTS__
__feature_types = {
'gene' : GeneFeature,
'transcript' : TranscriptFeature,
'cds': CDSFeature,
'exon' : ExonFeature,
'repeat' : RepeatFeature,
'simple' : SimpleFeature,
'misc' : MiscFeature,
'variation' : VariationFeature,
'somatic_variation' : VariationFeature,
'structural_variation' : StructuralVariationFeature,
'somatic_structural_variation' : StructuralVariationFeature,
'constrained' : ConstrainedElementFeature,
'regulatory' : RegulatoryFeature,
'motif' : MotifFeature,
'chipseq' : ChipSeqFeature,
'translation' : TranslationFeature,
}
def feature_wrapper(d, r):
"""
Wrapper around the various types of features.
It automatically selects the appropriate type for the fetched features.
"""
t = d.get('object_type')
if t is None:
t = d.get('feature_type')
if t is None:
print("Unable to find the type of", d)
t = Feature
else:
t = t.lower()
if t not in __feature_types:
print("Unrecognized feature type:", t)
t = Feature
else:
t = __feature_types[t]
return t(d,r)
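# Usage sketch (illustrative only -- the dict below is a hypothetical REST response
# and 'rest_client' stands for whatever client object the caller passes as 'r'):
#   d = {'object_type': 'Gene', 'id': 'ENSG...'}
#   gene = feature_wrapper(d, rest_client)   # dispatches to GeneFeature(d, rest_client)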
| apache-2.0 | -6,060,772,300,965,686,000 | 28 | 75 | 0.587811 | false |
johnbywater/eventsourcing | setup.py | 1 | 2434 | from setuptools import setup  # setuptools (not distutils) is needed for extras_require/project_urls/zip_safe below
from eventsourcing import __version__
crypto_requires = ["pycryptodome<=3.9.99999"]
postgresql_requires = ["psycopg2<=2.8.99999"]
postgresql_dev_requires = ["psycopg2-binary<=2.8.99999"]
docs_requires = (
postgresql_dev_requires
+ crypto_requires
+ [
"Sphinx==1.8.5",
"python_docs_theme",
"sphinx_rtd_theme==0.4.3",
]
)
dev_requires = docs_requires + [
"python-coveralls",
"coverage",
"black",
"mypy",
"flake8",
"flake8-bugbear",
"isort",
]
long_description = """
A library for event sourcing in Python.
`Package documentation is now available <http://eventsourcing.readthedocs.io/>`_.
`Please raise issues on GitHub <https://github.com/johnbywater/eventsourcing/issues>`_.
"""
packages = [
"eventsourcing",
"eventsourcing.tests",
"eventsourcing.examples",
"eventsourcing.examples.bankaccounts",
"eventsourcing.examples.cargoshipping",
]
setup(
name="eventsourcing",
version=__version__,
description="Event sourcing in Python",
author="John Bywater",
author_email="[email protected]",
url="https://github.com/johnbywater/eventsourcing",
license="BSD-3-Clause",
packages=packages,
package_data={"eventsourcing": ["py.typed"]},
install_requires=[],
extras_require={
"postgres": postgresql_requires,
"postgres_dev": postgresql_dev_requires,
"crypto": crypto_requires,
"docs": docs_requires,
"dev": dev_requires,
},
zip_safe=False,
long_description=long_description,
keywords=[
"event sourcing",
"event store",
"domain driven design",
"domain-driven design",
"ddd",
"cqrs",
"cqs",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| bsd-3-clause | -1,621,732,981,441,119,200 | 26.348315 | 87 | 0.620378 | false |
XuesongYang/end2end_dialog | PipelineLstmModel.py | 1 | 9106 | ''' Pipelined bi-directional LSTM model.
This model stacks the separately trained biLSTM NLU and biLSTM SAP models together,
and its weights are initialized with the weights of those separate models. In addition,
for the SAP task, the decision threshold on the output layer is tuned
on dev data.
Author : Xuesong Yang
Email : [email protected]
Created Date: Dec. 31, 2016
'''
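# Example invocation (a sketch only; the paths and threshold values below are
# illustrative placeholders, not files shipped with the repository):
#
#   python PipelineLstmModel.py --data-npz data/test.npz \
#       --tag-weights tag_model/weights/best.h5 \
#       --intent-weights intent_model/weights/best.h5 \
#       --act-weights act_model/weights/best.h5 \
#       --intent-threshold 0.5 --tune --model-folder pipeline_result
#
# With --tune, the agent-act decision threshold is searched on the given (dev) data;
# without --tune, pass --act-threshold to predict with a fixed threshold instead.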
import numpy as np
from utils import checkExistence, get_windowedVec, eval_intentPredict, getActPred
from AgentActClassifyingModel import writeUtterActTxt
from DataSetCSVagentActPred import DataSetCSVagentActPred
import os
import argparse
def load_model_NLU(model_weights, test_data):
from SlotTaggingModel_multitask import SlotTaggingModel
params = ['train_data', 'dev_data', 'epoch_nb', 'batch_size', 'embedding_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'test_tag_only', 'test_intent_only', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['test_data'] = test_data
model = SlotTaggingModel(**argparams)
model.load_model()
return model
#def load_model_Policy(model_weights, test_data, threshold):
def load_model_Policy(model_weights):
from AgentActClassifyingModel import AgentActClassifying
params = ['train_data', 'dev_data', 'test_data', 'epoch_nb', 'batch_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['threshold'] = 1.0
# argparams['test_data'] = test_data
model = AgentActClassifying(**argparams)
model.load_model()
return model
def readTagPredTxt(tag_pred_txt, userTag2id, sample_nb, userTag_vocab_size):
checkExistence(tag_pred_txt)
indicator = np.zeros((sample_nb, userTag_vocab_size))
with open(tag_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for tag in line.strip().split():
tag = 'tag-{}'.format(tag)
if tag in userTag2id:
pos = userTag2id[tag] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
def readIntentPredTxt(intent_pred_txt, userIntent2id, sample_nb, userIntent_vocab_size):
checkExistence(intent_pred_txt)
indicator = np.zeros((sample_nb, userIntent_vocab_size))
with open(intent_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for intent in line.strip().split(';'):
if intent == 'null':
continue
intent = 'intent-{}'.format(intent)
if intent in userIntent2id:
pos = userIntent2id[intent] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
def pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, result_folder, tuneTh=True, threshold=None):
# load slot tagging model, and make prediction
tag_model = load_model_NLU(tag_model_weights, test_data)
tag_model.test_tag_flag = True
tag_model.model_folder = result_folder
tag_model.predict()
tag_pred_txt = '{}/test_result/tag_{}.pred'.format(tag_model.model_folder, os.path.basename(tag_model_weights).split('_')[0])
tag_pred_indicator = readTagPredTxt(tag_pred_txt, test_data.userTag2id,
len(test_data.userTag_txt), test_data.userTag_vocab_size)
# load user intent model and make prediction
intent_model = load_model_NLU(intent_model_weights, test_data)
intent_model.test_intent_flag = True
    intent_model.threshold = threshold_intent  # module-level global set in __main__
intent_model.model_folder = result_folder
intent_model.predict()
intent_pred_txt = '{}/test_result/intent_{}.pred'.format(intent_model.model_folder, os.path.basename(intent_model_weights).split('_')[0])
intent_pred_indicator = readIntentPredTxt(intent_pred_txt, test_data.userIntent2id,
len(test_data.userIntent_txt), test_data.userIntent_vocab_size)
# merge indicators of slot tagging and user intents, and generate windowed tagIntent matrix
assert len(tag_pred_indicator) == len(intent_pred_indicator), 'sample_nb is not equal.'
nlu_vecBin = np.hstack((tag_pred_indicator, intent_pred_indicator))
# load agent act model and make prediction
act_model = load_model_Policy(act_model_weights)
act_model.model_folder = result_folder
nlu_vecBin_windowed = get_windowedVec(nlu_vecBin, act_model.window_size)
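    # window the per-turn NLU vectors so the act model sees window_size turns of context (see get_windowedVec)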
if tuneTh:
# tune threshold
print('Tuning threshold on Dev ...')
act_probs = act_model.model.predict(nlu_vecBin_windowed)
precision, recall, fscore, accuracy_frame, act_threshold = eval_intentPredict(act_probs, test_data.agentAct_vecBin)
print('AgentActPred on Dev: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(precision, recall, fscore, accuracy_frame, act_threshold))
dev_pred_txt = getActPred(act_probs, act_threshold, test_data.id2agentAct)
dev_results_fname = '{}/act_dev.pred'.format(act_model.model_folder)
writeUtterActTxt(test_data.userUtter_txt, dev_pred_txt, dev_results_fname)
print('Write dev results: {}'.format(dev_results_fname))
return act_threshold
else:
# make prediction based on well-tuned threshold
assert threshold is not None, 'Argument required: threshold for agent action prediction.'
act_model.threshold = threshold
act_model.test_data = test_data
act_model.test_data.userTagIntent_vecBin = nlu_vecBin_windowed
act_model.predict()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-npz', dest='npz_file', help='.npz file that contains the instance of DataSetCSVagentAct class')
parser.add_argument('--intent-weights', dest='intent_weights', help='.h5 weights for best user intent model')
parser.add_argument('--tag-weights', dest='tag_weights', help='.h5 weights for best user slot tagging model')
parser.add_argument('--act-weights', dest='act_weights', help='.h5 weights for oracle agent act model')
parser.add_argument('--intent-threshold', dest='intent_threshold', type=float, help='decision threshold for intent model')
parser.add_argument('--tune', dest='tune_threshold', action='store_true', help='tune decision threshold for act model if this option is activated.')
parser.add_argument('--act-threshold', dest='act_threshold', type=float, help='decision threshold for agent act model')
parser.add_argument('--model-folder', dest='model_folder', help='model folder')
args = parser.parse_args()
argparams = vars(args)
pid = os.getpid()
npz_file = argparams['npz_file']
intent_model_weights = argparams['intent_weights']
tag_model_weights = argparams['tag_weights']
act_model_weights = argparams['act_weights']
threshold_intent = argparams['intent_threshold']
tune_threshold = argparams['tune_threshold']
threshold_act = argparams['act_threshold']
# validate params
checkExistence(npz_file)
checkExistence(intent_model_weights)
checkExistence(tag_model_weights)
checkExistence(act_model_weights)
assert threshold_intent is not None, 'Argument required: --intent-threshold'
for key in sorted(argparams.keys()):
print('\t{}={}'.format(key, argparams[key]))
# load test data
data_npz = np.load(npz_file)
if tune_threshold:
dev_result_folder = './model/pipe_{}/dev'.format(pid)
if not os.path.exists(dev_result_folder):
os.makedirs(dev_result_folder)
print('\tdev_result_folder={}'.format(dev_result_folder))
dev_data = data_npz['dev_data'][()]
assert isinstance(dev_data, DataSetCSVagentActPred)
act_threshold = pipelinePrediction(dev_data, tag_model_weights, intent_model_weights, act_model_weights, dev_result_folder, tuneTh=True)
else:
assert threshold_act is not None, 'Argument required: --act-threshold.'
assert argparams['model_folder'] is not None, 'Argument required: --model-folder'
test_result_folder = '{}/test'.format(argparams['model_folder'])
if not os.path.exists(test_result_folder):
os.makedirs(test_result_folder)
print('\ttest_result_folder={}'.format(test_result_folder))
test_data = data_npz['test_data'][()]
assert isinstance(test_data, DataSetCSVagentActPred)
pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, test_result_folder, tuneTh=False, threshold=threshold_act)
| mit | 5,262,195,568,939,314,000 | 49.871508 | 190 | 0.669888 | false |
alviezhang/leetcode | 91.decode_ways/solution.py | 1 | 1077 | # coding: utf-8
class Solution:
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if s == "" or s[0] == '0':
return 0
# Initialization
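        # Rolling DP: current = f(i), previous_one = f(i-1), before_previous = f(i-2),
        # where f(i) is the number of ways to decode the prefix s[:i+1]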
before_previous = 0
previous_one = 0
current = 1
for i in range(1, len(s)):
before_previous = previous_one
previous_one = current
            # When the current character is '0': if the previous character is in ['1', '2'],
# f(i) = f(i-2), otherwise result is 0
if s[i] == '0':
if s[i-1] not in ['1', '2']:
return 0
                # In particular, on the first iteration (before_previous == 0), current = 1
current = before_previous if before_previous else 1
continue
# f(i) = f(i - 1)
current = previous_one
if s[i-1] != '0' and int(s[i-1] + s[i]) <= 26:
# f(i) = f(i - 1) + f(i - 2)
current += before_previous if before_previous else 1
return current
| mit | 6,711,470,115,184,742,000 | 26.615385 | 84 | 0.44104 | false |
openelections/openelections-core | openelex/us/vt/validate/validates.py | 1 | 8951 | import re
from openelex.models import Contest, Candidate, Office, Result
import logging
import time
import os
# if not os.path.isdir("logs"):
# os.makedirs("logs")
# logging.basicConfig(filename=time.strftime("logs/%Y%m%d-%H%M%S-validate.log"),level=logging.DEBUG)
# Generic validation helpers
def _validate_candidate_votes(election_id, reporting_level, contest_slug,
candidate_slug, expected_votes):
"""Sum sub-contest level results and compare them to known totals"""
msg = "Expected {} votes for contest {} and candidate {}, found {}"
votes = Result.objects.filter(election_id=election_id,
contest_slug=contest_slug, candidate_slug=candidate_slug,
reporting_level=reporting_level).sum('votes')
if votes != expected_votes:
logging.debug("db.getCollection('result').find({election_id:\"%s\", \
contest_slug:\"%s\", candidate_slug:\"%s\", \
reporting_level:\"%s\"})", election_id, contest_slug, candidate_slug, reporting_level)
assert votes == expected_votes, msg.format(expected_votes, contest_slug,
candidate_slug, votes)
def _validate_many_candidate_votes(election_id, reporting_level,
candidates):
"""
Sum sub-contest level results and compare them to known totals for
multiple contests and candidates.
Arguments:
election_id - Election ID of the election of interest.
reporting_level - Reporting level to use to aggregate results.
candidates - Tuple of contests slug, candidate slug and expected votes.
"""
for candidate_info in candidates:
contest, candidate, expected = candidate_info
_validate_candidate_votes(election_id, reporting_level,
contest, candidate, expected)
def validate_results_2012_president_general():
"""Sum some county-level results for 2012 general presidential and compare with known totals"""
election_id = 'vt-2012-11-06-general'
known_results = [
('president', 'barack-obama', 199053),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_general():
"""Sum some county-level results for 2014 general and compare with known totals"""
election_id = 'vt-2014-11-04-general'
known_results = [
('us-house-of-representatives', 'peter-welch', 123349),
('us-house-of-representatives', 'mark-donka', 59432),
('us-house-of-representatives', 'cris-ericson', 2750),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2014_house_primary():
"""Sum some county-level results for 2014 house primary and compare with known totals"""
election_id = 'vt-2014-08-26-primary'
known_results = [
('us-house-of-representatives-d', 'peter-welch', 19248),
('us-house-of-representatives-d', 'writeins', 224),
('us-house-of-representatives-r', 'mark-donka', 4340),
('us-house-of-representatives-r', 'donald-russell', 4026),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_general():
"""Sum some county-level results for 2002 lt-gov general and compare with known totals"""
election_id = 'vt-2002-11-05-general'
known_results = [
('lieutenant-governor', 'peter-shumlin', 73501),
('lieutenant-governor', 'brian-e-dubie', 94044),
('lieutenant-governor', 'anthony-pollina', 56564),
('lieutenant-governor', 'sally-ann-jones', 4310),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2002_lt_gov_primary():
"""Sum some county-level results for 2002 lt-gov primary and compare with known totals"""
election_id = 'vt-2002-09-10-primary'
known_results = [
('lieutenant-governor-d', 'peter-shumlin', 22633),
('lieutenant-governor-r', 'brian-e-dubie', 22584),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2004_misc_results_general():
"""Sum some state specific results for 2004 general and compare with known totals"""
election_id = 'vt-2004-11-02-general'
known_results = [
('treasurer', 'jeb-spaulding', 273705),
('secretary-of-state', 'deb-markowitz', 270744),
('auditor', 'randy-brock', 152848),
('auditor', 'elizabeth-m-ready', 122498),
('auditor', 'jerry-levy', 17685),
('attorney-general', 'william-h-sorrell', 169726),
        # there is an error on the Vermont website; per the VT Secretary of State, the real result should be 81,285
# ('attorney-general', 'dennis-carver', 90285),
('attorney-general', 'susan-a-davis', 14351),
('attorney-general', 'james-mark-leas', 8769),
('attorney-general', 'karen-kerin', 6357),
('attorney-general', 'boots-wardinski', 2944),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2008_state_senate_primary():
"""Sum some county-level results for 2008 state senate primary and compare with known totals"""
election_id = 'vt-2008-09-08-primary'
known_results = [
('state-senate-orange-d', 'mark-a-macdonald', 557),
('state-senate-franklin-r', 'randy-brock', 879),
('state-senate-franklin-r', 'willard-rowell', 782),
('state-senate-essexorleans-d', 'robert-a-starr', 748),
('state-senate-essexorleans-d', 'writeins', 112),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2010_state_senate_general():
"""Sum some county-level results for 2010 state senate general and compare with known totals"""
election_id = 'vt-2010-11-02-general'
known_results = [
('state-senate-orange', 'mark-a-macdonald', 4524),
('state-senate-orange', 'stephen-w-webster', 3517),
('state-senate-franklin', 'randy-brock', 9014),
('state-senate-franklin', 'peter-d-moss', 793),
('state-senate-essexorleans', 'robert-a-starr', 9902),
('state-senate-essexorleans', 'vincent-illuzzi', 9231),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2012_state_house_primary():
"""Sum some county-level results for 2012 state house primary and compare with known totals"""
election_id = 'vt-2012-03-06-primary'
known_results = [
('house-of-representatives-addison-5-d', 'edward-v-mcguire', 220),
('house-of-representatives-addison-5-r', 'harvey-smith', 75),
('house-of-representatives-addison-1-d', 'betty-a-nuovo', 486),
('house-of-representatives-addison-1-d', 'paul-ralston', 446),
('house-of-representatives-bennington-1-d', 'bill-botzow', 152),
('house-of-representatives-caledonia-1-r', 'leigh-b-larocque', 72),
('house-of-representatives-chittenden-61-d', 'joanna-cole', 658),
('house-of-representatives-chittenden-61-d', 'bill-aswad', 619),
('house-of-representatives-chittenden-61-d', 'robert-hooper', 536),
('house-of-representatives-chittenden-61-r', 'kurt-wright', 116),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
def validate_results_2012_state_house_general():
"""Sum some county-level results for 2012 state house general and compare with known totals"""
election_id = 'vt-2012-11-06-general'
known_results = [
('house-of-representatives-addison-5', 'edward-v-mcguire', 982),
('house-of-representatives-addison-5', 'harvey-smith', 1151),
('house-of-representatives-addison-1', 'betty-a-nuovo', 2601),
('house-of-representatives-addison-1', 'paul-ralston', 2378),
('house-of-representatives-bennington-1', 'bill-botzow', 1613),
('house-of-representatives-caledonia-1', 'leigh-b-larocque', 1143),
('house-of-representatives-chittenden-61', 'joanna-cole', 2008),
('house-of-representatives-chittenden-61', 'bill-aswad', 1987),
('house-of-representatives-chittenden-61', 'kurt-wright', 2332),
]
_validate_many_candidate_votes(election_id, 'parish', known_results)
_validate_many_candidate_votes(election_id, 'precinct', known_results)
| mit | -705,504,772,250,037,200 | 45.378238 | 117 | 0.669199 | false |
snowflying/messager | messager/common/sslutils.py | 1 | 2833 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from messager.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
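    # client certificates are requested and verified only when a CA file is configured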
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
| apache-2.0 | -7,891,878,470,662,829,000 | 27.908163 | 78 | 0.61772 | false |