repo_name (stringlengths 6 to 61) | path (stringlengths 4 to 230) | copies (stringlengths 1 to 3) | size (stringlengths 4 to 6) | text (stringlengths 1.01k to 850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6 to 96.6) | line_max (int64, 32 to 939) | alpha_frac (float64, 0.26 to 0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62 to 6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
noelcjr/EntropyMaxima | em/scripts/add_residues.py | 1 | 12727 | #!/usr/bin/python
"""
Created on Tue Oct 25 15:19:00 2016
@author: noel
"""
import os
import sys
import em.tools.CHARMM_Parser as CP
import em.describe.utilities as ut
import em.tools.Super_Structures as SS
import Bio.PDB.PDBParser as PDBParser
import pandas as pd
import em.tools.input_output as IO
import optparse, pkg_resources
def main():
usage = "usage: %prog [options] arg"
    d = "This program reads a CSV file that has been generated by Super_Structure.\
 The file corresponds to a Super Structure of a protein. \
 Multiple residues can be added at a time; no terminal will be added.\n \
 This program can only add residues or terminals that are in the parameter file."
opt_parser = optparse.OptionParser(usage,description=d)
    opt_parser.add_option("--apn", type="str",help="Enter instructions for where to append residues in hard '\"'\n \
                          quotes. Give the amino acid number, entity ID, chain ID and \n \
                          the direction to add residues, separated by commas. The \
                          direction to add residues is either Ndir or Cdir. This means \
                          that if a residue is added at residue 10, it could be toward \
                          the N or the C terminal. This is important so that the program \
                          knows if the new residue is placed before or after the residue.\
                          Example \"1,1,A,Ndir\" or \"20,2,A,Cdir\". \n \
                          Chain ID, amino acid or terminal name are not case sensitive \
                          and do not need to go in quotes.\n")
opt_parser.add_option("-r","--res", type="str",help="Enter list of amino acids to be added in hard quotes.'\"'\n\
Example: \"ALA,VAL,ASP,ASN,GLU\".")
opt_parser.add_option("--inp", type="str",help="Path to CSV file for adding residue.")
opt_parser.add_option("--out", type="str",help="Path and name to CSV and PDB outputs with added residues.")
#opt_parser.add_option("--pep", type="str",help="Path to peptide file.")
options, args = opt_parser.parse_args()
if not os.path.exists(options.inp):
        print("Error: path to the Super Structure CSV file does not exist.")
print("Type -h or --help for description and options.")
sys.exit(1)
########################## Init Setup #####################################
# Comment out the next four lines to test in Spyder.
directory, filename = os.path.split(options.inp)
params = CP.read_charmm_FF()
insulin = SS.Super_Structure(params, options.inp,'add_linker')
parse_list = options.apn.split(',')
if options.res.find(',') == -1:
aa_add = [i for i in options.res]
aa_add = [ut.utilities.residueDict1_1[i] for i in aa_add]
else:
aa_add = options.res.split(',')
parser2 = PDBParser()
pep_file_path = pkg_resources.resource_filename('em', 'params/' + 'peptides.pdb')
# pep_file_path= /home/noel/.cache/Python-Eggs/Entropy_Maxima-0.1.0-py2.7.egg-tmp/em/params/peptides.pdb #
pep_file = parser2.get_structure('Peptides',pep_file_path)
# Uncomment the next four lines to test
#file_path = '/home/noel/Projects/Protein_design/EntropyMaxima/examples/Linker_minimization/2hiu.csv'
#insulin = SS.Super_Structure(params, file_path,'add_linker')
#parse_list = "1,1,A,Ndir".split(',')
#aa_add = "ALA".split(',')
###############################################
insulin.build_pep_and_anchers(pep_file)
############### Begin processing parse_list and aa_add ####################
message = ''
print(parse_list,len(parse_list))
if len(parse_list) == 4 and len(aa_add) > 0:
aaid_add = int(parse_list[0])
ent_id_add = int(parse_list[1])
chain_add = str(parse_list[2]).upper()
term_dir = str(parse_list[3])
        # So far this only works with natural amino acids and ACE and CTER
if term_dir in ['Ndir','Cdir']:
            message += 'Adding residues '+str(aa_add)+' in the '+term_dir+' at amino acid '+str(aaid_add)+', '+'entity '
message += str(ent_id_add)+' and direction '+term_dir+'.'
print(message)
            # TODO: counting atoms does not seem necessary. Consider deleting.
#count_atoms_added = 0
#for i in aa_add:
# for j in insulin.params.AA[i].atoms:
# for k in j:
# count_atoms_added += 1
#count_aa_added = len(aa_add)
###################################################################
            # We now create the link DataFrame and follow the process in
            # Super_Structures to populate its fields.
link = pd.DataFrame()
aa = []
aaid = []
entity_id = []
chain_id = []
atmtyp1 = []
atmtyp2 = []
charg = []
component = []
snum = 1
for res in aa_add:
chrm = res
pdbx = res
if chrm in insulin.params.AA:
comp = 1
for k in insulin.params.AA[chrm].atoms:
for l in k:
aa.append(pdbx)
aaid.append(snum)
entity_id.append(ent_id_add)
chain_id.append(chain_add)
atmtyp1.append(insulin.corrections(chrm,l))
atmtyp2.append(insulin.params.AA[chrm].atom_type[insulin.corrections(chrm,l)])
charg.append(insulin.params.AA[chrm].atom_chrg[insulin.corrections(chrm,l)])
if comp == 1:
component.append('AMINO')
else:
if l in ['C','O']:
component.append('CARBO')
else:
component.append(('SIDE'+str(comp)))
comp += 1
snum += 1
else:
                    print('Warning: amino acid identifier ' + chrm + ' is not found in parameters.')
sys.exit(1)
link['aa'] = pd.Series(aa)
link['aaid'] = pd.Series(aaid)
link['ent_id'] = pd.Series(entity_id)
link['chain'] = pd.Series(chain_id)
link['atmtyp1'] = pd.Series(atmtyp1)
link['atmtyp2'] = pd.Series(atmtyp2)
link['component'] = pd.Series(component)
link['charg'] = pd.Series(charg)
###########################################################################
# Add atomtyp, masses and atmNumber to each atom type
mass = []
atmNum = []
atmtyp3 = []
epsilon = []
rmin_half = []
atminfo = []
aainfo = []
for i in link['atmtyp2']:
atmNum.append(params.am.MASS[i][0])
mass.append(params.am.MASS[i][1])
atmtyp3.append(params.am.MASS[i][2])
epsilon.append(params.NONBONDED[i][1])
rmin_half.append(params.NONBONDED[i][2])
atminfo.append(True)
aainfo.append(False)
link['epsilon'] = pd.Series(epsilon)
link['rmin_half'] = pd.Series(rmin_half)
link['atmtyp3'] = pd.Series(atmtyp3)
link['mass'] = pd.Series(mass)
link['atmNum'] = pd.Series(atmNum)
###########################################################################
# DF Type correction.
link['aaid'] = link['aaid'].apply(int)
link['ent_id'] = link['ent_id'].apply(int)
link['mass'] = link['mass'].apply(float)
link['epsilon'] = link['epsilon'].apply(float)
link['rmin_half'] = link['rmin_half'].apply(float)
link['atmNum'] = link['atmNum'].apply(int)
            # Fill out the remaining DataFrame columns with NaN
for i in insulin.Full_Structure.columns:
if i not in list(link.columns):
if i[0:6] == 'aainfo':
link[i] = pd.Series(aainfo)
elif i[0:7] == 'atminfo':
link[i] = pd.Series(atminfo)
else:
link[i] = pd.Series([float('nan') for j in range(len(link))])
if term_dir == 'Ndir':
beg_insert = min(insulin.Full_Structure.index[(insulin.Full_Structure.aaid == aaid_add) &\
(insulin.Full_Structure.ent_id == ent_id_add) &\
(insulin.Full_Structure.chain == chain_add)])
end_insert = beg_insert + link.shape[0]
elif term_dir == 'Cdir':
                print('WARNING: The code has not been designed and tested for insertions at the CTER.')
print('Exiting the program without finishing.')
sys.exit(1)
else:
print('ERROR: wrong terminal to insert link. Ndir and Cdir are the only choices. Exiting now.')
sys.exit(1)
joint_df = pd.DataFrame(columns=link.columns)
count = 0
insert = True
            # When links are added, aaid needs to be fixed to reflect added residues
aaid_offset = 0
for i in insulin.Full_Structure.index:
if (i >= beg_insert) and (i < end_insert):
if insert:
for j in link.index:
joint_df.loc[count] = link.loc[j]
joint_df.loc[count,'aaid'] = joint_df.loc[count,'aaid'] + aaid_offset
current_aaid = link.loc[j,'aaid']
count += 1
insert = False
aaid_offset = aaid_offset + current_aaid
joint_df.loc[count] = insulin.Full_Structure.loc[i]
# So that only residues after the added link get increased in the given ent_id and chain
# Any other entity or chain in the molecules is not fixed.
if (joint_df.loc[count,'ent_id'] == ent_id_add) & (joint_df.loc[count,'chain'] == chain_add):
joint_df.loc[count,'aaid'] = joint_df.loc[count,'aaid'] + aaid_offset
count += 1
# After adding residues, it all gets copied back to original dataframe.
for i in joint_df.index:
insulin.Full_Structure.loc[i] = joint_df.loc[i]
# The way to get number of models is very specific to the way this program
# stores data in DataFrame. Be careful if the data frame column structure changes.
# TODO: missing atom coordinates are added manually. It needs to be automated more.
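            # A worked example of the arithmetic below (assuming the layout the
            # comment above implies: 20 fixed columns plus 5 per model): the
            # expression evaluates to (shape[1] - 20) / 5 + 1, so a DataFrame
            # with shape[1] == 30 gives num_models == 3.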
num_models = len(range(((insulin.Full_Structure.shape[1]-20)/5)))+1
for i in range(1,num_models+1):
for j in range(len(aa_add),0,-1):
insulin.fit_coordinates(term_dir,j,ent_id_add,chain_add,str(i),aa_add[j-1])
            # NOTE: insulin.models is not in the Super_Structure class, but it is added here.
            # This works, but it does not seem the best way to do it. Should models be a field
            # of Super_Structure and be initialized there?
insulin.models = [str(i) for i in range(1,num_models+1)]
################ Write to outputs ####################
file_name = os.path.basename(options.out).split('.')[0]
dir_path = os.path.dirname(options.out)
insulin.write_csv(os.path.dirname(options.out),file_name)
IO.write_pdb(insulin,dir_path,file_name,'all')
else:
print("ERROR: only two directions to add residues, Ndir and Cdir.")
print(" The entries are not case sensitive.")
else:
message += 'The number of entries in the instruction field, followed by -a or --apn, is not right.\n'
message += 'Type -h or --help for instructions\n'
print(message)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,646,204,782,213,206,000 | 53.857759 | 121 | 0.498782 | false | 4.00598 | false | false | false |
kgjamieson/NEXT-psych | next/apps/PoolBasedBinaryClassification/dashboard/Dashboard.py | 2 | 3689 | import json
import numpy
from datetime import datetime
from datetime import timedelta
from next.utils import utils
from next.dashboard.AppDashboard import AppDashboard
class PoolBasedBinaryClassificationDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def get_app_supported_stats(self):
"""
Returns a list of dictionaries describing the identifier (stat_id) and
necessary params inputs to be used when calling getStats
Expected output (list of dicts, each with fields):
        (string) stat_id : the identifier of the statistic
(string) description : docstring of describing outputs
(list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task'
"""
stat_list = self.get_supported_stats()
stat = {}
stat['stat_id'] = 'test_error_multiline_plot'
stat['description'] = self.test_error_multiline_plot.__doc__
stat['necessary_params'] = ['alg_label']
stat_list.append(stat)
return stat_list
def test_error_multiline_plot(self,app_id,exp_uid):
"""
        Description: Returns a multiline plot where there is a one-to-one mapping of lines to
        algorithms and each line indicates the error on the validation set with respect to the number of reported answers
Expected input:
None
Expected output (in dict):
mpld3 plot object
"""
# get list of algorithms associated with project
alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list')
x_min = numpy.float('inf')
x_max = -numpy.float('inf')
y_min = numpy.float('inf')
y_max = -numpy.float('inf')
list_of_alg_dicts = []
for algorithm in alg_list:
alg_id = algorithm['alg_id']
alg_uid = algorithm['alg_uid']
alg_label = algorithm['alg_label']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
x = []
y = []
for item in list_of_log_dict:
num_reported_answers = item['num_reported_answers']
err = item['error']
x.append(num_reported_answers)
y.append(err)
alg_dict = {}
alg_dict['legend_label'] = alg_label
alg_dict['x'] = x
alg_dict['y'] = y
try:
x_min = min(x_min,min(x))
x_max = max(x_max,max(x))
y_min = min(y_min,min(y))
y_max = max(y_max,max(y))
except:
pass
list_of_alg_dicts.append(alg_dict)
import matplotlib.pyplot as plt
import mpld3
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
for alg_dict in list_of_alg_dicts:
ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label'])
ax.set_xlabel('Number of answered triplets')
ax.set_ylabel('Error on hold-out set')
ax.set_xlim([x_min,x_max])
ax.set_ylim([y_min,y_max])
ax.grid(color='white', linestyle='solid')
ax.set_title('Triplet Test Error', size=14)
legend = ax.legend(loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict | apache-2.0 | -8,598,757,479,435,417,000 | 34.825243 | 131 | 0.577934 | false | 3.803093 | false | false | false |
OpenReliability/OpenReliability | veusz/utils/action.py | 5 | 5375 | # Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
from .. import qtall as qt4
from . import utilfuncs
import os.path
import textwrap
# where images are stored
imagedir = os.path.join(utilfuncs.resourceDirectory, 'icons')
_pixmapcache = {}
def getPixmap(pixmap):
"""Return a cached QPixmap for the filename in the icons directory."""
if pixmap not in _pixmapcache:
_pixmapcache[pixmap] = qt4.QPixmap(os.path.join(imagedir, pixmap))
return _pixmapcache[pixmap]
def pixmapExists(pixmap):
"""Does the pixmap exist?"""
return (pixmap in _pixmapcache or
os.path.exists(os.path.join(imagedir, pixmap)))
_iconcache = {}
def getIcon(icon):
"""Return a cached QIconSet for the filename in the icons directory."""
if icon not in _iconcache:
svg = os.path.join(imagedir, icon+'.svg')
if os.path.exists(svg):
filename = svg
else:
filename = os.path.join(imagedir, icon+'.png')
_iconcache[icon] = qt4.QIcon(filename)
return _iconcache[icon]
def makeAction(parent, descr, menutext, slot, icon=None, key=None,
checkable=False):
"""A quick way to set up an QAction object."""
a = qt4.QAction(parent)
a.setText(menutext)
a.setStatusTip(descr)
a.setToolTip(textwrap.fill(descr, 25))
if slot:
a.triggered.connect(slot)
if icon:
a.setIcon(getIcon(icon))
if key:
a.setShortcut( qt4.QKeySequence(key) )
if checkable:
a.setCheckable(True)
return a
def addToolbarActions(toolbar, actions, which):
"""Add actions listed in "which" from dict "actions" to toolbar "toolbar".
"""
for w in which:
toolbar.addAction(actions[w])
def constructMenus(rootobject, menuout, menutree, actions):
"""Add menus to the output dict from the tree, listing actions
from actions.
rootobject: QMenu or QMenuBar to add menus to
menuout: dict to store menus
menutree: tree structure to create menus from
actions: dict of actions to assign to menu items
"""
for menuid, menutext, actlist in menutree:
# make a new menu if necessary
if menuid not in menuout:
menu = rootobject.addMenu(menutext)
menuout[menuid] = menu
else:
menu = menuout[menuid]
# add actions to the menu
for action in actlist:
if utilfuncs.isiternostr(action):
# recurse for submenus
constructMenus(menu, menuout, [action], actions)
elif action == '':
# blank means separator
menu.addSeparator()
else:
# normal action
menu.addAction(actions[action])
def populateMenuToolbars(items, toolbar, menus):
"""Construct the menus and toolbar from the list of items.
toolbar is a QToolbar object
menus is a dict of menus to add to
Items are tuples consisting of:
(actioname, status bar text, menu text, menu id, slot,
icon filename, add to toolbar (bool), shortcut text)
Returns a dict of actions
"""
actions = {}
parent = toolbar.parent()
for item in items:
if len(item) == 1:
if menus is not None:
menus[item[0]].addSeparator()
continue
menuid, descr, menutext, menu, slot, icon, addtool, key = item
# create action
action = qt4.QAction(parent)
action.setText(menutext)
action.setStatusTip(descr)
action.setToolTip(descr)
# set shortcut if set
if key:
action.setShortcut( qt4.QKeySequence(key) )
# load icon if set
if icon:
action.setIcon(getIcon(icon))
if callable(slot):
# connect the action to the slot
action.triggered.connect(slot)
# add to menu
if menus is not None:
menus[menu].addAction(action)
elif slot is not None:
if menus is not None:
submenu = menus[menu].addMenu(menutext)
menus["%s.%s" % (menu ,menuid)] = submenu
populateMenuToolbars(slot, toolbar, menus)
else:
if menus is not None:
menus[menu].addAction(action)
# add to toolbar
if addtool and toolbar is not None:
toolbar.addAction(action)
# save for later
actions[menuid] = action
return actions
| gpl-2.0 | 6,143,498,901,184,079,000 | 31.77439 | 79 | 0.614326 | false | 4.01419 | false | false | false |
fresskarma/tinyos-1.x | tools/python/pytos/tools/Drip.py | 1 | 5254 | # "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
from jpype import jimport, JInt
from pytos.util.JavaInheritor import JavaInheritor
import pytos.Comm as Comm
from copy import *
drip = jimport.net.tinyos.drip
def getDripObject(app, motecom=None, channel=None) :
"""This function returns the drip object stored in app that is connected to optional motecom
    with an optional channel. If motecom and channel are specified but there is no drip object
with these specs, it creates one"""
drips = []
for conn in app.connections :
if isinstance(conn, Drip) :
# if motecom == None or conn.motecom == motecom : #we need this funtion in java
# if channel == None or drip.channel == channel : #we need this funtion in java
drips.append( conn )
if len(drips) == 0 and motecom != None and channel != None :
drip = Drip(app, channel, app.motecom)
app.connections.append(drip)
drips.append(drip)
return drips
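# A hypothetical usage sketch of getDripObject (the channel value and motecom
# string below are illustrative, not taken from the original module):
#   drips = getDripObject(app, motecom='sf@localhost:9001', channel=0xA1)
#   drips[0].send(myTosMsg)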
class Drip( JavaInheritor ) :
"""The Drip object inherits from the Drip.java object. It overrides the
constructor, and the send and sendwakeup commands to handle python TosMsg objects.
usage:
drip = Drip(app, Channel, 'sf@localhost:9001')
drip = Drip(app, Channel, moteif)
drip.send(myTosMsg)
drip.sendWakeup(myTosMsg)
... (plus all other functions inherited from the java object)
    For interface-compatibility with comm, you can also send a dest address, which is ignored:
drip.send(addr, myTosMsg)
"""
def __init__( self , app, channel, moteIF ) :
self.app = app
if type(moteIF) == str :
moteIF = Comm.openMoteIF(moteIF, app)
dripObj = drip.Drip(channel, moteIF)
JavaInheritor.__init__(self, (dripObj,) )
def send( self , msg, *comm ) :
        #For interface-compatibility with comm, you can also send a dest address, which is ignored:
if type(msg) == int and len(comm) > 0:
msg = comm[0]
migMsg = msg.createMigMsg()
self.migMsgSend(migMsg, msg.size)
def sendWakeup( self , msg, *comm ) :
migMsg = msg.createMigMsg()
self.migMsgSendWakeup(migMsg, msg.size)
def migMsgSend( self , msg, size, *comm ) :
self._javaParents[0].send(msg, JInt(size))
def migMsgSendWakeup( self , msg, size, *comm ) :
self._javaParents[0].sendWakeup(msg, size)
def register( self , msg , callback, *comm ) :
comm = Comm.getCommObject(self.app)
comm.register(self.app.msgs.DripMsg, DripMsgPeeler(self.app, msg, callback))
def unregister( self , msg , callback , *comm ) :
comm = Comm.getCommObject(self.app)
comm.unregister(self.app.msgs.DripMsg, DripMsgPeeler(self.app, msg, callback))
def getCommObject(self, motecom) :
"""This function returns the comm object stored in app. If there
is none, it creates one"""
for conn in self.app.connections :
if isinstance(conn, Comm.Comm) :
if motecom not in conn._connected :
conn.connect(motecom)
return conn
comm = Comm.Comm()
comm.connect(self.motecom)
self.app.connections.append(comm)
return comm
class DripMsgPeeler( Comm.MessageListener ) :
"""This is a wrapper callback object that peels the Drip headers out
of a DripMsg mig object and creates a python TosMsg with the remaining data """
def __init__(self, app, msg, callback) :
self.app = app
self.msg = msg
Comm.MessageListener.__init__(self, callback )
self._firstHashFunction = self._hashFunction
self._hashFunction = self._combinedHash
def _combinedHash(self):
return self._firstHashFunction() + self.msg.amType #this will have to change
def messageReceived( self , addr , dripMsg ) :
if dripMsg.metadata.id == self.msg.amType :
try:
msg = deepcopy(self.msg)
bytes = dripMsg.data.getBytes()
msg.setBytes( bytes )
msg.parentMsg = dripMsg
self.callback( addr, msg )
except Exception, inst:
print inst
raise
| bsd-3-clause | 1,687,768,265,527,362,600 | 38.80303 | 106 | 0.664446 | false | 3.59863 | false | false | false |
jaufrec/mixtape | mix.py | 1 | 18087 | #!/usr/bin/python3
# TODO: type check inputs
# TODO: generate an easily reviewable section of transitions
import csv
import datetime
import os
import shutil
import subprocess
import sys
import decimal
from optparse import OptionParser
class Song:
def __init__(self, num=None, file=None, start_time=None, fadein_length=None,
end_time=None, fadeout_length=None, prev_talkover=None, title='',
artist=''):
self.num = num
self.file = file
self.start_time = start_time
self.fadein_length = fadein_length
self.end_time = end_time
self.fadeout_length = fadeout_length
self.prev_talkover = prev_talkover
self.title = title
self.artist = artist
def main(argv):
global DEBUG
DEBUG = False
global VERBOSE
VERBOSE = False
global pipeoutput
pipeoutput = subprocess.DEVNULL
usage = """usage: %prog [options]\n
column headers for playlist.csv are\n
file, start_time, fadein_length, end_time, fadeout_length, title, artist, prev_talkover\n
of which only file is required"""
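    # A hypothetical playlist row matching the headers above (values are
    # illustrative; times are sox positions, lengths are in seconds):
    #   file,start_time,fadein_length,end_time,fadeout_length,title,artist,prev_talkover
    #   song1.mp3,0:12,2,3:45,5,First Song,Some Artist,1.5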
parser = OptionParser(usage=usage)
parser.add_option("-a", "--album",
dest="album", metavar="Album Name",
help="If --id3tag is selected, use this for album title")
parser.add_option("-b", "--begin", type="int",
dest="begin", metavar="STARTING_TRACK_NUM",
help="Process only from this track in cuesheet")
parser.add_option("-d", "--debug",
action="store_true", dest="DEBUG",
help="execute extra, debugging instructions and reports")
parser.add_option("-e", "--end", type="int",
dest="end", metavar="ENDING_TRACK_NUM",
help="Process only to this track in cuesheet")
parser.add_option("-i", "--id3tag",
action="store_true", dest="id3tag",
help="Add ID3 information. Uses title if present in\
cue sheet, or else file name.")
parser.add_option("-m", "--mp3",
action="store_true", dest="output_mp3",
help="Output MP3 files instead of wav")
parser.add_option("-o", "--output_dir",
dest="output_dir", default="output", metavar="RELATIVE_PATH",
help="Location of output directory relative to working directory.")
parser.add_option("-p", "--playlist",
dest="playlist_path", metavar="PATH", default="playlist.csv",
help="Read playlist csv from PATH.")
parser.add_option("-r", "--version_tag",
dest="version", metavar="VERSION_STRING",
help="Add this version number string to the ID3 comment tag.")
parser.add_option("-s", "--source_dir",
dest="source_dir", metavar="PATH",
help="Path prepended to each filename in playlist.")
parser.add_option("-v", "--verbose",
action="store_true", dest="VERBOSE",
help="Show extra messages during execution.")
(options, args) = parser.parse_args()
if options.DEBUG:
DEBUG = True
pipeoutput = None
if options.VERBOSE:
VERBOSE = True
working_path = os.getcwd()
temp_path = subprocess.run('mktemp -d',
shell=True,
check=True,
stdout=subprocess.PIPE)\
.stdout.decode("utf-8").strip()
output_dir = os.path.join(working_path, options.output_dir)
output_dir_clear_wildcard = os.path.join(output_dir, '*')
# TODO: should probably require a flag to do this without warning
if os.path.isdir(output_dir):
subprocess.call('rm {0}'.format(output_dir_clear_wildcard),
shell=True)
if options.source_dir:
if os.path.isdir(options.source_dir):
source_dir = os.path.join(working_path, options.source_dir)
else:
            print('Source directory {0} is not a directory'.format(options.source_dir))
sys.exit(2)
else:
source_dir = ''
try:
id3tag = options.id3tag
except AttributeError:
id3tag = None
try:
album = options.album
except AttributeError:
album = None
try:
        version_tag = options.version  # optparse stores --version_tag under dest="version"
except AttributeError:
version_tag = None
if not os.path.isfile(options.playlist_path):
print('Missing playlist file "{0}"'.format(options.playlist_path))
sys.exit(2)
playlist = load_playlist(options.playlist_path, source_dir,
options.begin, options.end)
convert_and_renumber(playlist, temp_path)
normalize(temp_path)
fade_and_crop(playlist, temp_path)
if options.output_mp3:
convert_to_mp3(playlist,
temp_path,
album,
id3tag,
version_tag)
move_to_output(temp_path, output_dir)
if os.path.isdir(temp_path):
shutil.rmtree(temp_path)
log('Done')
def load_playlist(playlist_file, source_dir, begin, end):
playlist = []
with open(playlist_file, 'rt') as f:
reader = csv.DictReader(f)
counter = 0
for line in reader:
try:
file = line['file']
except KeyError:
file = ''
if not file or file[0] == '#':
continue
else:
file = os.path.join(source_dir, file)
if not os.path.isfile(file):
print('Could not find file {0}'.format(file))
continue
try:
start_time = line['start_time']
except KeyError:
start_time = ''
try:
fadein_length = line['fadein_length']
except KeyError:
fadein_length = ''
try:
end_time = line['end_time']
except KeyError:
end_time = ''
try:
fadeout_length = line['fadeout_length']
except KeyError:
fadeout_length = ''
try:
title = line['title']
except KeyError:
title = ''
try:
artist = line['artist']
except KeyError:
artist = ''
try:
prev_talkover = decimal.Decimal(line['prev_talkover'])
except (KeyError, TypeError, decimal.InvalidOperation):
prev_talkover = ''
playlist.append(Song(counter,
file,
start_time,
fadein_length,
end_time,
fadeout_length,
prev_talkover,
title,
artist))
counter += 1
if begin or end:
if not begin:
begin = 0
if not end:
end = len(playlist)
playlist = playlist[begin:end]
return playlist
def convert_and_renumber(playlist, temp_path):
for song in playlist:
from_path = song.file
to_path = num_concat(temp_path, song.num, 'wav')
log('Converting {0}'.format(song.file))
subprocess.call('ffmpeg -i "{0}" -c:a pcm_s16le -vn "{1}"'.
format(from_path, to_path),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
def fade_and_crop(playlist, temp_path):
# sox trim: Any number of positions may be given; ... The effect
# then alternates between copying and discarding audio at each
# position. If a position is preceded by an equals or minus sign,
# it is interpreted relative to the beginning or the end of the
# audio, respectively. ... Otherwise, it is considered an offset
# from the last position, or from the start of audio for the first
# parameter. Using a value of 0 for the first position parameter
# allows copying from the beginning of the audio.
# sox fade: fade [type] fade-in-length [stop-time
# [fade-out-length]] Apply a fade effect to the beginning, end, or
# both of the audio. An optional type can be specified to select
# the shape of the fade curve: ... h for half a sine wave, l for
# logarithmic .... A fade-in starts from the first sample and
# ramps the signal level from 0 to full volume over fade-in-length
# seconds. Specify 0 seconds if no fade-in is wanted. For
# fade-outs, the audio will be truncated at stop-time and the
# signal level will be ramped from full volume down to 0 starting
# at fade-out-length seconds before the stop-time. If
# fade-out-length is not specified, it defaults to the same value
# as fade-in-length. No fade-out is performed if stop-time is not
# specified.
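    # A worked example combining the two effects (illustrative values):
    #   sox in.wav out.wav fade h 0 210 6 trim 30 =210 fade l 4
    # truncates at 3:30 with a 6 s half-sine fade-out, keeps 0:30 to 3:30,
    # then ramps in over 4 s with a logarithmic curve; all positions are
    # relative to the original file, matching the command built below.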
for song in playlist:
from_path = num_concat(temp_path, song.num, 'wav')
sox_path = num_concat(temp_path, song.num, 'sox.wav')
log('Fading and cropping {0}'.format(song.file))
trim_command = ''
if song.start_time or song.end_time:
if song.start_time:
start_time = song.start_time
else:
start_time = '0'
if song.fadein_length:
fadein_length = song.fadein_length
fadein_command = ' fade l {0}'.format(fadein_length)
else:
fadein_command = ''
            if song.end_time:
                end_time = song.end_time
                trim_command = ' trim {0} ={1}'.format(start_time, end_time)
                # sox only performs a fade-out when a stop time is given, so
                # the fade-out command is built only in this branch.
                if song.fadeout_length:
                    fadeout_length = song.fadeout_length
                    fadeout_command = ' fade h 0 {0} {1}'.\
                        format(end_time, fadeout_length)
                else:
                    fadeout_command = ''
            else:
                trim_command = ' trim {0}'.format(start_time)
                fadeout_command = ''
# fadeout, trim, and fadein commands are ordered so that
# all of the timing inputs can remain relative to the
# original file start time.
sox_command = 'sox "{0}" "{1}" {2} {3} {4}'.format(from_path,
sox_path,
fadeout_command,
trim_command,
fadein_command)
if DEBUG:
print(sox_command)
subprocess.call(sox_command,
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('mv {0} {1}'.format(sox_path, from_path),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput) # TODO: use python function for this?
if song.prev_talkover:
if song.num == 0:
continue # can't fade the first song with a previous song
prev_num = song.num - 1
first_file = num_concat(temp_path, prev_num, 'wav')
second_file = num_concat(temp_path, song.num, 'wav')
fadeout_file = os.path.join(temp_path, 'fadeout.wav')
fadein_file = os.path.join(temp_path, 'fadein.wav')
crossfade_file = os.path.join(temp_path, 'crossfade.wav')
crossfadeout_file = os.path.join(temp_path, 'crossfadeout.wav')
crossfadein_file = os.path.join(temp_path, 'crossfadein.wav')
first_inter_file = os.path.join(temp_path, 'first_inter.wav')
second_inter_file = os.path.join(temp_path, 'second_inter.wav')
first_final_file = os.path.join(temp_path, 'first_final.wav')
second_final_file = os.path.join(temp_path, 'second_final.wav')
            # sox's stat report goes to stderr, so redirect it before grepping;
            # subprocess.call only returns an exit code, so capture stdout instead.
            first_length = decimal.Decimal(
                subprocess.run('sox "{0}" -n stat 2>&1 | grep Length | cut -d : -f 2 | cut -f 1'.format(first_file),
                               shell=True,
                               check=True,
                               stdout=subprocess.PIPE).stdout.decode("utf-8").strip())
trim_length = first_length - song.prev_talkover
crossfade_split_length = song.prev_talkover / decimal.Decimal('2')
subprocess.call('sox {0} -r 44100 {1} trim {2}'.format(first_file, fadeout_file, trim_length),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox {0} -r 44100 {1} trim 0 {2}'.format(second_file, fadein_file, song.prev_talkover),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox -V -m -v 1.0 {0} -v 1.0 {1} {2}'.format(fadeout_file, fadein_file, crossfade_file),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox {0} {1} trim 0 {2}'.format(crossfade_file, crossfadeout_file, crossfade_split_length),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox {0} {1} trim {2}'.format(crossfade_file, crossfadein_file, crossfade_split_length),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox {0} -r 44100 {1} trim 0 {2}'.format(first_file, first_inter_file, trim_length),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox {0} -r 44100 {1} trim {2}'.format(second_file, second_inter_file, song.prev_talkover),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox -V {0} {1} {2}'.format(first_inter_file, crossfadeout_file, first_final_file),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('sox -V {0} {1} {2}'.format(crossfadein_file, second_inter_file, second_final_file),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('mv {0} {1}'.format(first_final_file, first_file),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
subprocess.call('mv {0} {1}'.format(second_final_file, second_file),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
try:
os.remove(fadeout_file)
os.remove(fadein_file)
os.remove(crossfade_file)
os.remove(crossfadeout_file)
os.remove(crossfadein_file)
os.remove(first_inter_file)
os.remove(second_inter_file)
except FileNotFoundError:
pass
def log(message):
if VERBOSE:
print('{0}: {1}'.
format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
message))
def num_concat(temp_path, num, suffix):
num_pad = '{:0>2}'.format(num)
return os.path.join(temp_path, '{0}.{1}'.format(num_pad, suffix))
def normalize(temp_path):
orig_path = os.getcwd()
os.chdir(temp_path)
log('Normalizing')
subprocess.call('normalize-audio -a -8dB *wav',
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
os.chdir(orig_path)
def convert_to_mp3(playlist, temp_path, album, id3tag, version):
track_count = len(playlist)
for i, song in enumerate(playlist):
from_path = num_concat(temp_path, song.num, 'wav')
output_path = num_concat(temp_path, song.num, 'mp3')
log('Converting to mp3'.format(song))
subprocess.call('lame -vbr-new {0} {1}'.format(from_path, output_path),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
if id3tag:
if version:
version_arg = '--comment="version {0}"'.format(version)
else:
version_arg = ''
num_pad = '{:0>2}'.format(i + 1)
if song.title:
track_title = song.title
else:
track_title = 'Track {0}'.format(num_pad)
subprocess.call('eyeD3 --album="{0}" --title="{1}" --artist="{2}" --track="{3}" --track-total="{4}" {5} "{6}"'.format(album, track_title, song.artist, num_pad, track_count, version_arg, output_path), # noqa
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
os.remove(from_path)
def move_to_output(temp_path, output_path):
from_path = os.path.join(temp_path, '*')
if not os.path.isdir(output_path):
os.mkdir(output_path)
subprocess.call('mv {0} {1}'.format(from_path, output_path),
shell=True,
stdout=pipeoutput,
stderr=pipeoutput)
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | 4,436,340,871,423,215,600 | 38.149351 | 220 | 0.509703 | false | 4.168472 | false | false | false |
AusDTO/dto-digitalmarketplace-buyer-frontend | tests/unit/test_search_presenters.py | 1 | 7685 | import os
import json
import unittest
from mock import Mock
from dmcontent.content_loader import ContentLoader
from werkzeug.datastructures import MultiDict
from app.presenters.search_presenters import filters_for_lot, set_filter_states
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('g6', 'data', 'manifest')
questions_builder = content_loader.get_builder('g6', 'manifest')
def _get_fixture_data():
test_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")
)
fixture_path = os.path.join(
test_root, 'fixtures', 'search_results_fixture.json'
)
with open(fixture_path) as fixture_file:
return json.load(fixture_file)
def _get_fixture_multiple_pages_data():
test_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..")
)
fixture_path = os.path.join(
test_root, 'fixtures', 'search_results_multiple_pages_fixture.json'
)
with open(fixture_path) as fixture_file:
return json.load(fixture_file)
class TestSearchFilters(unittest.TestCase):
def _get_filter_group_by_label(self, lot, label):
filter_groups = filters_for_lot(lot, questions_builder)
for filter_group in filter_groups:
if filter_group['label'] == label:
return filter_group
def _get_request_for_params(self, params):
return Mock(args=MultiDict(params))
def test_get_filter_groups_from_questions_with_radio_filters(self):
radios_filter_group = self._get_filter_group_by_label(
'saas', 'Radios example'
)
self.assertEqual({
'label': 'Radios example',
'filters': [
{
'label': 'Option 1',
'name': 'radiosExample',
'id': 'radiosExample-option-1',
'value': 'option 1',
},
{
'label': 'Option 2',
'name': 'radiosExample',
'id': 'radiosExample-option-2',
'value': 'option 2',
}
]
}, radios_filter_group)
def test_get_filter_groups_from_questions_with_checkbox_filters(self):
checkboxes_filter_group = self._get_filter_group_by_label(
'saas', 'Checkboxes example'
)
self.assertEqual({
'label': 'Checkboxes example',
'filters': [
{
'label': 'Option 1',
'name': 'checkboxesExample',
'id': 'checkboxesExample-option-1',
'value': 'option 1',
},
{
'label': 'Option 2',
'name': 'checkboxesExample',
'id': 'checkboxesExample-option-2',
'value': 'option 2',
}
]
}, checkboxes_filter_group)
def test_get_filter_groups_from_questions_with_boolean_filters(self):
booleans_filter_group = self._get_filter_group_by_label(
'saas', 'Booleans example'
)
self.assertEqual({
'label': 'Booleans example',
'filters': [
{
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}, booleans_filter_group)
def test_request_filters_are_set(self):
search_filters = filters_for_lot('saas', questions_builder)
request = self._get_request_for_params({
'q': 'email',
'booleanExample1': 'true'
})
set_filter_states(search_filters, request)
self.assertEqual(search_filters[0]['filters'][0]['name'],
'booleanExample1')
self.assertEqual(search_filters[0]['filters'][0]['checked'], True)
self.assertEqual(search_filters[0]['filters'][1]['name'],
'booleanExample2')
self.assertEqual(search_filters[0]['filters'][1]['checked'], False)
def test_filter_groups_have_correct_default_state(self):
request = self._get_request_for_params({
'q': 'email',
'lot': 'paas'
})
search_filters = filters_for_lot('paas', questions_builder)
set_filter_states(search_filters, request)
self.assertEqual(
search_filters[0],
{
'label': 'Booleans example',
'filters': [
{
'checked': False,
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'checked': False,
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}
)
def test_filter_groups_have_correct_state_when_changed(self):
request = self._get_request_for_params({
'q': 'email',
'lot': 'paas',
'booleanExample1': 'true'
})
search_filters = filters_for_lot('paas', questions_builder)
set_filter_states(search_filters, request)
self.assertEqual(
search_filters[0],
{
'label': 'Booleans example',
'filters': [
{
'checked': True,
'label': 'Option 1',
'name': 'booleanExample1',
'id': 'booleanExample1',
'value': 'true',
},
{
'checked': False,
'label': 'Option 2',
'name': 'booleanExample2',
'id': 'booleanExample2',
'value': 'true',
}
]
}
)
def test_no_lot_is_the_same_as_all(self):
all_filters = self._get_filter_group_by_label(
'all', 'Radios example'
)
no_lot_filters = self._get_filter_group_by_label(
None, 'Radios example'
)
self.assertTrue(all_filters)
self.assertEqual(all_filters, no_lot_filters)
def test_instance_has_correct_filter_groups_for_paas(self):
search_filters = filters_for_lot('paas', questions_builder)
filter_group_labels = [
group['label'] for group in search_filters
]
self.assertTrue('Booleans example' in filter_group_labels)
self.assertTrue('Checkboxes example' in filter_group_labels)
self.assertTrue('Radios example' in filter_group_labels)
def test_instance_has_correct_filter_groups_for_iaas(self):
search_filters = filters_for_lot('iaas', questions_builder)
filter_group_labels = [
group['label'] for group in search_filters
]
self.assertFalse('Booleans example' in filter_group_labels)
self.assertTrue('Checkboxes example' in filter_group_labels)
self.assertTrue('Radios example' in filter_group_labels)
| mit | 8,642,487,669,402,487,000 | 33.004425 | 79 | 0.49434 | false | 4.283724 | true | false | false |
jolyonb/edx-platform | lms/djangoapps/certificates/api.py | 1 | 22320 | """Certificates API
This is a Python API for generating certificates asynchronously.
Other Django apps should use the API functions defined in this module
rather than importing Django models directly.
"""
import logging
from django.conf import settings
from django.urls import reverse
from django.db.models import Q
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
from branding import api as branding_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationCourseSetting,
CertificateInvalidation,
CertificateStatuses,
CertificateTemplate,
CertificateTemplateAsset,
ExampleCertificateSet,
GeneratedCertificate,
certificate_status_for_student
)
from lms.djangoapps.certificates.queue import XQueueCertInterface
from eventtracking import tracker
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from util.organizations_helpers import get_course_organization_id
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.certificate")
MODES = GeneratedCertificate.MODES
def is_passing_status(cert_status):
"""
Given the status of a certificate, return a boolean indicating whether
the student passed the course. This just proxies to the classmethod
defined in models.py
"""
return CertificateStatuses.is_passing_status(cert_status)
def format_certificate_for_user(username, cert):
"""
Helper function to serialize an user certificate.
Arguments:
username (unicode): The identifier of the user.
cert (GeneratedCertificate): a user certificate
Returns: dict
"""
try:
return {
"username": username,
"course_key": cert.course_id,
"type": cert.mode,
"status": cert.status,
"grade": cert.grade,
"created": cert.created_date,
"modified": cert.modified_date,
"is_passing": is_passing_status(cert.status),
"is_pdf_certificate": bool(cert.download_url),
"download_url": (
cert.download_url or get_certificate_url(cert.user.id, cert.course_id, user_certificate=cert)
if cert.status == CertificateStatuses.downloadable
else None
),
}
except CourseOverview.DoesNotExist:
return None
def get_certificates_for_user(username):
"""
Retrieve certificate information for a particular user.
Arguments:
username (unicode): The identifier of the user.
Returns: list
Example Usage:
>>> get_certificates_for_user("bob")
[
{
"username": "bob",
"course_key": CourseLocator('edX', 'DemoX', 'Demo_Course', None, None),
"type": "verified",
"status": "downloadable",
"download_url": "http://www.example.com/cert.pdf",
"grade": "0.98",
"created": 2015-07-31T00:00:00Z,
"modified": 2015-07-31T00:00:00Z
}
]
"""
certs = []
# Checks if certificates are not None before adding them to list
for cert in GeneratedCertificate.eligible_certificates.filter(user__username=username).order_by("course_id"):
formatted_cert = format_certificate_for_user(username, cert)
if formatted_cert:
certs.append(formatted_cert)
return certs
def get_certificate_for_user(username, course_key):
"""
Retrieve certificate information for a particular user for a specific course.
Arguments:
username (unicode): The identifier of the user.
course_key (CourseKey): A Course Key.
Returns: dict
"""
try:
cert = GeneratedCertificate.eligible_certificates.get(
user__username=username,
course_id=course_key
)
except GeneratedCertificate.DoesNotExist:
return None
return format_certificate_for_user(username, cert)
def get_recently_modified_certificates(course_keys=None, start_date=None, end_date=None):
"""
Returns a QuerySet of GeneratedCertificate objects filtered by the input
parameters and ordered by modified_date.
"""
cert_filter_args = {}
if course_keys:
cert_filter_args['course_id__in'] = course_keys
if start_date:
cert_filter_args['modified_date__gte'] = start_date
if end_date:
cert_filter_args['modified_date__lte'] = end_date
return GeneratedCertificate.objects.filter(**cert_filter_args).order_by('modified_date') # pylint: disable=no-member
def generate_user_certificates(student, course_key, course=None, insecure=False, generation_mode='batch',
forced_grade=None):
"""
It will add the add-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'. It also emits
`edx.certificate.created` event for analytics.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
insecure - (Boolean)
generation_mode - who has requested certificate generation. Its value should `batch`
in case of django command and `self` if student initiated the request.
forced_grade - a string indicating to replace grade parameter. if present grading
will be skipped.
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
if not course:
course = modulestore().get_course(course_key, depth=0)
generate_pdf = not has_html_certificates_enabled(course)
cert = xqueue.add_cert(
student,
course_key,
course=course,
generate_pdf=generate_pdf,
forced_grade=forced_grade
)
    # If cert_status is not present in the certificate's valid statuses (for example unverified),
    # add_cert returns None and raises AttributeError while accessing cert attributes.
if cert is None:
return
if CertificateStatuses.is_passing_status(cert.status):
emit_certificate_event('created', student, course_key, course, {
'user_id': student.id,
'course_id': unicode(course_key),
'certificate_id': cert.verify_uuid,
'enrollment_mode': cert.mode,
'generation_mode': generation_mode
})
return cert.status
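# An illustrative call (the student/course_key variables are placeholders;
# the return value is one of the CertificateStatuses strings):
#   status = generate_user_certificates(student, course_key, generation_mode='self')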
def regenerate_user_certificates(student, course_key, course=None,
forced_grade=None, template_file=None, insecure=False):
"""
It will add the regen-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
grade_value - The grade string, such as "Distinction"
template_file - The template file used to render this certificate
insecure - (Boolean)
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
if not course:
course = modulestore().get_course(course_key, depth=0)
generate_pdf = not has_html_certificates_enabled(course)
log.info(
u"Started regenerating certificates for user %s in course %s with generate_pdf status: %s",
student.username, unicode(course_key), generate_pdf
)
return xqueue.regen_cert(
student,
course_key,
course=course,
forced_grade=forced_grade,
template_file=template_file,
generate_pdf=generate_pdf
)
def certificate_downloadable_status(student, course_key):
"""
Check the student existing certificates against a given course.
    If the status is neither generating, downloadable, nor error, the user can view the generate button.
Args:
student (user object): logged-in user
course_key (CourseKey): ID associated with the course
Returns:
Dict containing student passed status also download url, uuid for cert if available
"""
current_status = certificate_status_for_student(student, course_key)
    # If the certificate status is an error, the user should see the status as "generating".
    # On the back-end, we need to monitor those errors and re-submit the task.
response_data = {
'is_downloadable': False,
'is_generating': True if current_status['status'] in [CertificateStatuses.generating,
CertificateStatuses.error] else False,
'is_unverified': True if current_status['status'] == CertificateStatuses.unverified else False,
'download_url': None,
'uuid': None,
}
may_view_certificate = CourseOverview.get_from_id(course_key).may_certify()
if current_status['status'] == CertificateStatuses.downloadable and may_view_certificate:
response_data['is_downloadable'] = True
response_data['download_url'] = current_status['download_url'] or get_certificate_url(student.id, course_key)
response_data['uuid'] = current_status['uuid']
return response_data
def set_cert_generation_enabled(course_key, is_enabled):
"""Enable or disable self-generated certificates for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
The second flag should be enabled *only* when someone has successfully
generated example certificates for the course. This helps avoid
configuration errors (for example, not having a template configured
for the course installed on the workers). The UI for the instructor
dashboard enforces this constraint.
Arguments:
course_key (CourseKey): The course identifier.
Keyword Arguments:
is_enabled (boolean): If provided, enable/disable self-generated
certificates for this course.
"""
CertificateGenerationCourseSetting.set_self_generatation_enabled_for_course(course_key, is_enabled)
cert_event_type = 'enabled' if is_enabled else 'disabled'
event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type])
tracker.emit(event_name, {
'course_id': unicode(course_key),
})
if is_enabled:
log.info(u"Enabled self-generated certificates for course '%s'.", unicode(course_key))
else:
log.info(u"Disabled self-generated certificates for course '%s'.", unicode(course_key))
def is_certificate_invalid(student, course_key):
    """Check whether the student in the course has been invalidated
    for receiving certificates.
Arguments:
student (user object): logged-in user
course_key (CourseKey): The course identifier.
Returns:
Boolean denoting whether the student in the course is invalidated
to receive certificates
"""
is_invalid = False
certificate = GeneratedCertificate.certificate_for_student(student, course_key)
if certificate is not None:
is_invalid = CertificateInvalidation.has_certificate_invalidation(student, course_key)
return is_invalid
def cert_generation_enabled(course_key):
"""Check whether certificate generation is enabled for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
Certificates are enabled for a course only when both switches
are set to True.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
boolean: Whether self-generated certificates are enabled
for the course.
"""
return (
CertificateGenerationConfiguration.current().enabled and
CertificateGenerationCourseSetting.is_self_generation_enabled_for_course(course_key)
)
def generate_example_certificates(course_key):
"""Generate example certificates for a course.
Example certificates are used to validate that certificates
are configured correctly for the course. Staff members can
view the example certificates before enabling
the self-generated certificates button for students.
Several example certificates may be generated for a course.
For example, if a course offers both verified and honor certificates,
examples of both types of certificate will be generated.
If an error occurs while starting the certificate generation
job, the errors will be recorded in the database and
can be retrieved using `example_certificate_status()`.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
None
"""
xqueue = XQueueCertInterface()
for cert in ExampleCertificateSet.create_example_set(course_key):
xqueue.add_example_cert(cert)
def example_certificates_status(course_key):
"""Check the status of example certificates for a course.
This will check the *latest* example certificate task.
This is generally what we care about in terms of enabling/disabling
self-generated certificates for a course.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
list
Example Usage:
>>> from lms.djangoapps.certificates import api as certs_api
>>> certs_api.example_certificate_status(course_key)
[
{
'description': 'honor',
'status': 'success',
'download_url': 'http://www.example.com/abcd/honor_cert.pdf'
},
{
'description': 'verified',
'status': 'error',
'error_reason': 'No template found!'
}
]
"""
return ExampleCertificateSet.latest_status(course_key)
def _safe_course_key(course_key):
if not isinstance(course_key, CourseKey):
return CourseKey.from_string(course_key)
return course_key
def _course_from_key(course_key):
return CourseOverview.get_from_id(_safe_course_key(course_key))
def _certificate_html_url(user_id, course_id, uuid):
if uuid:
return reverse('certificates:render_cert_by_uuid', kwargs={'certificate_uuid': uuid})
elif user_id and course_id:
kwargs = {"user_id": str(user_id), "course_id": unicode(course_id)}
return reverse('certificates:html_view', kwargs=kwargs)
return ''
def _certificate_download_url(user_id, course_id, user_certificate=None):
if not user_certificate:
try:
user_certificate = GeneratedCertificate.eligible_certificates.get(
user=user_id,
course_id=_safe_course_key(course_id)
)
except GeneratedCertificate.DoesNotExist:
log.critical(
u'Unable to lookup certificate\n'
u'user id: %d\n'
u'course: %s', user_id, unicode(course_id)
)
if user_certificate:
return user_certificate.download_url
return ''
def has_html_certificates_enabled(course):
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return False
return course.cert_html_view_enabled
def get_certificate_url(user_id=None, course_id=None, uuid=None, user_certificate=None):
url = ''
course = _course_from_key(course_id)
if not course:
return url
if has_html_certificates_enabled(course):
url = _certificate_html_url(user_id, course_id, uuid)
else:
url = _certificate_download_url(user_id, course_id, user_certificate=user_certificate)
return url
def get_active_web_certificate(course, is_preview_mode=None):
"""
Retrieves the active web certificate configuration for the specified course
"""
certificates = getattr(course, 'certificates', {})
configurations = certificates.get('certificates', [])
for config in configurations:
if config.get('is_active') or is_preview_mode:
return config
return None
def get_certificate_template(course_key, mode, language):
"""
Retrieves the custom certificate template based on course_key, mode, and language.
"""
template = None
# fetch organization of the course
org_id = get_course_organization_id(course_key)
# only consider active templates
active_templates = CertificateTemplate.objects.filter(is_active=True)
if org_id and mode: # get template by org, mode, and key
org_mode_and_key_templates = active_templates.filter(
organization_id=org_id,
mode=mode,
course_key=course_key
)
template = get_language_specific_template_or_default(language, org_mode_and_key_templates)
# since no template matched that course_key, only consider templates with empty course_key
empty_course_key_templates = active_templates.filter(course_key=CourseKeyField.Empty)
if not template and org_id and mode: # get template by org and mode
org_and_mode_templates = empty_course_key_templates.filter(
organization_id=org_id,
mode=mode
)
template = get_language_specific_template_or_default(language, org_and_mode_templates)
if not template and org_id: # get template by only org
org_templates = empty_course_key_templates.filter(
organization_id=org_id,
mode=None
)
template = get_language_specific_template_or_default(language, org_templates)
if not template and mode: # get template by only mode
mode_templates = empty_course_key_templates.filter(
organization_id=None,
mode=mode
)
template = get_language_specific_template_or_default(language, mode_templates)
return template if template else None
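# The lookup above falls back in order: (org, mode, course key) ->
# (org, mode) -> (org only) -> (mode only); at each step a language-specific
# template wins over a language-neutral one (see the helpers below).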
def get_language_specific_template_or_default(language, templates):
"""
Returns templates that match passed in language.
Returns default templates If no language matches, or language passed is None
"""
two_letter_language = _get_two_letter_language_code(language)
language_or_default_templates = list(templates.filter(Q(language=two_letter_language)
| Q(language=None) | Q(language='')))
language_specific_template = get_language_specific_template(two_letter_language,
language_or_default_templates)
if language_specific_template:
return language_specific_template
else:
return get_all_languages_or_default_template(language_or_default_templates)
def get_language_specific_template(language, templates):
for template in templates:
if template.language == language:
return template
return None
def get_all_languages_or_default_template(templates):
for template in templates:
if template.language == '':
return template
return templates[0] if templates else None
def _get_two_letter_language_code(language_code):
"""
    Shortens the language code to its first two characters (e.g. es-419 becomes es).
    This is needed because the Catalog returns a locale language code, which is not
    always a 2-letter code.
"""
if language_code is None:
return None
elif language_code == '':
return ''
else:
return language_code[:2]
def emit_certificate_event(event_name, user, course_id, course=None, event_data=None):
"""
Emits certificate event.
"""
event_name = '.'.join(['edx', 'certificate', event_name])
if course is None:
course = modulestore().get_course(course_id, depth=0)
context = {
'org_id': course.org,
'course_id': unicode(course_id)
}
data = {
'user_id': user.id,
'course_id': unicode(course_id),
'certificate_url': get_certificate_url(user.id, course_id)
}
event_data = event_data or {}
event_data.update(data)
with tracker.get_tracker().context(event_name, context):
tracker.emit(event_name, event_data)
def get_asset_url_by_slug(asset_slug):
"""
Returns certificate template asset url for given asset_slug.
"""
asset_url = ''
try:
template_asset = CertificateTemplateAsset.objects.get(asset_slug=asset_slug)
asset_url = template_asset.asset.url
except CertificateTemplateAsset.DoesNotExist:
pass
return asset_url
def get_certificate_header_context(is_secure=True):
"""
    Return data to be used in the certificate header; the returned data should
    be customized according to the site configuration.
"""
data = dict(
logo_src=branding_api.get_logo_url(is_secure),
logo_url=branding_api.get_base_url(is_secure),
)
return data
def get_certificate_footer_context():
"""
    Return data to be used in the certificate footer; the returned data should
    be customized according to the site configuration.
"""
data = dict()
# get Terms of Service and Honor Code page url
terms_of_service_and_honor_code = branding_api.get_tos_and_honor_code_url()
if terms_of_service_and_honor_code != branding_api.EMPTY_URL:
data.update({'company_tos_url': terms_of_service_and_honor_code})
# get Privacy Policy page url
privacy_policy = branding_api.get_privacy_url()
if privacy_policy != branding_api.EMPTY_URL:
data.update({'company_privacy_url': privacy_policy})
# get About page url
about = branding_api.get_about_url()
if about != branding_api.EMPTY_URL:
data.update({'company_about_url': about})
return data
| agpl-3.0 | -4,637,370,171,658,106,000 | 33.12844 | 121 | 0.665681 | false | 4.284892 | true | false | false |
ayoubenx/pyfoot-v2.0 | resultats.py | 2 | 7271 | import DB_manager3, sys
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_ResultsManager(QtGui.QWidget):
def __init__(self, database, tableName):
QtGui.QWidget.__init__(self)
self.dbu = DB_manager3.DatabaseTool(database, tableName)
self.setupUi(self)
self.UpdateTree()
self.UpdateCombo()
def setupUi(self, ResultsManager):
ResultsManager.setObjectName(_fromUtf8("ResultsManager"))
ResultsManager.setFixedSize(742, 229)
self.labelhome = QtGui.QLabel(ResultsManager)
self.labelhome.setGeometry(QtCore.QRect(530, 10, 92, 17))
self.labelhome.setObjectName(_fromUtf8("labelhome"))
self.labelmessage = QtGui.QLabel(ResultsManager)
self.labelmessage.setGeometry(QtCore.QRect(540, 90, 151, 20))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.labelmessage.setFont(font)
self.labelmessage.setStyleSheet(_fromUtf8("color: rgb(255, 0, 0);"))
self.labelmessage.setText(_fromUtf8(""))
self.labelmessage.setObjectName(_fromUtf8("labelmessage"))
self.homename = QtGui.QComboBox(ResultsManager)
self.homename.setGeometry(QtCore.QRect(520, 30, 91, 27))
self.homename.setObjectName(_fromUtf8("homename"))
self.deletebutton = QtGui.QPushButton(ResultsManager)
self.deletebutton.setGeometry(QtCore.QRect(480, 150, 261, 27))
self.deletebutton.setObjectName(_fromUtf8("deletebutton"))
self.addbutton = QtGui.QPushButton(ResultsManager)
self.addbutton.setGeometry(QtCore.QRect(480, 110, 261, 27))
self.addbutton.setObjectName(_fromUtf8("addbutton"))
self.visitorname = QtGui.QComboBox(ResultsManager)
self.visitorname.setGeometry(QtCore.QRect(650, 30, 91, 27))
self.visitorname.setObjectName(_fromUtf8("visitorname"))
self.labelteam_2 = QtGui.QLabel(ResultsManager)
self.labelteam_2.setGeometry(QtCore.QRect(620, 40, 47, 17))
self.labelteam_2.setObjectName(_fromUtf8("labelteam_2"))
self.labelvisitor = QtGui.QLabel(ResultsManager)
self.labelvisitor.setGeometry(QtCore.QRect(660, 10, 92, 17))
self.labelvisitor.setObjectName(_fromUtf8("labelvisitor"))
self.labelresult = QtGui.QLabel(ResultsManager)
self.labelresult.setGeometry(QtCore.QRect(500, 190, 201, 31))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.labelresult.setFont(font)
self.labelresult.setStyleSheet(_fromUtf8("color: rgb(0, 170, 0);"))
self.labelresult.setText(_fromUtf8(""))
self.labelresult.setObjectName(_fromUtf8("labelresult"))
self.treeWidget = QtGui.QTreeWidget(ResultsManager)
self.treeWidget.setGeometry(QtCore.QRect(10, 10, 451, 211))
self.treeWidget.setObjectName(_fromUtf8("treeWidget"))
self.treeWidget.headerItem().setText(0, _fromUtf8("1"))
self.homescore = QtGui.QLineEdit(ResultsManager)
self.homescore.setGeometry(QtCore.QRect(520, 60, 91, 27))
self.homescore.setObjectName(_fromUtf8("homescore"))
self.visitorscore = QtGui.QLineEdit(ResultsManager)
self.visitorscore.setGeometry(QtCore.QRect(650, 60, 91, 27))
self.visitorscore.setObjectName(_fromUtf8("visitorscore"))
self.label = QtGui.QLabel(ResultsManager)
self.label.setGeometry(QtCore.QRect(470, 30, 66, 21))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(ResultsManager)
self.label_2.setGeometry(QtCore.QRect(470, 60, 66, 21))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.labelteam_3 = QtGui.QLabel(ResultsManager)
self.labelteam_3.setGeometry(QtCore.QRect(620, 60, 47, 17))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.labelteam_3.setFont(font)
self.labelteam_3.setObjectName(_fromUtf8("labelteam_3"))
self.retranslateUi(ResultsManager)
QtCore.QMetaObject.connectSlotsByName(ResultsManager)
def retranslateUi(self, ResultsManager):
ResultsManager.setWindowTitle(_translate("ResultsManager", "ResultsManager", None))
self.labelhome.setText(_translate("ResultsManager", "Home", None))
self.deletebutton.setText(_translate("ResultsManager", "Delete", None))
self.addbutton.setText(_translate("ResultsManager", "Add", None))
self.labelteam_2.setText(_translate("ResultsManager", " Vs", None))
self.labelvisitor.setText(_translate("ResultsManager", "Visitor", None))
self.label.setText(_translate("ResultsManager", "Teams", None))
self.label_2.setText(_translate("ResultsManager", "Scores", None))
self.labelteam_3.setText(_translate("ResultsManager", " :", None))
@QtCore.pyqtSignature("on_addbutton_clicked()")
def addbuttonf(self):
if((self.homename.currentText() != 'Choose Team') and (self.visitorname.currentText() != 'Choose Team') and (self.homename.currentText() != self.visitorname.currentText())):
Home = self.homename.currentText()
Visitor = self.visitorname.currentText()
Hscore = self.homescore.text()
Vscore = self.visitorscore.text()
self.dbu.AddEntryToTable(Home,Hscore,Vscore,Visitor)
self.UpdateTree()
self.labelresult.setText(_translate("ResultsManager", "Done!", None))
self.labelresult.setStyleSheet('QLabel#label {color: green}')
else :
self.labelmessage.setText(_translate("ResultsManager", "Fill The Form!", None))
self.labelmessage.setStyleSheet('QLabel#label {color: red}')
@QtCore.pyqtSignature("on_deletebutton_clicked()")
def delbutton(self):
idl = self.treeWidget.currentItem()
idd = idl.text(4)
self.dbu.DelEntry(str(idd))
self.UpdateTree()
def UpdateTree(self):
col = self.dbu.GetColumns()
table = self.dbu.GetTable()
for c in range(len(col)):
self.treeWidget.headerItem().setText(c, col[c][0])
self.treeWidget.clear()
for item in range(len(table)):
QtGui.QTreeWidgetItem(self.treeWidget)
for value in range(len(table[item])):
self.treeWidget.topLevelItem(item).setText(value, str(table[item][value]))
def UpdateCombo(self):
        c = []
        with open('teams.txt', 'r') as f:
            for line in f:
                c.append(line.rstrip('\n'))
self.homename.clear()
self.visitorname.clear()
self.homename.addItems(c)
self.visitorname.addItems(c) | mit | -6,442,178,527,033,832,000 | 44.45 | 181 | 0.656856 | false | 3.845056 | false | false | false |
peterwilletts24/Monsoon-Python-Scripts | msl_pressure/msl_plot_ensemble.py | 1 | 4521 | import os, sys
import cPickle as pickle
import glob
# import itertools
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
import numpy as np
#from mpl_toolkits.basemap import Basemap
#import matplotlib.animation as animation
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
def main():
lon_low= 60
lon_high = 105
lat_low = -10
lat_high = 30
first_of_year = datetime.date(2011, 01, 01)
first_ordinal = first_of_year.toordinal()
j=1
#pickle_name = 'pickle_daily_mean_*.p'
pickle_name = 'pickle_model_mean_collapsed_*.p'
flist = glob.glob ('/home/pwille/python_scripts/*/%s' % pickle_name)
plt.figure(figsize=(30, 15))
#plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05, left=0.075, right=0.925)
plt.gcf().subplots_adjust(top=0.5)
plt.suptitle('Mean sea level pressure of model runs (average of entire model run)')
for i in flist:
fname = str(i)
experiment_id = fname.split('/')[4]
if not os.path.exists('/home/pwille/python_scripts/pngs/%s' % (experiment_id)): os.makedirs('/home/pwille/python_scripts/pngs/%s' % (experiment_id))
#daily_mean = pickle.load( open( "/home/pwille/python_scripts/%s/pickle_daily_mean_%s.p" % (experiment_id, experiment_id), "rb" ) )
model_mean = pickle.load( open( "%s" % (fname), "rb" ) )
#print model_mean
for sub_cube in model_mean.slices(['grid_latitude', 'grid_longitude']):
#Get date in iso format for title, if needed
#day=sub_cube_daily.coord('dayyear')
#day_number = day.points[0]
#day_number_ordinal=first_ordinal-1 + day_number
#date_print = datetime.date.fromordinal(day_number_ordinal)
#date_iso = str(date_print.isoformat())
sub_cube.units = 'hPa'
sub_cube /= 100
# Load a Cynthia Brewer palette.
brewer_cmap = mpl_cm.get_cmap('Spectral')
#contour = qplt.contour(sub_cube_daily, brewer_cmap.N, cmap=brewer_cmap)
clevs = np.arange(996,1016)
sub_cube.coord('grid_latitude').guess_bounds()
sub_cube.coord('grid_longitude').guess_bounds()
print j
plt.subplot(2, 4, j, projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low,lat_high))
#plt.subplot(4, 2, j, projection=ccrs.PlateCarree())
j+=1
contour = iplt.contour(sub_cube, clevs, colors='k',linewidths=0.5)
#iplt.contourf(sub_cube, 16, cmap=brewer_cmap)
#plt.title('Daily Mean Sea Level Pressure: %s model run. %s' % (experiment_id, date_iso), fontsize=12)
plt.title('%s' % (experiment_id), fontsize=8)
dx, dy = 10, 10
plt.clabel(contour, fmt='%d', inline=1, fontsize=8)
plt.gca().coastlines(resolution='110m', color='gray')
plt.gca().stock_img()
gl = plt.gca().gridlines(draw_labels=True,linewidth=1, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
gl.xlocator = mticker.FixedLocator(range(60,105+dx,dx))
        gl.ylocator = mticker.FixedLocator(range(-10,30+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 8, 'color': 'gray'}
#gl.xlabel_style = {'color': 'red', 'weight': 'bold'}
gl.ylabel_style = {'size': 8, 'color': 'gray'}
#gl.xlabel_style = {'color': 'red', 'weight': 'bold'}
#plt.savefig('/home/pwille/python_scripts/pngs/%s/msl_model_mean_%s.png' % (experiment_id, experiment_id))
# plt.subplots_adjust(top=0.9, bottom=0.1, hspace=0.2)
plt.tight_layout()
plt.subplots_adjust(top=0.9, wspace=0.2, hspace=0.2)
#plt.show()
plt.savefig('/home/pwille/python_scripts/pngs/msl_model_mean_ensemble.png')
plt.close()
#print sub_cube
#print fname
#print experiment_id
if __name__ == '__main__':
main()
| mit | -4,242,318,659,673,928,700 | 34.880952 | 156 | 0.594116 | false | 3.297593 | false | false | false |
actofgoods/actofgoods | administration/urls.py | 1 | 2485 | """actofgoods URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from . import views
app_name = 'administration'
urlpatterns = [
url(r'^administration/$', views.administration, name='administration'),
url(r'^administration/requests/$', views.requests, name='requests'),
url(r'^administration/categories/$', views.categories, name='categories'),
url(r'^administration/needs/$', views.needs, name='needs'),
url(r'^administration/information/$', views.informations, name='information'),
url(r'^administration/users/$', views.users, name='users'),
url(r'^administration/groups/$', views.groups, name='groups'),
url(r'^administration/faq_administration/$', views.faq_administration, name='faq_administration'),
url(r'^administration/users/(?P<pk>\d+)/delete/$', views.user_delete, name='user_delete'),
url(r'^administration/groups/(?P<pk>\d+)/delete/$', views.group_delete, name='group_delete'),
url(r'^administration/needs/(?P<pk>\d+)/delete/$', views.need_delete, name='need_delete'),
url(r'^administration/information/(?P<pk>\d+)/delete/$', views.information_delete, name='information_delete'),
url(r'^administration/users/(?P<pk>\d+)/make_admin/$', views.make_admin, name='make_admin'),
url(r'^administration/categories/(?P<pk>\d+)/delete/$', views.categories_delete, name='categories_delete'),
url(r'^administration/information/(?P<pk>\d+)/$', views.information_admin, name='information_admin'),
url(r'^administration/information/comment/delete/$', views.comment_delete, name='comment_delete'),
url(r'^administration/information/comment/(?P<pk>\d+)/delete/$', views.comment_delete, name='comment_delete'),
url(r'^administration/information/(?P<pki>\d+)/(?P<pkc>\d+)/$', views.information_reported_comment_admin, name='information_reported_comment_admin'),
]
| mit | -8,970,017,133,031,198,000 | 59.609756 | 153 | 0.701811 | false | 3.555079 | false | false | false |
ttadano/ALM | tools/interface/VASP.py | 1 | 14333 | #
# VASP.py
#
# Interface to VASP (https://www.vasp.at)
#
# Copyright (c) 2014 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
from __future__ import print_function
import numpy as np
try:
try:
# cElementTree on Python 2.5+
import xml.etree.cElementTree as etree
except ImportError:
# ElementTree on Python 2.5+
import xml.etree.ElementTree as etree
except ImportError:
try:
# cElementTree
import cElementTree as etree
except ImportError:
# ElementTree
import elementtree.ElementTree as etree
def read_POSCAR(file_in):
file_pos = open(file_in, 'r')
file_pos.readline()
a = float(file_pos.readline().rstrip())
lavec = np.zeros((3, 3))
for i in range(3):
arr = file_pos.readline().rstrip().split()
if len(arr) != 3:
print("Could not read POSCAR properly")
exit(1)
for j in range(3):
lavec[i, j] = a * float(arr[j])
lavec = lavec.transpose()
invlavec = np.linalg.inv(lavec)
elements = file_pos.readline().rstrip().split()
if elements[0].isdigit():
nat_elem = [int(tmp) for tmp in elements]
elements = []
else:
nat_elem = [int(tmp) for tmp in file_pos.readline().rstrip().split()]
nat = np.sum(nat_elem)
basis = file_pos.readline().rstrip()
x = np.zeros((nat, 3))
for i in range(nat):
arr = file_pos.readline().rstrip().split()
for j in range(3):
x[i][j] = float(arr[j])
if basis == "Direct" or basis == "direct" or basis == "D" or basis == "d":
xf = x
else:
xf = np.dot(x, invlavec)
file_pos.close()
return lavec, invlavec, elements, nat_elem, xf
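# Note: when the POSCAR gives Cartesian coordinates, they are converted to
# fractional ones via the inverse lattice matrix above, so callers can always
# treat the returned xf as fractional coordinates.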
def write_POSCAR(prefix, counter, header, nzerofills,
lavec, elems, nat, disp, coord):
filename = prefix + str(counter).zfill(nzerofills) + ".POSCAR"
f = open(filename, 'w')
f.write("%s\n" % header)
f.write("%s\n" % "1.0")
for i in range(3):
f.write("%20.15f %20.15f %20.15f\n" % (lavec[0][i],
lavec[1][i],
lavec[2][i]))
for i in range(len(elems)):
f.write("%s " % elems[i])
if len(elems) > 0:
f.write("\n")
for i in range(len(nat)):
f.write("%d " % nat[i])
f.write("\n")
f.write("Direct\n")
for i in range(len(disp)):
for j in range(3):
f.write("%20.15f" % (coord[i][j] + disp[i][j]))
f.write("\n")
f.close()
def get_coordinate_VASP(xml_file, nat):
x = []
try:
xml = etree.parse(xml_file)
root = xml.getroot()
for elems in root.findall('calculation/structure/varray'):
str_coord = [elems2.text for elems2 in elems.findall('v')]
n = len(str_coord)
for i in range(n):
x.extend([t for t in str_coord[i].split()])
return np.array(x, dtype=np.float)
except:
print("Error in reading atomic positions from the XML file: %s" % xml_file)
def print_displacements_VASP(xml_files,
lavec, nat, x0,
conversion_factor,
file_offset):
x0 = np.round(x0, 8)
lavec_transpose = lavec.transpose()
vec_refold = np.vectorize(refold)
if file_offset is None:
disp_offset = np.zeros((nat, 3))
else:
x0_offset = get_coordinate_VASP(file_offset, nat)
try:
x0_offset = np.reshape(x0_offset, (nat, 3))
except:
print("File %s contains too many position entries" % file_offset)
disp_offset = x0_offset - x0
for search_target in xml_files:
x = get_coordinate_VASP(search_target, nat)
ndata = len(x) // (3 * nat)
x = np.reshape(x, (ndata, nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - x0 - disp_offset
disp = np.dot(vec_refold(disp), lavec_transpose)
disp *= conversion_factor
for i in range(nat):
print("%15.7F %15.7F %15.7F" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
def get_atomicforces_VASP(xml_file):
f = []
try:
xml = etree.parse(xml_file)
root = xml.getroot()
for elems in root.findall('calculation/varray'):
if elems.get('name') == "forces":
str_force = [elems2.text for elems2 in elems.findall('v')]
for i in range(len(str_force)):
f.extend([t for t in str_force[i].split()])
return np.array(f, dtype=np.float)
except:
print("Error in reading atomic forces from the XML file: %s" % xml_file)
def print_atomicforces_VASP(xml_files,
nat,
conversion_factor,
file_offset):
if file_offset is None:
force_offset = np.zeros((nat, 3))
else:
data0 = get_atomicforces_VASP(file_offset)
try:
force_offset = np.reshape(data0, (nat, 3))
except:
print("File %s contains too many force entries" % file_offset)
for search_target in xml_files:
data = get_atomicforces_VASP(search_target)
ndata = len(data) // (3 * nat)
data = np.reshape(data, (ndata, nat, 3))
for idata in range(ndata):
f = data[idata, :, :] - force_offset
f *= conversion_factor
for i in range(nat):
print("%15.8E %15.8E %15.8E" % (f[i][0],
f[i][1],
f[i][2]))
def get_coordinate_and_force_VASP(xml_file, nat):
x = []
f = []
try:
xml = etree.parse(xml_file)
root = xml.getroot()
for elems in root.findall('calculation/structure/varray'):
str_coord = [elems2.text for elems2 in elems.findall('v')]
n = len(str_coord)
for i in range(n):
x.extend([t for t in str_coord[i].split()])
for elems in root.findall('calculation/varray'):
if elems.get('name') == "forces":
str_force = [elems2.text for elems2 in elems.findall('v')]
for i in range(len(str_force)):
f.extend([t for t in str_force[i].split()])
        return np.array(x, dtype=np.float), np.array(f, dtype=np.float)
except:
print("Error in reading atomic positions and forces from the XML file: %s" % xml_file)
def print_displacements_and_forces_VASP(xml_files,
lavec, nat, x0,
conversion_factor_disp,
conversion_factor_force,
conversion_factor_energy,
file_offset,
filter_emin,
filter_emax):
x0 = np.round(x0, 8)
lavec_transpose = lavec.transpose()
vec_refold = np.vectorize(refold)
if file_offset is None:
disp_offset = np.zeros((nat, 3))
force_offset = np.zeros((nat, 3))
epot_offset = 0
else:
x0_offset, force_offset = get_coordinate_and_force_VASP(file_offset, nat)
epot_offset, _ = get_energies_VASP(file_offset)
epot_offset = np.array(epot_offset, dtype='float')
try:
x0_offset = np.reshape(x0_offset, (nat, 3))
except:
print("File %s contains too many position entries" % file_offset)
try:
force_offset = np.reshape(force_offset, (nat, 3))
except:
print("File %s contains too many force entries" % file_offset)
disp_offset = x0_offset - x0
if len(epot_offset) > 1:
print("File %s contains too many energy entries" % file_offset)
for search_target in xml_files:
x, force = get_coordinate_and_force_VASP(search_target, nat)
epot, ekin = get_energies_VASP(search_target)
ndata = len(x) // (3 * nat)
ndata2 = len(force) // (3 * nat)
if ndata != ndata2:
print("The numbers of displacement and force entries are different.")
exit(1)
ndata_energy = len(epot)
if ndata_energy != ndata:
print("The numbers of displacement and energy entries are different.")
exit(1)
epot = np.array(epot, dtype='float')
epot -= epot_offset
x = np.reshape(x, (ndata, nat, 3))
force = np.reshape(force, (ndata, nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - x0 - disp_offset
disp = np.dot(vec_refold(disp), lavec_transpose)
f = force[idata, :, :] - force_offset
disp *= conversion_factor_disp
f *= conversion_factor_force
if filter_emin is not None:
if filter_emin > epot[idata]:
continue
if filter_emax is not None:
if filter_emax < epot[idata]:
continue
print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" % (search_target, idata + 1, epot[idata]))
for i in range(nat):
print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i][0],
f[i][1],
f[i][2]))
def get_energies_VASP(xml_file):
etot_array = []
ekin_array = []
try:
xml = etree.parse(xml_file)
root = xml.getroot()
for elems in root.findall('calculation/energy'):
etot = 'N/A'
ekin = 'N/A'
for elems2 in elems.findall('i'):
if elems2.get('name') == "e_fr_energy":
etot = elems2.text
if elems2.get('name') == "kinetic":
ekin = elems2.text
etot_array.append(etot)
ekin_array.append(ekin)
return etot_array, ekin_array
except:
print("Error in reading energies from the XML file: %s" % xml_file)
def print_energies_VASP(xml_files,
conversion_factor,
file_offset):
print("# Etot, Ekin")
etot_offset = 0.0
ekin_offset = 0.0
if file_offset:
etot, ekin = get_energies_VASP(file_offset)
if len(etot) > 1 or len(ekin) > 1:
print("File %s contains too many energy entries" % file_offset)
exit(1)
if etot[0] != 'N/A':
etot_offset = float(etot[0])
if ekin[0] != 'N/A':
ekin_offset = float(ekin[0])
for search_target in xml_files:
etot, ekin = get_energies_VASP(search_target)
for i in range(len(etot)):
if etot[i] != 'N/A':
val_etot = float(etot[i]) - etot_offset
print("%15.8E" % (val_etot * conversion_factor), end=' ')
else:
print("%s" % etot[i], end=' ')
if ekin[i] != 'N/A':
val_ekin = float(ekin[i]) - ekin_offset
print("%15.8E" % (val_ekin * conversion_factor))
else:
print("%s" % ekin[i])
def get_unit_conversion_factor(str_unit):
Bohr_radius = 0.52917721067
Rydberg_to_eV = 13.60569253
disp_conv_factor = 1.0
energy_conv_factor = 1.0
force_conv_factor = 1.0
if str_unit == "ev":
disp_conv_factor = 1.0
energy_conv_factor = 1.0
elif str_unit == "rydberg":
disp_conv_factor = 1.0 / Bohr_radius
energy_conv_factor = 1.0 / Rydberg_to_eV
elif str_unit == "hartree":
disp_conv_factor = 1.0 / Bohr_radius
energy_conv_factor = 0.5 / Rydberg_to_eV
else:
print("This cannot happen.")
exit(1)
force_conv_factor = energy_conv_factor / disp_conv_factor
return disp_conv_factor, force_conv_factor, energy_conv_factor
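# For example, with str_unit == 'rydberg' the factors above give displacements
# in Bohr (1 Angstrom -> 1/0.52917721067 Bohr) and energies in Ry
# (1 eV -> 1/13.60569253 Ry); forces then come out in Ry/Bohr via the ratio.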
def parse(SPOSCAR_init, xml_files, xml_file_offset, str_unit,
print_disp, print_force, print_energy,
filter_emin, filter_emax):
aa, _, elems, nats, x_frac0 = read_POSCAR(SPOSCAR_init)
scale_disp, scale_force, scale_energy = get_unit_conversion_factor(str_unit)
if print_disp == True and print_force == True:
print_displacements_and_forces_VASP(xml_files,
aa, np.sum(nats),
x_frac0,
scale_disp,
scale_force,
scale_energy,
xml_file_offset,
filter_emin,
filter_emax)
elif print_disp == True:
print_displacements_VASP(xml_files,
aa, np.sum(nats),
x_frac0,
scale_disp,
xml_file_offset)
elif print_force == True:
print_atomicforces_VASP(xml_files,
np.sum(nats),
scale_force,
xml_file_offset)
elif print_energy == True:
print_energies_VASP(xml_files,
scale_energy,
xml_file_offset)
def refold(x):
if x >= 0.5:
return x - 1.0
elif x < -0.5:
return x + 1.0
else:
return x
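# refold() maps a fractional displacement back into [-0.5, 0.5), e.g.
# refold(0.75) gives -0.25 and refold(-0.6) gives 0.4, so displacements across
# a periodic boundary are measured along the nearest image.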
| mit | 8,267,490,672,836,880,000 | 29.691649 | 107 | 0.484965 | false | 3.674186 | false | false | false |
pixelated/puppet-pixelated | files/functional-tests/steps/__init__.py | 1 | 2678 | #
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import couchdb
import shutil
LEAP_HOME_FOLDER = os.environ.get('LEAP_HOME_FOLDER', '/var/lib/pixelated/.leap/')
def detect_hostname():
return os.environ.get('TESTHOST') or subprocess.check_output(['hostname', '-d']).strip()
hostname = detect_hostname()
user_agent_address = 'https://%s' % hostname
def url_home(port=None):
if port is not None:
return '%s:%d' % (user_agent_address, port)
else:
return user_agent_address
def login_url():
return url_home(port=8083) + '/login'
def logout_url():
return url_home(port=8083) + '/logout'
def signup_url():
return url_home() + '/signup'
def leap_login_url():
return url_home() + '/login'
def _netrc_couch_credentials():
with open('/etc/couchdb/couchdb.netrc', 'r') as netrc:
netrc_line = netrc.readline().strip().split(' ')
credentials = {}
for index in xrange(0, len(netrc_line), 2):
credentials[netrc_line[index]] = netrc_line[index+1]
return credentials
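# The parser above expects a single netrc line of alternating key/value tokens,
# e.g. (hypothetical values): "machine localhost login admin password secret"
# becomes {'machine': 'localhost', 'login': 'admin', 'password': 'secret'}.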
def _delete_identity(server, username):
email = '%s@%s' % (username, detect_hostname())
filter_by_user_id = '''function(doc) { if (doc['address']=='%s') { emit(doc, null);} }''' % email
identities = server['identities']
user_identities = identities.query(filter_by_user_id)
for ident in user_identities:
doc = identities.get(ident['id'])
identities.delete(doc)
def _delete_data(server, user_id):
user_db = 'user-%s' % user_id
if user_db in server:
del server[user_db]
def delete_soledad_server_db(user_id, username):
couch_credentials = _netrc_couch_credentials()
server = couchdb.Server("http://%(login)s:%(password)s@%(machine)s:5984" % couch_credentials)
_delete_identity(server, username)
_delete_data(server, user_id)
def delete_soledad_client_db(user_id):
soledad_folder = LEAP_HOME_FOLDER + user_id
if os.path.exists(soledad_folder):
shutil.rmtree(soledad_folder)
| agpl-3.0 | 1,248,136,364,840,974,300 | 29.089888 | 102 | 0.677371 | false | 3.3475 | false | false | false |
HPPTECH/hpp_IOSTressTest | Refer/check_ID_done.py | 1 | 1906 |
import re
import subprocess


def findProcess(processId):
    ps = subprocess.Popen("ps -ef | grep " + processId, shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
ps.stdout.close()
ps.wait()
return output
def isProcessRunning(processId):
    output = findProcess(processId)
    if re.search(processId, output) is None:
        return False
    else:
        return True
def check_process(process):
    process_found = False
    s = subprocess.Popen(["ps", "ax"], stdout=subprocess.PIPE)
    for x in s.stdout:
        if re.search(process, x):
            process_found = True
    if not process_found:
        print 'no process executing'
    else:
        print 'process executing'
# mluebke's code is not 100% correct; kill() can also raise EPERM (access
# denied), in which case that obviously means a process exists. This is
# supposed to work:
# (edited as per Jason R. Coombs' comments)
import errno
import os
import sys
def pid_exists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
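# Minimal usage sketch (the PID value is arbitrary):
#   if pid_exists(1234):
#       print 'process 1234 is alive'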
| mit | -1,037,469,669,189,381,100 | 25.486111 | 156 | 0.612802 | false | 4.021097 | false | false | false |
beefoo/still-i-rise | detect_flow.py | 1 | 1815 | # -*- coding: utf-8 -*-
# Based on:
# http://docs.opencv.org/trunk/d7/d8b/tutorial_py_lucas_kanade.html
# https://github.com/opencv/opencv/blob/master/samples/python/opt_flow.py
#
# Outputs image where direction responds to hue, length by brightness
# 0° Blue, 60° Magenta, 120° Red, 180° Yellow, 240° Green, 300° Cyan
import argparse
import cv2
import locale
import os
from glob import glob
import numpy as np
from pprint import pprint
import sys
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except locale.Error:
locale.setlocale(locale.LC_ALL, 'english-us')
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_DIR", default="frames/*.png", help="Path to frames directory")
parser.add_argument('-out', dest="OUTPUT_DIR", default="frames_flow/", help="Path to output directory")
# init input
args = parser.parse_args()
# if not os.path.exists(args.OUTPUT_DIR):
# os.makedirs(args.OUTPUT_DIR)
def drawHsv(flow):
h, w = flow.shape[:2]
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*180/np.pi/2
hsv[...,1] = 255
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
return bgr
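# Sanity note on the mapping: a uniform flow field of constant direction
# renders as a single hue, with brightness scaled by the per-frame min/max
# normalization of the magnitude above.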
frames = glob(args.INPUT_DIR)
frameCount = len(frames)
print "Found %s frames" % locale.format("%d", frameCount, grouping=True)
frames.sort()
prvs = None
for i, f in enumerate(frames):
im = cv2.imread(f)
if prvs is None:
prvs = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
continue
nxt = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs,nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0)
bgr = drawHsv(flow)
cv2.imshow('frame',bgr)
cv2.waitKey(30)
prvs = nxt
cv2.destroyAllWindows()
| mit | -1,169,636,866,794,271,500 | 26.830769 | 103 | 0.669983 | false | 2.687964 | false | false | false |
personalrobotics/libcozmo | src/examples/rviz_example.py | 1 | 1927 | #!/usr/bin/env python
import rospy
from roscpp_initializer import roscpp_initializer
from aikidopy import SkeletonMarker, InteractiveMarkerViewer
from cozmopy import Cozmo
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--mesh_dir', dest='mesh_dir', required = True,
                        help='The path to the directory containing Cozmo meshes')
args = parser.parse_args()
# rospy.init_node does not initialize roscpp and if roscpp is not initialized
# instanciating ros::NodeHandle will lead to a fatal error.
# roscpp_initializer initializes roscpp and ros::NodeHandle in the background
roscpp_initializer.roscpp_init("load_cozmo", [])
rospy.init_node("load_cozmo")
rate = rospy.Rate(10)
topicName = "dart_markers"
baseFrameName = "map"
if not rospy.is_shutdown():
cozmo = Cozmo(args.mesh_dir)
        skeleton = cozmo.getCozmoSkeleton()
print("Starting viewer. Please subscribe to the {} InteractiveMarker"
" topic in Rviz \n".format(topicName))
viewer = InteractiveMarkerViewer(topicName, baseFrameName)
cozmo_marker = viewer.addSkeleton(skeleton)
        viewer.setAutoUpdate(True)
input_str = ""
input_val = 0
while input_val != -1.0:
input_str = raw_input("\nEnter forklift position (0-0.86 radians, -1 to quit): ")
try:
input_val = float(input_str)
print input_val
except ValueError as verr:
print('Please enter a valid float value\n')
continue
if input_val == -1.0:
break
elif (input_val > 0.86 or input_val < 0):
print('This value exceeds the joint limits, please enter valid value\n')
continue
            cozmo.setForkliftPosition(input_val)
| bsd-3-clause | 631,787,375,368,796,800 | 34.685185 | 93 | 0.615983 | false | 3.854 | false | false | false |
timcera/hspfbintoolbox | setup.py | 1 | 2360 | # -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
pkg_name = "hspfbintoolbox"
version = open("VERSION").readline().strip()
if sys.argv[-1] == "publish":
os.system("cleanpy .")
os.system("python setup.py sdist")
os.system("twine upload dist/{pkg_name}-{version}.tar.gz".format(**locals()))
sys.exit()
README = open("./README.rst").read()
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
"tstoolbox >= 103",
]
extras_require = {
"dev": [
"black",
"cleanpy",
"twine",
"pytest",
"coverage",
"flake8",
"pytest-cov",
"pytest-mpl",
"pre-commit",
]
}
setup(
name=pkg_name,
version=version,
description=(
"Reads Hydrological Simulation Program - " "FORTRAN binary output files."
),
long_description=README,
classifiers=[
# Get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="hspf binary hydrologic simulation model",
author="Tim Cera, P.E.",
author_email="[email protected]",
url="http://timcera.bitbucket.io/{pkg_name}/docsrc/index.html".format(**locals()),
packages=[pkg_name],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points={
"console_scripts": ["{pkg_name}={pkg_name}.{pkg_name}:main".format(**locals())]
},
extras_require=extras_require,
python_requires=">=3.7.1",
)
| bsd-3-clause | 7,215,059,931,769,591,000 | 28.135802 | 87 | 0.622458 | false | 3.81877 | false | false | false |
NullHypothesis/tor-dns-tools | analyse-query-quality.py | 1 | 5546 | #!/usr/bin/env python2
#
# Copyright 2016 Philipp Winter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Determine query quality of DNS resolvers in given pcap file.
The code filters DNS queries for `PREFIX.tor.nymity.ch' and determines which
resolvers are poorly configured.
"""
import re
import sys
import time
import logging as log
import datetime
import cymruwhois
import scapy.all as scapy
# IP addresses of machines that were involved in this experiment.
DNS_SERVER_ADDR = "198.83.85.34"
DNS_QUERY_ADDR = "193.11.166.194"
TARGET_DOMAIN = "tor.nymity.ch"
log.basicConfig(level=log.getLevelName("INFO"),
format="%(asctime)s [%(levelname)s]: %(message)s")
# Maps exit relay fingerprints to DNS queries.
has_lowercase = re.compile("[a-z]")
has_uppercase = re.compile("[A-Z]")
fingerprint_pattern = re.compile("^[a-fA-F0-9]{40,}$")
def has_0x20_encoding(query):
"""
Return `True' if query uses 0x20 encoding.
Note that there's a slim chance of false negatives here because a resolver
could produce an all-lowercase or all-uppercase query despite using 0x20
encoding.
"""
return has_lowercase.search(query) and has_uppercase.search(query)
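# Hypothetical examples (not from a real capture; labels shaped like the
# PREFIX.tor.nymity.ch queries this script filters for):
#   has_0x20_encoding('aBcD1234.tor.nymity.ch') -> truthy (mixed case)
#   has_0x20_encoding('abcd1234.tor.nymity.ch') -> falsy  (all lowercase)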
def analyse_queries(exit_queries, whois):
"""
Iterate over queries and determine their quality.
"""
has_0x20 = 0
has_rand_port = 0
lacks_0x20 = set()
lacks_rand = set()
for exit_fpr, info in exit_queries.iteritems():
query, src_port, src_addr = info
if has_0x20_encoding(query):
has_0x20 += 1
else:
lacks_0x20.add((exit_fpr, src_addr))
if src_port != 53:
has_rand_port += 1
else:
lacks_rand.add((exit_fpr, src_addr))
exit_queries_len = len(exit_queries)
has_0x20_pct = float(has_0x20) / exit_queries_len * 100
has_rand_port_pct = float(has_rand_port) / exit_queries_len * 100
log.info("Extracted queries from %d resolvers." % exit_queries_len)
log.info("%d out of %d resolvers (%.2f%%) use 0x20 encoding." %
(has_0x20, exit_queries_len, has_0x20_pct))
log.info("%d out of %d resolvers (%.2f%%) use random source port." %
(has_rand_port, exit_queries_len, has_rand_port_pct))
print "%d,%d,%d" % (exit_queries_len, has_0x20, has_rand_port)
# Print resolvers that are poorly configured.
addrs = []
for record, info in zip(whois.lookupmany([addr for _, addr in lacks_0x20]),
lacks_0x20):
exit_fpr, rslv_addr = info
log.warning("%s %15s (%30s) lacks 0x20." %
(exit_fpr[:8], rslv_addr, record.owner[:30]))
addrs.append(rslv_addr)
print ",".join(addrs)
addrs = []
for record, info in zip(whois.lookupmany([addr for _, addr in lacks_rand]),
lacks_rand):
exit_fpr, rslv_addr = info
log.warning("%s %15s (%30s) lacks random source port." %
(exit_fpr[:8], rslv_addr, record.owner[:30]))
addrs.append(rslv_addr)
print ",".join(addrs)
def matches_fingerprint(dns_label):
"""
Return `True' if given dns_label appears to be a fingerprint.
"""
return fingerprint_pattern.match(dns_label)
def parse_file(pcap_file):
"""
Parse pcap file and return dictionary mapping exit fingerprint to its info.
"""
exit_queries = dict()
try:
packets = scapy.rdpcap(pcap_file)
except Exception as err:
log.critical("Error while reading pcap: %s" % err)
sys.exit(3)
for packet in packets:
if not packet.haslayer(scapy.IP):
continue
src_addr = packet[scapy.IP].src
if src_addr == DNS_QUERY_ADDR or src_addr == DNS_SERVER_ADDR:
continue
if not packet.haslayer(scapy.DNSQR):
continue
query = packet[scapy.DNSQR].qname
if TARGET_DOMAIN not in query.lower():
continue
# Extract fingerprint and add dictionary entry.
dns_labels = query.split(".")
if not matches_fingerprint(dns_labels[0]):
continue
exit_fpr = dns_labels[0].lower()
exit_queries[exit_fpr] = (query, packet[scapy.UDP].sport, src_addr)
if len(packets) >= 2:
first, last = packets[0].time, packets[-1].time
log.info("Trace duration: %s" %
str(datetime.timedelta(seconds=last-first)))
return exit_queries
if __name__ == "__main__":
if len(sys.argv) != 2:
log.critical("Usage: %s PCAP_FILE" % sys.argv[0])
sys.exit(1)
pcap_file = sys.argv[1]
before = time.time()
exit_queries = parse_file(pcap_file)
log.info("Parsed file in %ss." % str(time.time() - before))
if len(exit_queries) == 0:
log.critical("Could not extract any queries from pcap.")
sys.exit(2)
analyse_queries(exit_queries, cymruwhois.Client())
sys.exit(0)
| gpl-3.0 | -5,350,662,846,594,334,000 | 28.5 | 79 | 0.626578 | false | 3.417129 | false | false | false |
sly-ninja/python_for_ml | Module5/assignment2.py | 1 | 5895 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
def showandtell(title=None):
if title != None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300)
plt.show()
# exit()
#
# INFO: This dataset has call records for 10 users tracked over the course of 3 years.
# Your job is to find out where the users likely live and work at!
#
# TODO: Load up the dataset and take a peek at its head
# Convert the date using pd.to_datetime, and the time using pd.to_timedelta
#
df = pd.read_csv('Datasets/CDR.csv')
df['CallDate'] = pd.to_datetime( df['CallDate'] )
df['CallTime'] = pd.to_timedelta( df['CallTime'])
#
# TODO: Get a distinct list of "In" phone numbers (users) and store the values in a
# regular python list.
# Hint: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tolist.html
#
import numpy as np
u = np.unique(df['In'])
unique_nums = u.tolist()
#
# TODO: Create a slice called user1 that filters to only include dataset records where the
# "In" feature (user phone number) is equal to the first number on your unique list above;
# that is, the very first number in the dataset
#
user1 = df[df['In'] == unique_nums[0]]
# INFO: Plot all the call locations
user1.plot.scatter(x='TowerLon', y='TowerLat', c='gray', marker='o', alpha=0.1, title='Call Locations')
# showandtell() # Comment this line out when you're ready to proceed
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This
# is where domain expertise comes into play. Your intuition tells you that people are likely
# to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#
# TODO: Add more filters to the user1 slice you created. Add bitwise logic so that you're
# only examining records that came in on weekends (sat/sun).
#
user1 = user1[(user1['DOW'] == 'Sat') | (user1['DOW'] == 'Sun')]
#
# TODO: Further filter it down for calls that are came in either before 6AM OR after 10pm (22:00:00).
# You can use < and > to compare the string times, just make sure you code them as military time
# strings, eg: "06:00:00", "22:00:00": https://en.wikipedia.org/wiki/24-hour_clock
#
# You might also want to review the Data Manipulation section for this. Once you have your filtered
# slice, print out its length:
#
user1a = user1[('06:00:00' > user1['CallTime']) | (user1['CallTime'] > '22:00:00')]
#
# INFO: Visualize the dataframe with a scatter plot as a sanity check. Since you're familiar
# with maps, you know well that your X-Coordinate should be Longitude, and your Y coordinate
# should be the tower Latitude. Check the dataset headers for proper column feature names.
# https://en.wikipedia.org/wiki/Geographic_coordinate_system#Geographic_latitude_and_longitude
#
# At this point, you don't yet know exactly where the user is located just based off the cell
# phone tower position data; but considering the below are for Calls that arrived in the twilight
# hours of weekends, it's likely that wherever they are bunched up is probably near where the
# caller's residence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1a.TowerLon, user1a.TowerLat, c='g', marker='o', alpha=0.2)
# user1.plot.scatter(user1.TowerLon, user1.TowerLat, c='gray', alpha=0.1, title='Weekend Twilight Calls')
# showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Run K-Means with a K=1. There really should only be a single area of concentration. If you
# notice multiple areas that are "hot" (multiple areas the usr spends a lot of time at that are FAR
# apart from one another), then increase K=2, with the goal being that one of the centroids will
# sweep up the annoying outliers; and the other will zero in on the user's approximate home location.
# Or rather the location of the cell tower closest to their home.....
#
# Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other
# data is suitable for your purposes. Since both Lat and Lon are (approximately) on the same scale,
# no feature scaling is required. Print out the centroid locations and add them onto your scatter
# plot. Use a distinguishable marker and color.
#
# Hint: Make sure you graph the CORRECT coordinates. This is part of your domain expertise.
#
from sklearn.cluster import KMeans
user1b = user1a[['TowerLon', 'TowerLat']]
model = KMeans(n_clusters = 7)
model.fit(user1b)
#
# INFO: Print and plot the centroids...
centroids = model.cluster_centers_
ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169)
print('centroids:', centroids)
# showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Repeat the above steps for all 10 individuals, being sure to record their approximate home
# locations. You might want to use a for-loop, unless you enjoy typing.
#
for index,item in enumerate(unique_nums):
user = df[df['In'] == unique_nums[index]]
user = user[('06:00:00' > user['CallTime']) | (user['CallTime'] > '22:00:00')]
user = user[(user['DOW'] == 'Sat') | (user['DOW'] == 'Sun')]
user = user[['TowerLon', 'TowerLat']]
model = KMeans(n_clusters = 7)
model.fit(user)
centroids = model.cluster_centers_
ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169)
print(item, centroids)
| mit | 560,721,554,075,269,200 | 38.563758 | 105 | 0.718236 | false | 3.324873 | false | false | false |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/tools/emterpretify.py | 1 | 54897 | #!/usr/bin/env python2
'''
Processes asm.js code to make it run in an emterpreter.
Currently this requires the asm.js code to have been built with -s FINALIZE_ASM_JS=0
'''
import os, sys, re, json
import asm_module, shared, shutil
# params
INNERTERPRETER_LAST_OPCODE = 0 # 'CONDD'
EMT_STACK_MAX = 1024*1024
LOG_CODE = os.environ.get('EMCC_LOG_EMTERPRETER_CODE')
ZERO = False
ASYNC = False
ASSERTIONS = False
PROFILING = False
SWAPPABLE = False
FROUND = False
ADVISE = False
MEMORY_SAFE = False
def handle_arg(arg):
global ZERO, ASYNC, ASSERTIONS, PROFILING, FROUND, ADVISE, MEMORY_SAFE
if '=' in arg:
l, r = arg.split('=')
if l == 'ZERO': ZERO = int(r)
elif l == 'ASYNC': ASYNC = int(r)
elif l == 'ASSERTIONS': ASSERTIONS = int(r)
elif l == 'PROFILING': PROFILING = int(r)
elif l == 'FROUND': FROUND = int(r)
elif l == 'ADVISE': ADVISE = int(r)
elif l == 'MEMORY_SAFE': MEMORY_SAFE = int(r)
return False
return True
DEBUG = os.environ.get('EMCC_DEBUG')
config = shared.Configuration()
temp_files = config.get_temp_files()
if DEBUG:
print >> sys.stderr, 'running emterpretify on', sys.argv
if FROUND:
shared.Settings.PRECISE_F32 = 1
sys.argv = filter(handle_arg, sys.argv)
# consts
BLACKLIST = set(['_malloc', '_free', '_memcpy', '_memmove', '_memset', 'copyTempDouble', 'copyTempFloat', '_strlen', 'stackAlloc', 'setThrew', 'stackRestore', 'setTempRet0', 'getTempRet0', 'stackSave', 'runPostSets', '_emscripten_autodebug_double', '_emscripten_autodebug_float', '_emscripten_autodebug_i8', '_emscripten_autodebug_i16', '_emscripten_autodebug_i32', '_emscripten_autodebug_i64', '_strncpy', '_strcpy', '_strcat', '_saveSetjmp', '_testSetjmp', '_emscripten_replace_memory', '_bitshift64Shl', '_bitshift64Ashr', '_bitshift64Lshr', 'setAsyncState', 'emtStackSave'])
WHITELIST = []
YIELDLIST = ['stackSave', 'stackRestore', 'stackAlloc', 'setThrew', '_memset'] # functions which are ok to run while doing a sleep_with_yield.
SYNC_FUNCS = set(['_emscripten_sleep', '_emscripten_sleep_with_yield', '_emscripten_wget_data', '_emscripten_idb_load', '_emscripten_idb_store', '_emscripten_idb_delete'])
OPCODES = [ # l, lx, ly etc - one of 256 locals
'SET', # [lx, ly, 0] lx = ly (int or float, not double)
'SETVI', # [l, vl, vh] l = v (16-bit signed int)
'SETVIB', # [l, 0, 0] [..v..] l = 32-bit int in next 32-bit instruction
'ADD', # [lx, ly, lz] lx = ly + lz (32-bit int)
'SUB', # [lx, ly, lz] lx = ly - lz (32-bit int)
'MUL', # [lx, ly, lz] lx = ly * lz (32-bit int)
'SDIV', # [lx, ly, lz] lx = ly / lz (32-bit signed int)
'UDIV', # [lx, ly, lz] lx = ly / lz (32-bit unsigned int)
'SMOD', # [lx, ly, lz] lx = ly % lz (32-bit signed int)
'UMOD', # [lx, ly, lz] lx = ly % lz (32-bit unsigned int)
'NEG', # [lx, ly, 0] lx = -ly (int)
'BNOT', # [lx, ly, 0] ly = ~ly (int)
'LNOT', # [lx, ly, 0] ly = !ly (int)
'EQ', # [lx, ly, lz] lx = ly == lz (32-bit int)
'NE', # [lx, ly, lz] lx = ly != lz (32-bit int)
'SLT', # [lx, ly, lz] lx = ly < lz (32-bit signed)
'ULT', # [lx, ly, lz] lx = ly < lz (32-bit unsigned)
'SLE', # [lx, ly, lz] lx = ly <= lz (32-bit signed)
'ULE', # [lx, ly, lz] lx = ly <= lz (32-bit unsigned)
'AND', # [lx, ly, lz] lx = ly & lz
'OR', # [lx, ly, lz] lx = ly | lz
'XOR', # [lx, ly, lz] lx = ly ^ lz
'SHL', # [lx, ly, lz] lx = ly << lz
'ASHR', # [lx, ly, lz] lx = ly >> lz
'LSHR', # [lx, ly, lz] lx = ly >>> lz
'ADDV', # [lx, ly, v] lx = ly + v (32-bit int, v is 8-bit signed)
'SUBV',
'MULV',
'SDIVV',
'UDIVV', # (v is 8-bit unsigned)
'SMODV',
'UMODV', # (v is 8-bit unsigned)
'EQV',
'NEV',
'SLTV',
'ULTV', # (v is 8-bit unsigned)
'SLEV',
'ULEV', # (v is 8-bit unsigned)
'ANDV',
'ORV',
'XORV',
'SHLV', # (v is 8-bit unsigned)
'ASHRV', # (v is 8-bit unsigned)
'LSHRV', # (v is 8-bit unsigned)
'LNOTBRF', # [cond] [absolute-target] cond+branch
'EQBRF',
'NEBRF',
'SLTBRF',
'ULTBRF',
'SLEBRF',
'ULEBRF',
'LNOTBRT',
'EQBRT',
'NEBRT',
'SLTBRT',
'ULTBRT',
'SLEBRT',
'ULEBRT',
'SETD', # [lx, ly, 0] lx = ly (double)
  'SETVD',     # [lx, vl, vh]         lx = v (16-bit signed int, converted into double)
'SETVDI', # [lx, 0, 0] [..v..] lx = v (32 bit signed int, converted into double)
'SETVDF', # [lx, 0, 0] [..v..] lx = v (32 bit float, converted into double)
'SETVDD', # [lx, 0, 0][.v.][.v.] lx = v (64 bit double)
'ADDD', # [lx, ly, lz] lx = ly + lz (double)
'SUBD', # [lx, ly, lz] lx = ly - lz (double)
'MULD', # [lx, ly, lz] lx = ly * lz (double)
'DIVD', # [lx, ly, lz] lx = ly / lz (double)
'MODD', # [lx, ly, lz] lx = ly % lz (double)
'NEGD', # [lx, ly, 0] lx = -ly (double)
'EQD', # [lx, ly, lz] lx = ly == lz (double)
'NED', # [lx, ly, lz] lx = ly != lz (double)
  'LTD',       # [lx, ly, lz]         lx = ly < lz (double)
  'LED',       # [lx, ly, lz]         lx = ly <= lz (double)
  'GTD',       # [lx, ly, lz]         lx = ly > lz (double)
  'GED',       # [lx, ly, lz]         lx = ly >= lz (double)
'D2I', # [lx, ly, 0] lx = ~~ly (double-to-int)
'SI2D', # [lx, ly, 0] lx = +ly (signed int-to-double)
'UI2D', # [lx, ly, 0] lx = +ly (unsigned int-to-double)
'LOAD8', # [lx, ly, 0] lx = HEAP8[ly >> 0]
'LOADU8', # [lx, ly, 0] lx = HEAPU8[ly >> 0]
'LOAD16', # [lx, ly, 0] lx = HEAP16[ly >> 1]
'LOADU16', # [lx, ly, 0] lx = HEAPU16[ly >> 1]
'LOAD32', # [lx, ly, 0] lx = HEAP32[ly >> 2] - no need for unsigned version, this is set to a register anyhow
  'STORE8',    # [lx, ly, 0]          HEAP8[lx >> 0] = ly
  'STORE16',   # [lx, ly, 0]          HEAP16[lx >> 1] = ly
  'STORE32',   # [lx, ly, 0]          HEAP32[lx >> 2] = ly
'LOADF64', # [lx, ly, 0] lx = HEAPF64[ly >> 3]
'STOREF64', # [lx, ly, 0] HEAPF64[lx >> 3] = ly
  'LOADF32',   # [lx, ly, 0]          lx = HEAPF32[ly >> 2]
  'STOREF32',  # [lx, ly, 0]          HEAPF32[lx >> 2] = ly
'LOAD8A', # [lx, ly, lz] load-add and store-add instructions, whose pointer input is a signed addition: lx = load(ly + lz), store(lx + ly) = lz
'LOADU8A',
'LOAD16A',
'LOADU16A',
'LOAD32A',
'STORE8A',
'STORE16A',
'STORE32A',
'LOADF64A',
'STOREF64A',
'LOADF32A',
'STOREF32A',
'LOAD8AV', # [lx, ly, lz] load-add and store-add instructions, whose pointer input is a signed addition: lx = load(ly + lz), store(lx + ly) = lz, where the second add op is 8-bit signed
'LOADU8AV',
'LOAD16AV',
'LOADU16AV',
'LOAD32AV',
'STORE8AV',
'STORE16AV',
'STORE32AV',
'LOADF64AV',
'STOREF64AV',
'LOADF32AV',
'STOREF32AV',
'STORE8C',
'STORE16C',
'STORE32C',
'STOREF64C',
'STOREF32C',
'BR', # [0, tl, th] jump t instructions (multiple of 4)
'BRT', # [cond, tl, th] if cond, jump t instructions (multiple of 4)
'BRF', # [cond, tl, th] if !cond, jump t instructions (multiple of 4)
'BRA', # [0, 0, 0] [addr] jump to addr
'BRTA', # [cond, 0, 0] [addr] if cond, jump to addr
'BRFA', # [cond, 0, 0] [addr] if !cond, jump to addr
'COND', # [out, cond, x] [y] out = cond ? x : y, int
'CONDD', # [out, cond, x] [y] out = cond ? x : y, double
'GETTDP', # [l, 0, 0] l = tempDoublePtr
'GETTR0', # [l, 0, 0] l = tempRet0
'SETTR0', # [l, 0, 0] tempRet0 = l
'GETGLBI', # [l, vl, vh] get global value, int, indexed by v
'GETGLBD', # [l, vl, vh] get global value, double, indexed by v
'SETGLBI', # [vl, vh, l] set global value, int, indexed by v (v = l)
'SETGLBD', # [vl, vh, l] set global value, double, indexed by v (v = l)
'INTCALL', # [lx, 0, 0] [target] [params] (lx = ) target(params..)
# Internal, emterpreter-to-emterpreter call.
'EXTCALL', # [lx, targetl, targeth] [params...] (lx = ) target(params..) lx's existence and type depend on the target's actual callsig;
# this instruction can take multiple 32-bit instruction chunks
# if target is a function table, then the first param is the index of the register holding the function pointer
'GETST', # [l, 0, 0] l = STACKTOP
'SETST', # [l, 0, 0] STACKTOP = l
'SWITCH', # [lx, ly, lz] switch (lx) { .. }. followed by a jump table for values in range [ly..ly+lz), after which is the default (which might be empty)
'RET', # [l, 0, 0] return l (depending on which emterpreter_x we are in, has the right type)
'FUNC', # [num params, total locals (low 8 bits), total locals (high 8 bits)] [which emterpreter (0 = normal, 1 = zero), 0, 0, 0] function with n locals (each taking 64 bits), of which the first are params
# this is read in the emterpreter prelude, and also in intcalls
# slow locals support - copying from/to slow locals
  'FSLOW',     # [lx, lyl, lyh]       lx = ly (int or float, not double); ly = lyl,lyh
'FSLOWD', # [lx, lyl, lyh] lx = ly (double)
  'TSLOW',     # [lxl, lxh, ly]       lx = ly (int or float, not double); lx = lxl,lxh
'TSLOWD', # [lxl, lxh, ly] lx = ly (double; lx = lxl,lxh)
]
if FROUND:
OPCODES.append(
'FROUND', # [lx, ly] lx = Math.fround(ly), rounds doubles to floats
)
def randomize_opcodes():
global OPCODES
import random
  random.shuffle(OPCODES)
print OPCODES
#randomize_opcodes()
assert len(OPCODES) == len(set(OPCODES)) # no dupe names
assert len(OPCODES) < 256
ROPCODES = {}
for i in range(len(OPCODES)):
ROPCODES[OPCODES[i]] = i
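# Encoding note, inferred from the CASES below: each instruction is one 32-bit
# word with the opcode index in byte 0 and operands in bytes 1-3, which is why
# SETVI reads its 16-bit immediate as "inst >> 16" and the *V variants read
# their 8-bit immediate as "inst >> 24".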
GLOBAL_BASE = 256*8
# utils
settings = { 'PRECISE_F32': 0 } # TODO
def bytify(x):
assert x >= 0 and x < (1 << 32)
return [x & 255, (x >> 8) & 255, (x >> 16) & 255, (x >> 24) & 255]
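# e.g. bytify(0x12345678) == [0x78, 0x56, 0x34, 0x12]: little-endian byte
# order, matching how 32-bit instruction words are laid out in the bytecode.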
def next_power_of_two(x):
if x == 0: return 0
ret = 1
while ret < x: ret <<= 1
return ret
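# e.g. next_power_of_two(5) == 8, next_power_of_two(8) == 8, and 0 maps to 0.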
def get_access(l, s='i', base='sp', offset=None):
if offset is not None:
offset = '+ ' + str(offset) + ' '
else:
offset = ''
if s == 'i':
return 'HEAP32[' + str(base) + ' + (' + l + ' << 3) ' + offset + '>> 2]'
elif s == 'd' or s == 'f':
return 'HEAPF64[' + str(base) + ' + (' + l + ' << 3) ' + offset + '>> 3]'
else:
assert 0
def get_coerced_access(l, s='i', unsigned=False, base='sp', offset=None):
if s == 'i':
if not unsigned:
return get_access(l, s, base, offset) + '|0'
else:
return get_access(l, s, base, offset) + '>>>0'
elif s == 'd' or s == 'f':
return '+' + get_access(l, s, base, offset)
else:
assert 0
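# For illustration, with the defaults above:
#   get_access('lx')                -> 'HEAP32[sp + (lx << 3) >> 2]'
#   get_coerced_access('lx', s='d') -> '+HEAPF64[sp + (lx << 3) >> 3]'
# Every local occupies one 64-bit stack slot, hence the << 3 scaling.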
def make_assign(left, right, temp): # safely assign, taking into account memory safety
if not MEMORY_SAFE:
return left + ' = ' + right + ';'
return temp + ' = ' + right + '; ' + left + ' = ' + temp + ';'
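# Under MEMORY_SAFE the right-hand side is evaluated into a scratch register
# first, e.g. make_assign(A, B, 'ly') emits "ly = B; A = ly;" so the final
# heap write is a simple store of an already-computed value.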
CASES = {}
CASES[ROPCODES['SET']] = get_access('lx') + ' = ' + get_coerced_access('ly') + ';'
CASES[ROPCODES['GETST']] = get_access('lx') + ' = STACKTOP;'
CASES[ROPCODES['SETST']] = 'STACKTOP = ' + get_coerced_access('lx') + ';'
CASES[ROPCODES['SETVI']] = get_access('lx') + ' = inst >> 16;'
CASES[ROPCODES['SETVIB']] = 'pc = pc + 4 | 0; ' + get_access('lx') + ' = HEAP32[pc >> 2] | 0;'
CASES[ROPCODES['ADD']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['SUB']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') - (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['MUL']] = make_assign(get_access('lx'), 'Math_imul(' + get_coerced_access('ly') + ', ' + get_coerced_access('lz') + ') | 0', 'ly')
CASES[ROPCODES['SDIV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') / (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['UDIV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') / (' + get_coerced_access('lz', unsigned=True) + ') >>> 0;'
CASES[ROPCODES['SMOD']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') % (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['UMOD']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') % (' + get_coerced_access('lz', unsigned=True) + ') >>> 0;'
CASES[ROPCODES['NEG']] = get_access('lx') + ' = -(' + get_coerced_access('ly') + ');'
CASES[ROPCODES['BNOT']] = get_access('lx') + ' = ~(' + get_coerced_access('ly') + ');'
CASES[ROPCODES['LNOT']] = get_access('lx') + ' = !(' + get_coerced_access('ly') + ');'
CASES[ROPCODES['EQ']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['NE']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['SLT']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['ULT']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ') | 0;'
CASES[ROPCODES['SLE']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ') | 0;'
CASES[ROPCODES['ULE']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ') | 0;'
CASES[ROPCODES['AND']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') & (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['OR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') | (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['XOR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') ^ (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['SHL']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') << (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['ASHR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >> (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['LSHR']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >>> (' + get_coerced_access('lz') + ');'
CASES[ROPCODES['ADDV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') + (inst >> 24) | 0;'
CASES[ROPCODES['SUBV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') - (inst >> 24) | 0;'
CASES[ROPCODES['MULV']] = make_assign(get_access('lx'), 'Math_imul(' + get_coerced_access('ly') + ', inst >> 24) | 0', 'ly')
CASES[ROPCODES['SDIVV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') / (inst >> 24) | 0;'
CASES[ROPCODES['UDIVV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') / (lz >>> 0) >>> 0;'
CASES[ROPCODES['SMODV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') % (inst >> 24) | 0;'
CASES[ROPCODES['UMODV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') % (lz >>> 0) >>> 0;'
CASES[ROPCODES['EQV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') == (inst >> 24) | 0;'
CASES[ROPCODES['NEV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') != (inst >> 24) | 0;'
CASES[ROPCODES['SLTV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') < (inst >> 24) | 0;'
CASES[ROPCODES['ULTV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') < (lz >>> 0) | 0;'
CASES[ROPCODES['SLEV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') <= (inst >> 24) | 0;'
CASES[ROPCODES['ULEV']] = get_access('lx') + ' = (' + get_coerced_access('ly', unsigned=True) + ') <= (lz >>> 0) | 0;'
CASES[ROPCODES['ANDV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') & (inst >> 24);'
CASES[ROPCODES['ORV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') | (inst >> 24);'
CASES[ROPCODES['XORV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') ^ (inst >> 24);'
CASES[ROPCODES['SHLV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') << lz;'
CASES[ROPCODES['ASHRV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >> lz;'
CASES[ROPCODES['LSHRV']] = get_access('lx') + ' = (' + get_coerced_access('ly') + ') >>> lz;'
CASES[ROPCODES['LNOTBRF']] = 'if (' + get_coerced_access('ly') + ') { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['EQBRF']] = 'if ((' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['NEBRF']] = 'if ((' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['SLTBRF']] = 'if ((' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['ULTBRF']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['SLEBRF']] = 'if ((' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['ULEBRF']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ')) { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['LNOTBRT']] = 'if (' + get_coerced_access('ly') + ') { pc = pc + 4 | 0; } else { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['EQBRT']] = 'if ((' + get_coerced_access('ly') + ') == (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['NEBRT']] = 'if ((' + get_coerced_access('ly') + ') != (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['SLTBRT']] = 'if ((' + get_coerced_access('ly') + ') < (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['ULTBRT']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') < (' + get_coerced_access('lz', unsigned=True) + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['SLEBRT']] = 'if ((' + get_coerced_access('ly') + ') <= (' + get_coerced_access('lz') + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['ULEBRT']] = 'if ((' + get_coerced_access('ly', unsigned=True) + ') <= (' + get_coerced_access('lz', unsigned=True) + ')) { pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; } else { pc = pc + 4 | 0; }'
CASES[ROPCODES['SETD']] = get_access('lx', s='d') + ' = ' + get_coerced_access('ly', s='d') + ';'
CASES[ROPCODES['SETVD']] = get_access('lx', s='d') + ' = +(inst >> 16);'
CASES[ROPCODES['SETVDI']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = +(HEAP32[pc >> 2] | 0);'
CASES[ROPCODES['SETVDF']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = +HEAPF32[pc >> 2];'
CASES[ROPCODES['SETVDD']] = 'HEAP32[tempDoublePtr >> 2] = HEAP32[pc + 4 >> 2]; HEAP32[tempDoublePtr + 4 >> 2] = HEAP32[pc + 8 >> 2]; pc = pc + 8 | 0; ' + get_access('lx', s='d') + ' = +HEAPF64[tempDoublePtr >> 3];'
CASES[ROPCODES['ADDD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') + (' + get_coerced_access('lz', s='d') + ');'
CASES[ROPCODES['SUBD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') - (' + get_coerced_access('lz', s='d') + ');'
CASES[ROPCODES['MULD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') * (' + get_coerced_access('lz', s='d') + ');'
CASES[ROPCODES['DIVD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') / (' + get_coerced_access('lz', s='d') + ');'
CASES[ROPCODES['MODD']] = get_access('lx', s='d') + ' = (' + get_coerced_access('ly', s='d') + ') % (' + get_coerced_access('lz', s='d') + ');'
CASES[ROPCODES['NEGD']] = get_access('lx', s='d') + ' = -(' + get_coerced_access('ly', s='d') + ');'
CASES[ROPCODES['EQD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') == (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['NED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') != (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['LTD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') < (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['LED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') <= (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['GTD']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') > (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['GED']] = get_access('lx') + ' = (' + get_coerced_access('ly', s='d') + ') >= (' + get_coerced_access('lz', s='d') + ') | 0;'
CASES[ROPCODES['D2I']] = get_access('lx') + ' = ~~(' + get_coerced_access('ly', s='d') + ');'
CASES[ROPCODES['SI2D']] = get_access('lx', s='d') + ' = +(' + get_coerced_access('ly') + ');'
CASES[ROPCODES['UI2D']] = get_access('lx', s='d') + ' = +(' + get_coerced_access('ly', unsigned=True) + ');'
CASES[ROPCODES['LOAD8']] = get_access('lx') + ' = ' + 'HEAP8[' + get_access('ly') + ' >> 0];'
CASES[ROPCODES['LOADU8']] = get_access('lx') + ' = ' + 'HEAPU8[' + get_access('ly') + ' >> 0];'
CASES[ROPCODES['LOAD16']] = get_access('lx') + ' = ' + 'HEAP16[' + get_access('ly') + ' >> 1];'
CASES[ROPCODES['LOADU16']] = get_access('lx') + ' = ' + 'HEAPU16[' + get_access('ly') + ' >> 1];'
CASES[ROPCODES['LOAD32']] = get_access('lx') + ' = ' + 'HEAP32[' + get_access('ly') + ' >> 2];'
CASES[ROPCODES['STORE8']] = 'HEAP8[' + get_access('lx') + ' >> 0] = ' + get_coerced_access('ly') + ';'
CASES[ROPCODES['STORE16']] = 'HEAP16[' + get_access('lx') + ' >> 1] = ' + get_coerced_access('ly') + ';'
CASES[ROPCODES['STORE32']] = 'HEAP32[' + get_access('lx') + ' >> 2] = ' + get_coerced_access('ly') + ';'
CASES[ROPCODES['LOADF64']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[' + get_access('ly') + ' >> 3];'
CASES[ROPCODES['STOREF64']] = 'HEAPF64[' + get_access('lx') + ' >> 3] = ' + get_coerced_access('ly', s='d') + ';'
CASES[ROPCODES['LOADF32']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[' + get_access('ly') + ' >> 2];'
CASES[ROPCODES['STOREF32']] = 'HEAPF32[' + get_access('lx') + ' >> 2] = ' + get_coerced_access('ly', s='d') + ';'
CASES[ROPCODES['LOAD8A']] = get_access('lx') + ' = ' + 'HEAP8[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 0];'
CASES[ROPCODES['LOADU8A']] = get_access('lx') + ' = ' + 'HEAPU8[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 0];'
CASES[ROPCODES['LOAD16A']] = get_access('lx') + ' = ' + 'HEAP16[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 1];'
CASES[ROPCODES['LOADU16A']] = get_access('lx') + ' = ' + 'HEAPU16[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 1];'
CASES[ROPCODES['LOAD32A']] = get_access('lx') + ' = ' + 'HEAP32[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 2];'
CASES[ROPCODES['STORE8A']] = 'HEAP8[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 0] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['STORE16A']] = 'HEAP16[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 1] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['STORE32A']] = 'HEAP32[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 2] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['LOADF64A']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 3];'
CASES[ROPCODES['STOREF64A']] = 'HEAPF64[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 3] = ' + get_coerced_access('lz', s='d') + ';'
CASES[ROPCODES['LOADF32A']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[(' + get_coerced_access('ly') + ') + (' + get_coerced_access('lz') + ') >> 2];'
CASES[ROPCODES['STOREF32A']] = 'HEAPF32[(' + get_coerced_access('lx') + ') + (' + get_coerced_access('ly') + ') >> 2] = ' + get_coerced_access('lz', s='d') + ';'
CASES[ROPCODES['LOAD8AV']] = get_access('lx') + ' = ' + 'HEAP8[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 0];'
CASES[ROPCODES['LOADU8AV']] = get_access('lx') + ' = ' + 'HEAPU8[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 0];'
CASES[ROPCODES['LOAD16AV']] = get_access('lx') + ' = ' + 'HEAP16[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 1];'
CASES[ROPCODES['LOADU16AV']] = get_access('lx') + ' = ' + 'HEAPU16[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 1];'
CASES[ROPCODES['LOAD32AV']] = get_access('lx') + ' = ' + 'HEAP32[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 2];'
CASES[ROPCODES['STORE8AV']] = 'HEAP8[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 0] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['STORE16AV']] = 'HEAP16[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 1] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['STORE32AV']] = 'HEAP32[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 2] = ' + get_coerced_access('lz') + ';'
CASES[ROPCODES['LOADF64AV']] = get_access('lx', s='d') + ' = ' + '+HEAPF64[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 3];'
CASES[ROPCODES['STOREF64AV']] = 'HEAPF64[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 3] = ' + get_coerced_access('lz', s='d') + ';'
CASES[ROPCODES['LOADF32AV']] = get_access('lx', s='d') + ' = ' + '+HEAPF32[(' + get_coerced_access('ly') + ') + (inst >> 24) >> 2];'
CASES[ROPCODES['STOREF32AV']] = 'HEAPF32[(' + get_coerced_access('lx') + ') + (ly << 24 >> 24) >> 2] = ' + get_coerced_access('lz', s='d') + ';'
CASES[ROPCODES['STORE8C']] = 'HEAP8[' + get_access('lx') + ' >> 0] = HEAP8[' + get_access('ly') + ' >> 0] | 0;'
CASES[ROPCODES['STORE16C']] = 'HEAP16[' + get_access('lx') + ' >> 1] = HEAP16[' + get_access('ly') + ' >> 1] | 0;'
CASES[ROPCODES['STORE32C']] = 'HEAP32[' + get_access('lx') + ' >> 2] = HEAP32[' + get_access('ly') + ' >> 2] | 0;'
CASES[ROPCODES['STOREF32C']] = 'HEAPF32[' + get_access('lx') + ' >> 2] = +HEAPF32[' + get_access('ly') + ' >> 2];'
CASES[ROPCODES['STOREF64C']] = 'HEAPF64[' + get_access('lx') + ' >> 3] = +HEAPF64[' + get_access('ly') + ' >> 3];'
CASES[ROPCODES['BR']] = 'pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP;'
CASES[ROPCODES['BRT']] = 'if (' + get_coerced_access('lx') + ') { pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['BRF']] = 'if (!(' + get_coerced_access('lx') + ')) { pc = pc + ((inst >> 16) << 2) | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['BRA']] = 'pc = HEAP32[pc + 4 >> 2] | 0; PROCEED_WITHOUT_PC_BUMP;'
CASES[ROPCODES['BRTA']] = 'pc = pc + 4 | 0; if (' + get_coerced_access('lx') + ') { pc = HEAP32[pc >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['BRFA']] = 'pc = pc + 4 | 0; if (!(' + get_coerced_access('lx') + ')) { pc = HEAP32[pc >> 2] | 0; PROCEED_WITHOUT_PC_BUMP; }'
CASES[ROPCODES['COND']] = 'pc = pc + 4 | 0; ' + get_access('lx') + ' = (' + get_coerced_access('ly') + ') ? (' + get_coerced_access('lz') + ') : (' + get_coerced_access('(HEAPU8[pc >> 0] | 0)') + ');'
CASES[ROPCODES['CONDD']] = 'pc = pc + 4 | 0; ' + get_access('lx', s='d') + ' = (' + get_coerced_access('ly') + ') ? (' + get_coerced_access('lz', s='d') + ') : (' + get_coerced_access('(HEAPU8[pc >> 0] | 0)', s='d') + ');'
CASES[ROPCODES['GETTDP']] = get_access('lx') + ' = tempDoublePtr;'
#CASES[ROPCODES['GETPC']] = get_access('lx') + ' = pc;'
CASES[ROPCODES['GETTR0']] = get_access('lx') + ' = tempRet0;'
CASES[ROPCODES['SETTR0']] = 'tempRet0 = ' + get_coerced_access('lx') + ';'
if FROUND:
CASES[ROPCODES['FROUND']] = get_access('lx', s='d') + ' = Math_fround(' + get_coerced_access('ly', s='d') + ');'
# stacktop handling: if allowing async, the very bottom will contain the function being executed,
# for stack trace reconstruction. We store [pc of function, curr pc]
# where curr pc is the current position in that function, when asyncing
# The effective sp, where locals reside, is 8 above that.
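# An illustrative picture of that layout under ASYNC (editor's sketch, not in
# the original):
#   EMTSTACKTOP + 0 : pc of the executing function, written on entry
#   EMTSTACKTOP + 4 : current pc within it, saved around calls while asyncing
#   EMTSTACKTOP + 8 : sp - the local register slots, 8 bytes each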
def push_stacktop(zero):
return (' sp = EMTSTACKTOP;' if not ASYNC else ' sp = EMTSTACKTOP + 8 | 0;') if not zero else ''
def pop_stacktop(zero):
return '//Module.print("exit");\n' + ((' EMTSTACKTOP = sp; ' if not ASYNC else 'EMTSTACKTOP = sp - 8 | 0; ') if not zero else '')
def handle_async_pre_call():
return 'HEAP32[sp - 4 >> 2] = pc;' if ASYNC else ''
def handle_async_post_call():
assert not ZERO
return 'if ((asyncState|0) == 1) { ' + pop_stacktop(zero=False) + ' return }\n' if ASYNC else '' # save pc and exit immediately if currently saving state
CASES[ROPCODES['INTCALL']] = '''
lz = HEAPU8[(HEAP32[pc + 4 >> 2] | 0) + 1 | 0] | 0; // FUNC inst, see definition above; we read params here
ly = 0;
assert(((EMTSTACKTOP + 8|0) <= (EMT_STACK_MAX|0))|0); // for return value
%s
%s
while ((ly|0) < (lz|0)) {
%s = %s;
%s = %s;
ly = ly + 1 | 0;
}
%s
%s
emterpret(HEAP32[pc + 4 >> 2] | 0);
%s
%s
%s = HEAP32[EMTSTACKTOP >> 2] | 0;
%s = HEAP32[EMTSTACKTOP + 4 >> 2] | 0;
pc = pc + (((4 + lz + 3) >> 2) << 2) | 0;
''' % (
'if ((HEAPU8[(HEAP32[pc + 4 >> 2] | 0) + 4 | 0] | 0) == 0) {' if ZERO else '',
'if ((asyncState|0) != 2) {' if ASYNC else '',
get_access('ly', base='EMTSTACKTOP', offset=8 if ASYNC else 0), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]'),
get_access('ly', base='EMTSTACKTOP', offset=12 if ASYNC else 4), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]', offset=4),
'}' if ASYNC else '',
handle_async_pre_call(),
handle_async_post_call(),
('''} else {
while ((ly|0) < (lz|0)) {
%s = %s;
%s = %s;
ly = ly + 1 | 0;
}
emterpret_z(HEAP32[pc + 4 >> 2] | 0);
}''' % (
get_access('ly', base=0), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]'),
get_access('ly', base=0, offset=4), get_coerced_access('HEAPU8[pc + 8 + ly >> 0]', offset=4),
)) if ZERO else '',
get_access('lx'), get_access('lx', offset=4),
)
CASES[ROPCODES['SWITCH']] = '''
lz = ''' + get_coerced_access('lz') + ''';
lx = ((''' + get_coerced_access('lx') + ''') - (''' + get_coerced_access('ly') + ''')) >>> 0; // lx is now relative to the base
if ((lx >>> 0) >= (lz >>> 0)) { // is the adjusted value too big?
pc = (pc + (lz << 2)) | 0; // jump to right after the table, where the default is
PROCEED_WITH_PC_BUMP; // also increment the pc normally, to skip the switch itself
}
pc = HEAP32[pc + 4 + (lx << 2) >> 2] | 0; // load from the jump table which is right after this instruction, and set pc
PROCEED_WITHOUT_PC_BUMP;'''
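# Editor's sketch of the SWITCH operands: at runtime lx holds the switch
# value, ly the lowest case and lz the table length; in-range values load an
# absolute pc from the jump table that follows the instruction, and anything
# else falls through to the default placed right after the table.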
CASES[ROPCODES['FSLOW']] = get_access('lx') + ' = ' + get_coerced_access('inst >>> 16') + ';'
CASES[ROPCODES['FSLOWD']] = get_access('lx', s='d') + ' = ' + get_coerced_access('inst >>> 16', s='d') + ';'
CASES[ROPCODES['TSLOW']] = get_access('inst >>> 16') + ' = ' + get_coerced_access('lx') + ';'
CASES[ROPCODES['TSLOWD']] = get_access('inst >>> 16', s='d') + ' = ' + get_coerced_access('lx', s='d') + ';'
opcode_used = {}
for opcode in OPCODES:
opcode_used[opcode] = False
def is_function_table(name):
return name.startswith('FUNCTION_TABLE_')
def is_dyn_call(func):
return func.startswith('dynCall_')
def make_emterpreter(zero=False):
# return is specialized per interpreter
CASES[ROPCODES['RET']] = pop_stacktop(zero)
CASES[ROPCODES['RET']] += 'HEAP32[EMTSTACKTOP >> 2] = ' + get_coerced_access('lx') + '; HEAP32[EMTSTACKTOP + 4 >> 2] = ' + get_coerced_access('lx', offset=4) + '; return;'
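  # Editor's note: the return value travels through the bottom 8 bytes of the
  # emterpreter stack; INTCALL reads these same two words back out after the
  # inner emterpret() call returns.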
# call is custom generated using information of actual call patterns, and which emterpreter this is
def make_target_call(i):
name = global_func_names[i]
sig = global_func_sigs[i]
function_pointer_call = is_function_table(name)
# our local registers are never true floats, and we just do fround calls to ensure correctness, not caring
# about performance. but when coercing to outside of the emterpreter, we need to know the true sig,
# and must use frounds
true_sig = sig
if function_pointer_call:
true_sig = name.split('_')[-1]
elif name in actual_sigs:
true_sig = actual_sigs[name]
def fix_coercion(value, s):
if s == 'f':
value = 'Math_fround(' + value + ')'
return value
ret = name
if function_pointer_call:
ret += '[' + get_access('HEAPU8[pc+4>>0]') + ' & %d]' % (next_power_of_two(asm.tables[name].count(',')+1)-1)
ret += '(' + ', '.join([fix_coercion(get_coerced_access('HEAPU8[pc+%d>>0]' % (i+4+int(function_pointer_call)), s=sig[i+1]), true_sig[i+1]) for i in range(len(sig)-1)]) + ')'
if sig[0] != 'v':
ret = shared.JS.make_coercion(fix_coercion(ret, true_sig[0]), sig[0])
if not ASYNC:
ret = make_assign(get_access('lx', sig[0]), ret, 'ly' if sig[0] == 'i' else 'ld')
else:
# we cannot save the return value immediately! if we are saving the stack, it is meaningless, and would corrupt a local stack variable
if sig[0] == 'i':
ret = 'lz = ' + ret
else:
assert sig[0] == 'd'
ret = 'ld = ' + ret
elif name in actual_sigs and actual_sigs[name][0] != 'v':
ret = shared.JS.make_coercion(ret, actual_sigs[name][0]) # return value ignored, but need a coercion
if ASYNC:
# check if we are asyncing, and if not, it is ok to save the return value
ret = handle_async_pre_call() + ret + '; ' + handle_async_post_call()
if sig[0] != 'v':
ret += ' else ' + get_access('lx', sig[0]) + ' = ';
if sig[0] == 'i':
ret += 'lz'
else:
assert sig[0] == 'd'
ret += 'ld '
ret += ';'
extra = len(sig) - 1 + int(function_pointer_call) # [opcode, lx, target, sig], take the usual 4. params are extra
if extra > 0:
ret += '; pc = pc + %d | 0' % (4*((extra+3)>>2))
return ' ' + ret + '; PROCEED_WITH_PC_BUMP;'
CASES[ROPCODES['EXTCALL']] = 'switch ((inst>>>16)|0) {\n' + \
'\n'.join([' case %d: {\n%s\n }' % (i, make_target_call(i)) for i in range(global_func_id)]) + \
'\n default: assert(0);' + \
'\n }'
if ROPCODES['GETGLBI'] not in CASES:
def make_load(i, t):
name = rglobal_vars[i]
return ' ' + get_access('lx', t) + ' = ' + name + '; PROCEED_WITH_PC_BUMP;'
def make_getglb(suffix, t):
CASES[ROPCODES['GETGLB' + suffix]] = 'switch (ly|0) {\n' + \
'\n'.join([' case %d: {\n%s\n }' % (i, make_load(i, t)) for i in range(global_var_id) if global_var_types[rglobal_vars[i]] == t]) + \
'\n default: assert(0);' + \
'\n }'
make_getglb('I', 'i')
make_getglb('D', 'd')
def make_store(i, t):
name = rglobal_vars[i]
return ' ' + name + ' = ' + get_coerced_access('lz', t) + '; PROCEED_WITH_PC_BUMP;'
def make_setglb(suffix, t):
CASES[ROPCODES['SETGLB' + suffix]] = 'switch ((inst >> 8)&255) {\n' + \
'\n'.join([' case %d: {\n%s\n }' % (i, make_store(i, t)) for i in range(global_var_id) if global_var_types[rglobal_vars[i]] == t]) + \
'\n default: assert(0);' + \
'\n }'
make_setglb('I', 'i')
make_setglb('D', 'd')
def fix_case(case):
    # We increment pc at the top of the loop; to avoid a pc bump, we decrement it first. This is rare - most opcodes just continue - and it avoids any code at the end of the loop
return case.replace('PROCEED_WITH_PC_BUMP', 'continue').replace('PROCEED_WITHOUT_PC_BUMP', 'pc = pc - 4 | 0; continue').replace('continue; continue;', 'continue;')
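  # Example (editor's note): a case ending in 'PROCEED_WITHOUT_PC_BUMP;', such
  # as BRA, becomes '... pc = pc - 4 | 0; continue;', pre-compensating for the
  # unconditional pc bump at the top of the main loop.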
def process(code):
if not ASSERTIONS: code = code.replace(' assert(', ' //assert(')
if zero: code = code.replace('sp + ', '')
return code
main_loop_prefix = r''' //if (first) first = false; else print('last lx (' + lx + '): ' + [''' + get_coerced_access('lx') + ',' + get_coerced_access('lx', s='d') + ''']);
pc = pc + 4 | 0;
inst = HEAP32[pc>>2]|0;
lx = (inst >> 8) & 255;
ly = (inst >> 16) & 255;
lz = inst >>> 24;
//Module.print([pc, inst&255, %s[inst&255], lx, ly, lz, HEAPU8[pc + 4],HEAPU8[pc + 5],HEAPU8[pc + 6],HEAPU8[pc + 7]].join(', '));
''' % (json.dumps(OPCODES))
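  # Editor's note: the prefix decodes one packed instruction per iteration -
  # byte 0 is the opcode and bytes 1-3 the lx/ly/lz operands, so e.g.
  # inst = 0x03020100 yields opcode byte 0 with lx = 1, ly = 2, lz = 3.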
if not INNERTERPRETER_LAST_OPCODE:
main_loop = main_loop_prefix + r'''
switch (inst&255) {
%s
default: assert(0);
}
''' % ('\n'.join([fix_case(' case %d: %s break;' % (k, CASES[k])) for k in sorted(CASES.keys()) if opcode_used[OPCODES[k]]]))
else:
    # emit an inner interpreter (innerterpreter) loop of trivial opcodes that hopefully the JS engine will implement with no spills
assert OPCODES[-1] == 'FUNC' # we don't need to emit that one
main_loop = r''' innerterpreter: while (1) {
%s
switch (inst&255) {
%s
%s
default: break innerterpreter;
}
}
switch (inst&255) {
%s
default: assert(0);
}
''' % (
' ' + '\n '.join(main_loop_prefix.split('\n')),
'\n'.join([fix_case(' case %d: %s break;' % (ROPCODES[k], CASES[ROPCODES[k]])) for k in OPCODES[:-1][:ROPCODES[INNERTERPRETER_LAST_OPCODE]+1]]),
'\n'.join([fix_case(' case %d:' % (ROPCODES[k])) for k in OPCODES[:-1][ROPCODES[INNERTERPRETER_LAST_OPCODE]+1:]]),
'\n'.join([fix_case(' case %d: %s break;' % (ROPCODES[k], CASES[ROPCODES[k]])) for k in OPCODES[:-1][ROPCODES[INNERTERPRETER_LAST_OPCODE]+1:]])
)
return process(r'''
function emterpret%s(pc) {
//Module.print('emterpret: ' + pc + ',' + EMTSTACKTOP);
pc = pc | 0;
var %sinst = 0, lx = 0, ly = 0, lz = 0;
%s
%s
%s
assert(((HEAPU8[pc>>0]>>>0) == %d)|0);
lx = HEAPU16[pc + 2 >> 1] | 0; // num locals
%s
%s
//print('enter func ' + [pc, HEAPU8[pc + 0],HEAPU8[pc + 1],HEAPU8[pc + 2],HEAPU8[pc + 3],HEAPU8[pc + 4],HEAPU8[pc + 5],HEAPU8[pc + 6],HEAPU8[pc + 7]].join(', '));
//var first = true;
pc = pc + 4 | 0;
while (1) {
%s
}
assert(0);
}''' % (
'' if not zero else '_z',
'sp = 0, ' if not zero else '',
'' if not ASYNC and not MEMORY_SAFE else 'var ld = +0;',
'' if not ASYNC else 'HEAP32[EMTSTACKTOP>>2] = pc;\n',
push_stacktop(zero),
ROPCODES['FUNC'],
(''' EMTSTACKTOP = EMTSTACKTOP + (lx ''' + (' + 1 ' if ASYNC else '') + '''<< 3) | 0;
assert(((EMTSTACKTOP|0) <= (EMT_STACK_MAX|0))|0);\n''' + (' if ((asyncState|0) != 2) {' if ASYNC else '')) if not zero else '',
' } else { pc = (HEAP32[sp - 4 >> 2] | 0) - 8 | 0; }' if ASYNC else '',
main_loop,
))
# main
if __name__ == '__main__':
infile = sys.argv[1]
outfile = sys.argv[2]
force_memfile = sys.argv[3] if len(sys.argv) >= 4 else None
original_yieldlist = YIELDLIST
extra_blacklist = []
if len(sys.argv) >= 5:
temp = sys.argv[4]
if temp[0] == '"':
# response file
assert temp[1] == '@'
temp = open(temp[2:-1]).read()
extra_blacklist = json.loads(temp)
if len(sys.argv) >= 6:
temp = sys.argv[5]
if temp[0] == '"':
# response file
assert temp[1] == '@'
temp = open(temp[2:-1]).read()
WHITELIST = json.loads(temp)
if len(sys.argv) >= 7:
temp = sys.argv[6]
if temp[0] == '"':
# response file
assert temp[1] == '@'
temp = open(temp[2:-1]).read()
YIELDLIST = YIELDLIST + json.loads(temp)
if len(sys.argv) >= 8:
SWAPPABLE = int(sys.argv[7])
if ADVISE:
# Advise the user on which functions should likely be emterpreted
temp = temp_files.get('.js').name
shared.Building.js_optimizer(infile, ['dumpCallGraph'], output_filename=temp, just_concat=True)
asm = asm_module.AsmModule(temp)
lines = asm.funcs_js.split('\n')
can_call = {}
for i in range(len(lines)):
line = lines[i]
if line.startswith('// REACHABLE '):
curr = json.loads(line[len('// REACHABLE '):])
func = curr[0]
targets = curr[2]
can_call[func] = set(targets)
    # function tables too - treat a function table as a function that can call anything in it, which is effectively what it is
for name, funcs in asm.tables.iteritems():
can_call[name] = set(funcs[1:-1].split(','))
#print can_call
# Note: We ignore calls in from outside the asm module, so you could do emterpreted => outside => emterpreted, and we would
# miss the first one there. But this is acceptable to do, because we can't save such a stack anyhow, due to the outside!
#print 'can call', can_call, '\n!!!\n', asm.tables, '!'
reachable_from = {}
for func, targets in can_call.iteritems():
for target in targets:
if target not in reachable_from:
reachable_from[target] = set()
reachable_from[target].add(func)
#print 'reachable from', reachable_from
# find all functions that can reach the sync funcs, which are those that can be on the stack during an async save/load, and hence must all be emterpreted
to_check = list(SYNC_FUNCS)
advised = set()
while len(to_check) > 0:
curr = to_check.pop()
if curr in reachable_from:
for reacher in reachable_from[curr]:
if reacher not in advised:
if not is_dyn_call(reacher) and not is_function_table(reacher): advised.add(str(reacher))
to_check.append(reacher)
print "Suggested list of functions to run in the emterpreter:"
print " -s EMTERPRETIFY_WHITELIST='" + str(sorted(list(advised))).replace("'", '"') + "'"
print "(%d%% out of %d functions)" % (int((100.0*len(advised))/len(can_call)), len(can_call))
if len(YIELDLIST) > len(original_yieldlist):
# advise on the yield list as well. Anything a yield function can reach, likely needs to also be a yield function
YIELD_IGNORE = set(['abort'])
to_check = list(YIELDLIST)
advised = set([str(f) for f in YIELDLIST])
while len(to_check) > 0:
curr = to_check.pop()
if curr not in can_call: continue
for next in can_call[curr]:
if next not in advised:
advised.add(str(next))
to_check.append(next)
advised = [next for next in advised if not is_dyn_call(next) and not is_function_table(next) and not next in original_yieldlist and next not in SYNC_FUNCS and next not in YIELD_IGNORE and next[0] == '_']
print
print "Suggested list of yield functions for the emterpreter:"
print " -s EMTERPRETIFY_YIELDLIST='" + str(sorted(list(advised))).replace("'", '"') + "'"
print "(%d%% out of %d functions)" % (int((100.0*len(advised))/len(can_call)), len(can_call))
sys.exit(0)
BLACKLIST = set(list(BLACKLIST) + extra_blacklist)
if DEBUG or SWAPPABLE:
orig = infile + '.orig.js'
shared.logging.debug('saving original (non-emterpreted) code to ' + orig)
shutil.copyfile(infile, orig)
# final global functions
asm = asm_module.AsmModule(infile)
# process blacklist
for func in extra_blacklist:
assert func in asm.funcs, 'requested blacklist of %s but it does not exist' % func
## debugging
#import hashlib
#def hash(s):
# hash_object = hashlib.sha256(s)
# return int(hash_object.hexdigest(), 16)
#if len(WHITELIST) == 0 and len(extra_blacklist) == 0:
# WHITELIST = set([func for func in asm.funcs if func[0] == '_' and hash(func) % 3 == 1])
# print >> sys.stderr, 'manual whitelist', len(WHITELIST), '/', len(asm.funcs)
##
if len(WHITELIST) > 0:
# we are using a whitelist: fill the blacklist with everything not whitelisted
BLACKLIST = set([func for func in asm.funcs if func not in WHITELIST])
# decide which functions will be emterpreted, and find which are externally reachable (from outside other emterpreted code; those will need trampolines)
emterpreted_funcs = set([func for func in asm.funcs if func not in BLACKLIST and not is_dyn_call(func)])
tabled_funcs = asm.get_table_funcs()
exported_funcs = [func.split(':')[0] for func in asm.exports]
temp = temp_files.get('.js').name # infile + '.tmp.js'
# find emterpreted functions reachable by non-emterpreted ones, we will force a trampoline for them later
shared.Building.js_optimizer(infile, ['findReachable'], extra_info={ 'blacklist': list(emterpreted_funcs) }, output_filename=temp, just_concat=True)
asm = asm_module.AsmModule(temp)
lines = asm.funcs_js.split('\n')
reachable_funcs = set([])
for i in range(len(lines)):
line = lines[i]
if line.startswith('// REACHABLE '):
curr = json.loads(line[len('// REACHABLE '):])
reachable_funcs = set(list(reachable_funcs) + curr)
external_emterpreted_funcs = filter(lambda func: func in tabled_funcs or func in exported_funcs or func in reachable_funcs, emterpreted_funcs)
# process functions, generating bytecode
shared.Building.js_optimizer(infile, ['emterpretify'], extra_info={ 'emterpretedFuncs': list(emterpreted_funcs), 'externalEmterpretedFuncs': list(external_emterpreted_funcs), 'opcodes': OPCODES, 'ropcodes': ROPCODES, 'ASYNC': ASYNC, 'PROFILING': PROFILING, 'ASSERTIONS': ASSERTIONS, 'yieldFuncs': YIELDLIST }, output_filename=temp, just_concat=True)
# load the module and modify it
asm = asm_module.AsmModule(temp)
# find memfile. can be x.js.mem or x.html.mem
in_mem_file = infile + '.mem'
in_mem_file_base = os.path.basename(in_mem_file)
out_mem_file = outfile + '.mem'
out_mem_file_base = os.path.basename(out_mem_file)
if in_mem_file_base not in asm.pre_js:
in_mem_file = (infile + '.mem').replace('.js.mem', '.html.mem')
in_mem_file_base = os.path.basename(in_mem_file)
out_mem_file = (outfile + '.mem').replace('.js.mem', '.html.mem')
out_mem_file_base = os.path.basename(out_mem_file)
assert in_mem_file_base in asm.pre_js, 'we assume a mem init file for now (looked for %s)' % in_mem_file
if not force_memfile:
asm.pre_js = asm.pre_js.replace(in_mem_file_base, out_mem_file_base)
assert os.path.exists(in_mem_file), 'need to find mem file at %s' % in_mem_file
else:
out_mem_file = force_memfile
out_mem_file_base = os.path.basename(out_mem_file)
mem_init = map(ord, open(in_mem_file, 'rb').read())
zero_space = asm.staticbump - len(mem_init)
assert zero_space >= 0 # can be positive, if we add a bump of zeros
assert ('GLOBAL_BASE: %d,' % GLOBAL_BASE) in asm.pre_js, 'we assume a specific global base, and that we can write to all memory below it'
# calculate where code will start
while len(mem_init) % 8 != 0:
mem_init.append(0)
asm.staticbump += 1
code_start = len(mem_init) + GLOBAL_BASE
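  # Worked example (editor's illustration): GLOBAL_BASE is 2048, so a mem init
  # padded to, say, 1024 bytes places code_start at 3072; the padding loop
  # above keeps it 8-byte aligned, satisfying the 4-byte alignment asserted in
  # process_code below.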
# parse out bytecode and add to mem init file
all_code = []
funcs = {}
lines = asm.funcs_js.split('\n')
asm.funcs_js = None
func = None
# first pass, collect and process bytecode
global_funcs = {} # 'name|sig' -> id
global_func_names = {} # id -> name
global_func_sigs = {} # id -> sig, one name can have multiple sigs
global_func_id = 0
global_vars = {}
rglobal_vars = {}
global_var_types = {}
global_var_id = 0
def note_global(target, j, code):
global global_var_id
imp = asm.imports[target]
ty = asm.get_import_type(imp)
assert ty in ['i', 'd'], target
if code[j] == 'GETGLBI' and ty == 'd':
# the js optimizer doesn't know all types, we must fix it up here
assert '.0' in imp or '+' in imp, imp
code[j] = 'GETGLBD'
ty = 'd'
if target not in global_vars:
global_vars[target] = global_var_id
rglobal_vars[global_var_id] = target
global_var_id += 1
global_var_types[target] = ty
else:
assert global_var_types[target] == ty
call_sigs = {} # signatures appearing for each call target
def process_code(func, code, absolute_targets):
global global_func_id
absolute_start = code_start + len(all_code) # true absolute starting point of this function
#print 'processing code', func, absolute_start
for i in range(len(code)/4):
j = i*4
if code[j] == 'EXTCALL':
# fix CALL instructions' targets and signatures
target = code[j+2]
sig = code[j+3]
if target not in call_sigs: call_sigs[target] = []
sigs = call_sigs[target]
if sig not in sigs: sigs.append(sig)
fullname = target + '|' + sig
if fullname not in global_funcs:
global_funcs[fullname] = global_func_id
global_func_names[global_func_id] = target
global_func_sigs[global_func_id] = sig
global_func_id += 1
code[j+2] = global_funcs[fullname] & 255
code[j+3] = global_funcs[fullname] >> 8
if sig[0] == 'v':
if code[j+1] == -1: # dummy value for assignment XXX we should not have assignments on void calls
code[j+1] = 0 # clear it
else:
assert code[j+1] >= 0 # there should be a real target here
elif code[j] in ['GETGLBI', 'GETGLBD']:
# fix global-accessing instructions' targets
target = code[j+2]
note_global(target, j, code)
code[j+2] = global_vars[target]
elif code[j] in ['SETGLBI', 'SETGLBD']:
# fix global-accessing instructions' targets
target = code[j+1]
note_global(target, j, code)
code[j+1] = global_vars[target]
elif code[j] == 'absolute-value':
        # put the 32-bit absolute value of an absolute target here
absolute_value = absolute_start + absolute_targets[unicode(code[j+1])]
#print ' fixing absolute value', code[j+1], absolute_targets[unicode(code[j+1])], absolute_value
assert absolute_value < (1 << 31)
assert absolute_value % 4 == 0
value = bytify(absolute_value)
for k in range(4):
code[j + k] = value[k]
actual_sigs = {}
for i in range(len(lines)):
line = lines[i]
if line.startswith('function ') and '}' not in line:
assert not func
elif line.startswith('// EMTERPRET_INFO '):
try:
func, curr, absolute_targets = json.loads(line[len('// EMTERPRET_INFO '):])
except Exception, e:
print >> sys.stderr, 'failed to parse code from', line
raise e
assert len(curr) % 4 == 0, len(curr)
funcs[func] = len(all_code) # no operation here should change the length
if LOG_CODE: print >> sys.stderr, 'raw bytecode for %s:' % func, curr, 'insts:', len(curr)/4
process_code(func, curr, absolute_targets)
#print >> sys.stderr, 'processed bytecode for %s:' % func, curr
all_code += curr
func = None
lines[i] = ''
elif line.startswith('// return type: ['):
name, sig = line.split('[')[1].split(']')[0].split(',')
actual_sigs[name] = sig
lines[i] = ''
if global_func_id >= 65536:
msg = 'Too many extcall-able global functions (%d) for emterpreter bytecode' % global_func_id
if PROFILING:
msg += '\nDue to --profiling or --profiling-funcs being on, all emterpreter calls are extcalls. Building without those flags might avoid this problem.'
raise Exception(msg)
assert global_var_id < 256, [global_vars, global_var_id]
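  # Editor's note: the two caps mirror the operand encoding - EXTCALL spreads
  # its target index over two bytes (code[j+2] low, code[j+3] high, giving
  # 65536), while GETGLB*/SETGLB* spend a single instruction byte on the
  # global index (giving 256).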
def post_process_code(code):
for i in range(len(code)/4):
j = i*4
if code[j] == 'absolute-funcaddr':
        # put the 32-bit absolute value of an absolute function here
absolute_value = code_start + funcs[code[j+1]]
#print ' fixing absolute value', code[j+1], absolute_targets[unicode(code[j+1])], absolute_value
assert absolute_value < (1 << 31)
assert absolute_value % 4 == 0
value = bytify(absolute_value)
for k in range(4):
code[j + k] = value[k]
# finalize instruction string names to opcodes
for i in range(len(code)/4):
j = i*4
if type(code[j]) in (str, unicode):
opcode_used[code[j]] = True
code[j] = ROPCODES[code[j]]
# sanity checks
for i in range(len(code)):
v = code[i]
assert type(v) == int and v >= 0 and v < 256, [i, v, 'in', code[i-5:i+5], ROPCODES]
post_process_code(all_code)
# create new mem init
mem_init = mem_init + all_code
asm.staticbump += len(all_code)
while len(mem_init) % 8 != 0:
mem_init.append(0)
asm.staticbump += 1
stack_start = len(mem_init)
asm.staticbump += EMT_STACK_MAX
while asm.staticbump % 8 != 0:
asm.staticbump += 1
open(out_mem_file, 'wb').write(''.join(map(chr, mem_init)))
# second pass, finalize trampolines
for i in range(len(lines)):
line = lines[i]
if line.startswith('function ') and '}' not in line:
assert not func
func = line.split(' ')[1].split('(')[0]
elif line.startswith('}'):
assert func
func = None
elif func and func in funcs:
call = '(EMTERPRETER_' + func + ')'
if call in line:
lines[i] = lines[i].replace(call, '(%s)' % (funcs[func] + code_start))
# finalize funcs JS (first line has the marker, add emterpreters right after that)
asm.funcs_js = '\n'.join([lines[0], make_emterpreter(), make_emterpreter(zero=True) if ZERO else '', '\n'.join(filter(lambda line: len(line) > 0, lines[1:]))]) + '\n'
lines = None
# set up emterpreter stack top
asm.set_pre_js(js='var EMTSTACKTOP = STATIC_BASE + %s, EMT_STACK_MAX = EMTSTACKTOP + %d;' % (stack_start, EMT_STACK_MAX))
# send EMT vars into asm
asm.pre_js += "Module.asmLibraryArg['EMTSTACKTOP'] = EMTSTACKTOP; Module.asmLibraryArg['EMT_STACK_MAX'] = EMT_STACK_MAX;\n"
extra_vars = 'var EMTSTACKTOP = env.EMTSTACKTOP|0;\nvar EMT_STACK_MAX = env.EMT_STACK_MAX|0;\n'
first_func = asm.imports_js.find('function ')
if first_func < 0:
asm.imports_js += extra_vars
else:
# imports contains a function (not a true asm function, hidden from opt passes) that we must not be before
asm.imports_js = asm.imports_js[:first_func] + '\n' + extra_vars + '\n' + asm.imports_js[first_func:]
asm.write(outfile)
temp_files.clean()
| bsd-2-clause | -4,270,246,014,771,602,400 | 49.783534 | 578 | 0.554129 | false | 2.741286 | false | false | false |
davisagli/beatbox | src/beatbox/tests/test_pythonClient.py | 1 | 32342 | from beatbox import SoapFaultError
from beatbox.python_client import _prepareSObjects
from types import DictType, StringTypes, IntType, ListType, TupleType
import beatbox
import datetime
import sfconfig
import unittest
class TestUtils(unittest.TestCase):
def setUp(self):
self.svc = svc = beatbox.PythonClient()
svc.login(sfconfig.USERNAME, sfconfig.PASSWORD)
self._todelete = list()
def tearDown(self):
svc = self.svc
ids = self._todelete
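        # editor's note: the Salesforce SOAP API caps each delete() call at
        # 200 records, so larger batches are flushed in chunks below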
if ids:
while len(ids) > 200:
svc.delete(ids[:200])
ids = ids[200:]
if ids:
svc.delete(ids)
def testDescribeGlobal(self):
svc = self.svc
res = svc.describeGlobal()
self.assertEqual(type(res), DictType)
self.failUnless(type(res['encoding']) in StringTypes)
self.assertEqual(type(res['maxBatchSize']), IntType)
self.assertEqual(type(res['types']), ListType)
self.failUnless(len(res['sobjects']) > 0)
# BBB for API < 17.0
self.failUnless(len(res['types']) > 0)
def testDescribeSObjects(self):
svc = self.svc
globalres = svc.describeGlobal()
types = globalres['types'][:100]
res = svc.describeSObjects(types[0])
self.assertEqual(type(res), ListType)
self.assertEqual(len(res), 1)
res = svc.describeSObjects(types)
self.assertEqual(len(types), len(res))
def testCreate(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create([data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve(
'LastName, FirstName, Phone, Email, Birthdate',
'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate']:
self.assertEqual(
data[k], contact[k])
def testSetIntegerField(self):
        # Passes even when fed floats, although the Salesforce field is
        # defined with 0 decimal places. Lack of data validation in SF?
svc = self.svc
testField = 'Favorite_Integer__c'
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Favorite_Integer__c=-25
)
res = svc.create([data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, FirstName, Favorite_Integer__c', 'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
self.assertEqual(data[testField], contact[testField])
def testSetFloatField(self):
        # This fails when the value has a large number of decimal places
        # (exact limit untested).
svc = self.svc
testField = 'Favorite_Float__c'
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Favorite_Float__c=-1.999888777
)
res = svc.create([data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, FirstName, Favorite_Float__c', 'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
self.assertEqual(data[testField], contact[testField])
def testCreatePickListMultiple(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4),
Favorite_Fruit__c=["Apple", "Orange", "Pear"]
)
res = svc.create([data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
        contacts = svc.retrieve(
            'LastName, FirstName, Phone, Email, Birthdate, Favorite_Fruit__c',
            'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate', 'Favorite_Fruit__c']:
self.assertEqual(
data[k], contact[k])
def testFailedCreate(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate='foo'
)
self.assertRaises(SoapFaultError, svc.create, data)
def testRetrieve(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
typedesc = svc.describeSObjects('Contact')[0]
        fields = typedesc.fields.values()
        fieldnames = ', '.join(
            f.name for f in fields if f.type not in ('address',))
contacts = svc.retrieve(fieldnames, 'Contact', [id])
self.assertEqual(len(contacts), 1)
def testRetrieveDeleted(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create(data)
id = res[0]['id']
svc.delete(id)
typedesc = svc.describeSObjects('Contact')[0]
        fields = typedesc.fields.values()
        fieldnames = ', '.join(f.name for f in fields)
contacts = svc.retrieve(fieldnames, 'Contact', [id])
self.assertEqual(len(contacts), 0)
def testDelete(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create([data])
id = res[0]['id']
res = svc.delete([id])
self.failUnless(res[0]['success'])
contacts = svc.retrieve('LastName', 'Contact', [id])
self.assertEqual(len(contacts), 0)
def testUpdate(self):
svc = self.svc
originaldate = datetime.date(1970, 1, 4)
newdate = datetime.date(1970, 1, 5)
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=originaldate
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id])
self.assertEqual(contacts[0]['Birthdate'], originaldate)
self.assertEqual(contacts[0]['LastName'], lastname)
data = dict(
type='Contact',
Id=id,
Birthdate=newdate)
svc.update(data)
contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id])
self.assertEqual(contacts[0]['Birthdate'], newdate)
self.assertEqual(contacts[0]['LastName'], lastname)
def testShrinkMultiPicklist(self):
svc = self.svc
originalList = ["Pear", "Apple"]
newList = ["Pear"]
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Favorite_Fruit__c=originalList
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2)
data = dict(
type='Contact',
Id=id,
Favorite_Fruit__c=newList)
svc.update(data)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 1)
def testGrowMultiPicklist(self):
svc = self.svc
originalList = ["Pear", "Apple"]
newList = ["Pear", "Apple", "Orange"]
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Favorite_Fruit__c=originalList
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2)
data = dict(
type='Contact',
Id=id,
Favorite_Fruit__c=newList)
svc.update(data)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 3)
def testUpdateDeleted(self):
svc = self.svc
originaldate = datetime.date(1970, 1, 4)
newdate = datetime.date(1970, 1, 5)
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=originaldate
)
res = svc.create(data)
id = res[0]['id']
svc.delete(id)
contacts = svc.retrieve('LastName, Birthdate', 'Contact', [id])
self.assertEqual(len(contacts), 0)
data = dict(
type='Contact',
Id=id,
Birthdate=newdate)
res = svc.update(data)
self.failUnless(not res[0]['success'])
self.failUnless(len(res[0]['errors']) > 0)
def testQuery(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create([data])
self._todelete.append(res[0]['id'])
data2 = dict(
type='Contact',
LastName='Doe',
FirstName='Jane',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1972, 10, 15)
)
res = svc.create([data2])
janeid = res[0]['id']
self._todelete.append(janeid)
res = svc.query("SELECT LastName, FirstName, Phone, Email, Birthdate "
"FROM Contact WHERE LastName = 'Doe'")
self.assertEqual(res['size'], 2)
res = svc.query("SELECT Id, LastName, FirstName, Phone, Email, Birthdate "
"FROM Contact WHERE LastName = 'Doe' and FirstName = 'Jane'")
self.assertEqual(res['size'], 1)
self.assertEqual(res['records'][0]['Id'], janeid)
def testBackwardsCompatibleQuery(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create([data])
self._todelete.append(res[0]['id'])
data2 = dict(
type='Contact',
LastName='Doe',
FirstName='Jane',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1972, 10, 15)
)
res = svc.create([data2])
janeid = res[0]['id']
self._todelete.append(janeid)
# conditional expression as positional arg
res = svc.query(
'LastName, FirstName, Phone, Email, Birthdate',
'Contact', "LastName = 'Doe'")
self.assertEqual(res['size'], 2)
# conditional expression as *empty* positional arg
res = svc.query('LastName', 'Contact', '')
self.failUnless(res['size'] > 0)
# conditional expression as kwarg
res = svc.query(
'Id, LastName, FirstName, Phone, Email, Birthdate',
'Contact', conditionalExpression="LastName = 'Doe' and FirstName = 'Jane'")
self.assertEqual(res['size'], 1)
self.assertEqual(res['records'][0]['Id'], janeid)
def testTypeDescriptionsCache(self):
# patch describeSObjects to make a record when it is called
calls = []
standard_describeSObjects = beatbox.PythonClient.describeSObjects
def patched_describeSObjects(self, sObjectTypes):
calls.append(sObjectTypes)
return standard_describeSObjects(self, sObjectTypes)
beatbox.PythonClient.describeSObjects = patched_describeSObjects
# turn the cache on
self.svc.cacheTypeDescriptions = True
# should get called the first time
self.svc.query('SELECT Id FROM Contact')
self.assertEqual(calls, [['Contact']])
# but not the second time
self.svc.query('SELECT Id FROM Contact')
self.assertEqual(calls, [['Contact']])
# if we flush the cache, it should get called again
self.svc.flushTypeDescriptionsCache()
self.svc.query('SELECT Id FROM Contact')
self.assertEqual(calls, [['Contact'], ['Contact']])
        # clean up (restore the unpatched method so later tests are unaffected)
        self.svc.cacheTypeDescriptions = False
        beatbox.PythonClient.describeSObjects = standard_describeSObjects
def testChildToParentMultiQuery(self):
svc = self.svc
account_data = dict(
type='Account',
Name='ChildTestAccount',
AccountNumber='987654321',
Site='www.testsite.com',
)
account = svc.create([account_data])
self._todelete.append(account[0]['id'])
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
AccountID=account[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
query_res = svc.query(
"Id, LastName, FirstName, Account.Site, Account.AccountNumber",
"Contact",
"Phone='123-456-7890'"
)
self.assertEqual(query_res.size, 1)
rr = query_res.records[0]
self.assertEqual(rr.type, 'Contact')
map(self.assertEqual,
[rr.Id, rr.LastName, rr.FirstName, rr.Account.Site, rr.Account.AccountNumber],
[contact[0]['id'], contact_data['LastName'], contact_data['FirstName'],
account_data['Site'], account_data['AccountNumber']])
def testChildToParentMultiQuery2(self):
svc = self.svc
paccount_data = dict(
type='Account',
Name='ParentTestAccount',
AccountNumber='123456789',
Site='www.testsite.com',
)
paccount = svc.create([paccount_data])
self._todelete.append(paccount[0]['id'])
caccount_data = dict(
type='Account',
Name='ChildTestAccount',
AccountNumber='987654321',
Site='www.testsite.com',
ParentID=paccount[0]['id']
)
caccount = svc.create([caccount_data])
self._todelete.append(caccount[0]['id'])
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
AccountID=caccount[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
query_res = svc.query(
"Id, LastName, FirstName, Account.Site, Account.Parent.AccountNumber",
"Contact",
"Account.AccountNumber='987654321'"
)
rr = query_res.records[0]
self.assertEqual(query_res.size, 1)
self.assertEqual(rr.type, 'Contact')
map(self.assertEqual,
[rr.Id, rr.LastName, rr.FirstName, rr.Account.Site, rr.Account.Parent.AccountNumber],
[contact[0]['id'], contact_data['LastName'], contact_data['FirstName'],
caccount_data['Site'], paccount_data['AccountNumber']])
def testParentToChildMultiQuery(self):
svc = self.svc
caccount_data = dict(
type='Account',
Name='ChildTestAccount',
AccountNumber='987654321',
Site='www.testsite.com',
)
caccount = svc.create([caccount_data])
self._todelete.append(caccount[0]['id'])
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
AccountID=caccount[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
contact_data2 = dict(
type='Contact',
LastName='TestLastName2',
FirstName='TestFirstName2',
Phone='123-456-7890',
AccountID=caccount[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact2 = svc.create([contact_data2])
self._todelete.append(contact2[0]['id'])
query_res = svc.query(
"Id, Name, (select FirstName from Contacts)",
"Account",
"AccountNumber='987654321'"
)
rr = query_res.records[0]
self.assertEqual(query_res.size, 1)
self.assertEqual(rr.type, 'Account')
map(self.assertEqual,
[rr.Id, rr.Name],
[caccount[0]['id'], caccount_data['Name']])
def testParentToChildMultiQuery2(self):
svc = self.svc
caccount_data = dict(
type='Account',
Name='ChildTestAccount',
AccountNumber='987654321',
Site='www.testsite.com',
)
caccount = svc.create([caccount_data])
self._todelete.append(caccount[0]['id'])
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
AccountID=caccount[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
contact_data2 = dict(
type='Contact',
LastName='TestLastName2',
FirstName='TestFirstName2',
Phone='123-456-7890',
AccountID=caccount[0]['id'],
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact2 = svc.create([contact_data2])
self._todelete.append(contact2[0]['id'])
query_res = svc.query(
"Id, Name, (select FirstName, Account.Site from Contacts), (select Name from Assets)",
"Account",
"AccountNumber='987654321'"
)
rr = query_res.records[0]
self.assertEqual(query_res.size, 1)
self.assertEqual(rr.type, 'Account')
map(self.assertEqual,
[rr.Id, rr.Name],
[caccount[0]['id'], caccount_data['Name']])
        result = 0
        fetched_names = [r.FirstName for r in rr.Contacts.records]
        for name in [contact_data2['FirstName'], contact_data['FirstName']]:
            if name in fetched_names:
                result += 1
        self.assertEqual(result, rr.Contacts.size)
def testMultiQueryCount(self):
svc = self.svc
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
contact_data2 = dict(
type='Contact',
LastName='TestLastName2',
FirstName='TestFirstName2',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1965, 1, 5)
)
contact2 = svc.create([contact_data2])
self._todelete.append(contact2[0]['id'])
query_res = svc.query("count()", "Contact", "Phone='123-456-7890'")
self.assertEqual(query_res.size, 2)
def testAggregateQuery(self):
svc = self.svc
contact_data = dict(
type='Contact',
LastName='TestLastName',
FirstName='TestFirstName',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1900, 1, 5)
)
contact = svc.create([contact_data])
self._todelete.append(contact[0]['id'])
res = svc.query("SELECT MAX(CreatedDate) FROM Contact GROUP BY LastName")
# the aggregate result is in the 'expr0' attribute of the result
self.failUnless(hasattr(res[0], 'expr0'))
# (unfortunately no field type info is returned as part of the
# AggregateResult object, so we can't automatically marshall to the
# correct Python type)
def testQueryDoesNotExist(self):
res = self.svc.query(
'LastName, FirstName, Phone, Email, Birthdate',
'Contact', "LastName = 'Doe'")
self.assertEqual(res['size'], 0)
def testQueryMore(self):
svc = self.svc
svc.batchSize = 100
data = list()
for x in range(250):
data.append(dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
))
res = svc.create(data[:200])
ids = [x['id'] for x in res]
self._todelete.extend(ids)
res = svc.create(data[200:])
ids = [x['id'] for x in res]
self._todelete.extend(ids)
res = svc.query(
'LastName, FirstName, Phone, Email, Birthdate',
'Contact', "LastName = 'Doe'")
self.failUnless(not res['done'])
self.assertEqual(len(res['records']), 200)
res = svc.queryMore(res['queryLocator'])
self.failUnless(res['done'])
self.assertEqual(len(res['records']), 50)
def testSearch(self):
res = self.svc.search("FIND {barr} in ALL FIELDS RETURNING Contact(Id, Birthdate)")
self.assertEqual(len(res), 1)
self.assertEqual(res[0].type, 'Contact')
self.assertEqual(type(res[0].Birthdate), datetime.date)
res = self.svc.search("FIND {khgkshgsuhalsf} in ALL FIELDS RETURNING Contact(Id)")
self.assertEqual(len(res), 0)
def testGetDeleted(self):
svc = self.svc
startdate = datetime.datetime.utcnow()
enddate = startdate + datetime.timedelta(seconds=61)
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create(data)
id = res[0]['id']
svc.delete(id)
res = svc.getDeleted('Contact', startdate, enddate)
self.failUnless(len(res) != 0)
ids = [r['id'] for r in res]
self.failUnless(id in ids)
def testGetUpdated(self):
svc = self.svc
startdate = datetime.datetime.utcnow()
enddate = startdate + datetime.timedelta(seconds=61)
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.create(data)
id = res[0]['id']
self._todelete.append(id)
data = dict(
type='Contact',
Id=id,
FirstName='Jane')
svc.update(data)
res = svc.getUpdated('Contact', startdate, enddate)
self.failUnless(id in res)
def testGetUserInfo(self):
svc = self.svc
userinfo = svc.getUserInfo()
self.failUnless('accessibilityMode' in userinfo)
self.failUnless('currencySymbol' in userinfo)
self.failUnless('organizationId' in userinfo)
self.failUnless('organizationMultiCurrency' in userinfo)
self.failUnless('organizationName' in userinfo)
self.failUnless('userDefaultCurrencyIsoCode' in userinfo)
self.failUnless('userEmail' in userinfo)
self.failUnless('userFullName' in userinfo)
self.failUnless('userId' in userinfo)
self.failUnless('userLanguage' in userinfo)
self.failUnless('userLocale' in userinfo)
self.failUnless('userTimeZone' in userinfo)
self.failUnless('userUiSkin' in userinfo)
def testDescribeTabs(self):
tabinfo = self.svc.describeTabs()
for info in tabinfo:
self.failUnless('label' in info)
self.failUnless('logoUrl' in info)
self.failUnless('selected' in info)
self.failUnless('tabs' in info)
for tab in info['tabs']:
self.failUnless('custom' in tab)
self.failUnless('label' in tab)
self.failUnless('sObjectName' in tab)
self.failUnless('url' in tab)
def testDescribeLayout(self):
svc = self.svc
self.assertRaises(
NotImplementedError, svc.describeLayout, 'Contact')
def testSetMultiPicklistToEmpty(self):
svc = self.svc
originalList = ["Pear", "Apple"]
newList = []
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Favorite_Fruit__c=originalList
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2)
data = dict(
type='Contact',
Id=id,
Favorite_Fruit__c=newList)
svc.update(data)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list))
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 0)
def testAddToEmptyMultiPicklist(self):
svc = self.svc
originalList = []
newList = ["Pear", "Apple"]
lastname = 'Doe'
data = dict(
type='Contact',
LastName=lastname,
FirstName='John',
Favorite_Fruit__c=originalList
)
res = svc.create([data])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list))
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 0)
data = dict(
type='Contact',
Id=id,
Favorite_Fruit__c=newList)
svc.update(data)
contacts = svc.retrieve('LastName, Favorite_Fruit__c', 'Contact', [id])
self.failUnless(isinstance(contacts[0]['Favorite_Fruit__c'], list))
self.assertEqual(len(contacts[0]['Favorite_Fruit__c']), 2)
def testIsNillableField(self):
svc = self.svc
res = svc.describeSObjects('Contact')
self.assertFalse(res[0].fields['LastName'].nillable)
self.assertTrue(res[0].fields['FirstName'].nillable)
self.assertTrue(res[0].fields['Favorite_Fruit__c'].nillable)
def testUpsert(self):
svc = self.svc
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Phone='123-456-7890',
Email='[email protected]',
Birthdate=datetime.date(1970, 1, 4)
)
res = svc.upsert('Email', [data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
contacts = svc.retrieve(
'LastName, FirstName, Phone, Email, Birthdate',
'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
for k in ['LastName', 'FirstName', 'Phone', 'Email', 'Birthdate']:
self.assertEqual(
data[k], contact[k])
def testPrepareSObjectsWithNone(self):
obj = {
'val': None,
}
prepped_obj = _prepareSObjects([obj])
self.assertEqual(prepped_obj, [{
'val': [],
'fieldsToNull': ['val'],
}])
def testRetrieveTextWithNewlines(self):
data = dict(
type='Contact',
LastName='Doe',
FirstName='John',
Description="This is a\nmultiline description.",
)
res = self.svc.create([data])
self.failUnless(type(res) in (ListType, TupleType))
self.failUnless(len(res) == 1)
self.failUnless(res[0]['success'])
id = res[0]['id']
self._todelete.append(id)
contacts = self.svc.retrieve('Description', 'Contact', [id])
self.assertEqual(len(contacts), 1)
contact = contacts[0]
self.assertEqual(data['Description'], contact['Description'])
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestUtils),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-2.0 | 9,205,919,762,729,724,000 | 34.298541 | 99 | 0.531538 | false | 3.971755 | true | false | false |
yatish27/mase | src/stockflow.py | 9 | 2560 | from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from ok import *
import random
r = random.random
isa = isinstance
"""
# Compartmental Modeling
## Diapers
q +-----+ r +-----+
---->| C |---->| D |--> s
^ +-----+ +-+---+
| |
+-----------------+
C = stock of clean diapers
D = stock of dirty diapers
q = inflow of clean diapers
r = flow of clean diapers to dirty diapers
s = out-flow of dirty diapers
"""
class o:
"""Emulate Javascript's uber simple objects.
Note my convention: I use "`i`" not "`this`."""
def has(i) : return i.__dict__
def __init__(i,**d) : i.has().update(d)
def __setitem__(i,k,v) : i.has()[k] = v
def __getitem__(i,k) : return i.has()[k]
def __repr__(i) : return 'o'+str(i.has())
def copy(i):
j = o()
for k in i.has(): j[k] = i[k]
return j
  def asList(i,keys=[]):
    keys = keys or list(i.has().keys())
    return [i[k] for k in keys]
class Has:
def __init__(i,init,lo=0,hi=100):
i.init,i.lo,i.hi = init,lo,hi
def restrain(i,x):
return max(i.lo,
min(i.hi, x))
def rank(i):
if isa(i,Flow) : return 3
if isa(i,Stock): return 1
if isa(i,Aux) : return 2
def __repr__(i):
return str(dict(what=i.__class__.__name__,
name= i.name,init= i.init,
lo = i.lo, hi = i.hi))
class Flow(Has) : pass
class Stock(Has): pass
class Aux(Has) : pass
F,S,A=Flow,Stock,Aux
class Model:
def about(i):
tmp=i.have()
for k,v in tmp.has().items():
v.name = k
return tmp
  def run(i,dt=1,tmax=100):
    t,u,keep = 0, o(), []
    about = i.about()
    keys = sorted(about.has().keys(),            # stocks, then aux, then flows
                  key=lambda z: about[z].rank())
    for k,a in about.has().items():
      u[k] = a.init
    keep = [["t"] + keys,
            [0]   + u.asList(keys)]
    while t < tmax:
      v = u.copy()
      i.step(dt,t,u,v)
      for k in about.has():                      # clip values back into [lo,hi]
        v[k] = about[k].restrain(v[k])
      t    += dt
      keep += [[t] + v.asList(keys)]
      u     = v                                  # advance the state
    return keep
class Diapers(Model):
def have(i):
return o(C = S(20), D = S(0),
q = F(0), r = F(8), s = F(0))
def step(i,dt,t,u,v):
def saturday(x): return int(x) % 7 == 6
v.C += dt*(u.q - u.r)
v.D += dt*(u.r - u.s)
v.q = 70 if saturday(t) else 0
v.s = u.D if saturday(t) else 0
if t == 27: # special case (the day i forget)
v.s = 0
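# A minimal usage sketch (assumes the run() fixes above; prints a header
# row, then one row of stock/flow values per simulated day):
@ok
def _diapers2():
  for row in Diapers().run(dt=1, tmax=14):
    print(row)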
@ok
def _diapers1():
print(Diapers().about()) | unlicense | -4,875,680,603,400,262,000 | 22.934579 | 50 | 0.496094 | false | 2.694737 | false | false | false |
jmhobbs/detours-clone | lib/detours.py | 1 | 1397 | # -*- coding: utf-8 -*-
import os
class HostFile:
delimiter = "##### DO NOT EDIT BETWEEN THESE LINES - DETOURD #####\n"
def __init__ ( self, path ):
self.path = path
def canWrite ( self ):
try:
f = open( self.path, 'r+' )
f.close()
return True
except IOError:
return False
def getDetours ( self ):
with open( self.path, 'r' ) as handle:
detours = []
opened = False
for line in handle:
if line == self.delimiter:
if opened:
break
else:
opened = True
else:
if opened:
try:
detours.append( self.parseDetour( line ) )
except IndexError, e:
pass
return detours
def writeDetours ( self, detours ):
with open( self.path, 'r+' ) as handle:
buffer = ''
opened = False
for line in handle:
if line == self.delimiter:
opened = not opened
continue
if opened:
continue
buffer += line
handle.seek( 0 )
handle.truncate()
handle.write( self.delimiter )
for detour in detours:
handle.write( "%s\t%s\n" % ( detour['ip'], detour['host'].lower() ) )
handle.write( self.delimiter )
handle.write( buffer )
def parseDetour ( self, line ):
split = line.strip().split()
return { 'ip': split[0], 'host': split[1] }
def findDetour ( self, host ):
for detour in self.getDetours():
if detour['host'] == host.lower():
return detour
return None
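# A minimal usage sketch (path and host values are hypothetical):
#
#   hosts = HostFile( '/etc/hosts' )
#   if hosts.canWrite():
#       detours = hosts.getDetours()
#       detours.append( { 'ip': '127.0.0.1', 'host': 'example.com' } )
#       hosts.writeDetours( detours )
#       print hosts.findDetour( 'example.com' )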
| mit | -1,530,638,960,746,162,700 | 20.166667 | 73 | 0.591267 | false | 2.985043 | false | false | false |
palisadoes/switchmap-ng | switchmap/snmp/cisco/mib_ciscocdp.py | 2 | 5045 | #!/usr/bin/env python3
"""Module for CISCO-CDP-MIB."""
from collections import defaultdict
from switchmap.snmp.base_query import Query
def get_query():
"""Return this module's Query class."""
return CiscoCdpQuery
def init_query(snmp_object):
"""Return initialize and return this module's Query class."""
return CiscoCdpQuery(snmp_object)
class CiscoCdpQuery(Query):
"""Class interacts with CISCO-CDP-MIB.
Args:
None
Returns:
None
Key Methods:
supported: Queries the device to determine whether the MIB is
supported using a known OID defined in the MIB. Returns True
if the device returns a response to the OID, False if not.
layer1: Returns all needed layer 1 MIB information from the device.
            Keyed by ifIndex (primary key), OID's MIB name (secondary key)
"""
def __init__(self, snmp_object):
"""Function for intializing the class.
Args:
snmp_object: SNMP Interact class object from snmp_manager.py
Returns:
None
"""
# Define query object
self.snmp_object = snmp_object
# Get one OID entry in MIB (cdpCacheDeviceId)
test_oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.6'
super().__init__(snmp_object, test_oid, tags=['layer1'])
def layer1(self):
"""Get layer 1 data from device.
Args:
None
Returns:
            final: dict of CDP neighbor data keyed by ifIndex, then by MIB
                object name (cdpCacheDeviceId, cdpCachePlatform,
                cdpCacheDevicePort)
"""
# Initialize key variables
final = defaultdict(lambda: defaultdict(dict))
# Get interface cdpCacheDeviceId data
values = self.cdpcachedeviceid()
for key, value in values.items():
final[key]['cdpCacheDeviceId'] = value
# Get interface cdpCachePlatform data
values = self.cdpcacheplatform()
for key, value in values.items():
final[key]['cdpCachePlatform'] = value
# Get interface cdpCacheDevicePort data
values = self.cdpcachedeviceport()
if values is not None:
for key, value in values.items():
final[key]['cdpCacheDevicePort'] = value
# Return
return final
def cdpcachedeviceid(self, oidonly=False):
"""Return dict of CISCO-CDP-MIB cdpCacheDeviceId for each port.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of cdpCacheDeviceId using ifIndex as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# OID to process
oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.6'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.swalk(oid, normalized=False)
for key, value in results.items():
ifindex = _ifindex(key)
data_dict[ifindex] = str(bytes(value), encoding='utf-8')
# Return the interface descriptions
return data_dict
def cdpcacheplatform(self, oidonly=False):
"""Return dict of CISCO-CDP-MIB cdpCachePlatform for each port.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of cdpCachePlatform using ifIndex as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# OID to process
oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.8'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.swalk(oid, normalized=False)
for key, value in results.items():
ifindex = _ifindex(key)
data_dict[ifindex] = str(bytes(value), encoding='utf-8')
# Return the interface descriptions
return data_dict
def cdpcachedeviceport(self, oidonly=False):
"""Return dict of CISCO-CDP-MIB cdpCacheDevicePort for each port.
Args:
oidonly: Return OID's value, not results, if True
Returns:
data_dict: Dict of cdpCacheDevicePort using ifIndex as key
"""
# Initialize key variables
data_dict = defaultdict(dict)
# OID to process
oid = '.1.3.6.1.4.1.9.9.23.1.2.1.1.7'
# Return OID value. Used for unittests
if oidonly is True:
return oid
# Process results
results = self.snmp_object.swalk(oid, normalized=False)
for key, value in results.items():
ifindex = _ifindex(key)
data_dict[ifindex] = str(bytes(value), encoding='utf-8')
# Return the interface descriptions
return data_dict
def _ifindex(oid):
"""Return the ifindex from a CDP OID.
Args:
oid: OID
Returns:
ifindex: value of the ifindex
"""
# Initialize key variables
nodes = oid.split('.')
ifindex = int(nodes[-2])
# Return
return ifindex
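# A minimal usage sketch (assumes an already-authenticated
# snmp_manager.Interact instance named `snmp_object`; method names follow
# the class docstring, everything else is illustrative):
#
#   query = init_query(snmp_object)
#   if query.supported():
#       for ifindex, data in query.layer1().items():
#           print(ifindex, data.get('cdpCacheDeviceId'))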
| apache-2.0 | -8,081,237,075,447,690,000 | 25.276042 | 75 | 0.589891 | false | 3.938329 | false | false | false |
Ichaelus/Github-Classifier | Application/start.py | 1 | 8359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################
# Initialization and startup file #
###################################
def packageMissing(name):
raise ImportError('Dependency \''+name+'\' has not been found. Please refer to the installation manual.')
import time, os, webbrowser, collections
try:
from bottle import Bottle
except ImportError:
packageMissing("Bottle")
serverUsed = ""
try:
if os.name.lower() == "nt":
import cherrypy
serverUsed = "cherrypy"
cherrypy.response.timeout = 14400000
cherrypy.config.update({'response.timeout': 14400000})
cherrypy.engine.timeout_monitor.unsubscribe()
else:
raise ImportError("Not Using Windows")
except ImportError:
    try:  # Fallback for macOS/Unix, or for Windows without cherrypy installed
import paste
serverUsed = "paste"
except ImportError:
packageMissing("paste")
from Controllers.HomeController import homebottle, homesetclassifiercollection
from Models.ClassifierCollection import ClassifierCollection
from Models.ClassificationModules.nndescriptiononly import nndescriptiononly
from Models.ClassificationModules.lrdescriptiononly import lrdescriptiononly
from Models.ClassificationModules.nnreadmeonly import nnreadmeonly
from Models.ClassificationModules.lrreadmeonly import lrreadmeonly
from Models.ClassificationModules.readmeonlyrandomforest import readmeonlyrandomforest
from Models.ClassificationModules.multinomialnbreadmeonly import multinomialnbreadmeonly
from Models.ClassificationModules.multinomialnbdescriptiononly import multinomialnbdescriptiononly
from Models.ClassificationModules.bernoullinbreadmeonly import bernoullinbreadmeonly
from Models.ClassificationModules.bernoullinbdescriptiononly import bernoullinbdescriptiononly
from Models.ClassificationModules.nnmetaonly import nnmetaonly
from Models.ClassificationModules.metaonlyrandomforest import metaonlyrandomforest
from Models.ClassificationModules.metaonlysvc import metaonlysvc
from Models.ClassificationModules.metaonlyadaboost import metaonlyadaboost
from Models.ClassificationModules.reponamelstm import reponamelstm
from Models.ClassificationModules.readmelstm import readmelstm
from Models.ClassificationModules.nnall import nnall
from Models.ClassificationModules.knnreadmeonly import knnreadmeonly
from Models.ClassificationModules.svcfilenamesonly import filenamesonlysvc
from Models.ClassificationModules.lrstacking import lrstacking
from Models.ClassificationModules.svmall import svmall
from Models.ClassificationModules.rfall import allrandomforest
from Models.ClassificationModules.gbrtmetaonly import gbrtmetaonly
from Models.ClassificationModules.gbrtreadmeonly import gbrtreadmeonly
from Models.ClassificationModules.gbrtfilesandfolders import gbrtfilesandfolders
from Models.ClassificationModules.gbrtdescriptionmeta import gbrtdescriptionmeta
from Models.ClassificationModules.svmreadmemeta import svmreadmemeta
from Models.ClassificationModules.allbernoullinb import allbernoullinb
from Models.ClassificationModules.allmultinomialnb import allmultinomialnb
from Models.ClassificationModules.averageensemble import averageensemble
from Models.ClassificationModules.nnstacking import nnstacking
from Models.ClassificationModules.lrstackingmeta import lrstackingmeta
from Models.ClassificationModules.foldernameslstm import foldernameslstm
from Models.ClassificationModules.descriptionfoldersreponamelstm import descriptionfoldersreponamelstm
from Models.ClassificationModules.descriptionlstm import descriptionlstm
from Models.ClassificationModules.descriptionreponamelstm import descriptionreponamelstm
import Models.DatabaseCommunication as DC
print("Starting application..")
rootApp = Bottle()
# Initialize ClassifierCollection
classifiercollection = ClassifierCollection()
print 'Getting DB Data to be able to create vectorizers for classifiers that need it'
descriptionCorpus, readmeCorpus, filenameCorpus, filetypeCorpus, foldernameCorpus = DC.getCorpi()
#Initialize Classifiers
print 'Creating and adding Classifiers to Classifier Collection:'
# First load all classifiers which don't need other classifiers as parameter
loadedClassifiers = [] # Keep track of which classifiers have been loaded (or where a load was attempted)
classifiers = {}
classifiers['metaonlyrandomforest'] = metaonlyrandomforest()
classifiers['metaonlysvc'] = metaonlysvc()
classifiers['gbrtdescriptionmeta'] = gbrtdescriptionmeta(descriptionCorpus)
classifiers['svmreadmemeta'] = svmreadmemeta(readmeCorpus)
#classifiers['descriptionlstm'] = descriptionlstm() # Remove all commented classifiers?
#classifiers['descriptionfoldersreponamelstm'] = descriptionfoldersreponamelstm()
classifiers['reponamelstm'] = reponamelstm()
#classifiers['readmelstm'] = readmelstm()
#classifiers['descriptionreponamelstm'] = descriptionreponamelstm()
for classifier in classifiers:
loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused")
if loaded_classifier is not None:
classifiers[classifier] = loaded_classifier
loadedClassifiers.append(classifier)
# Now all classifiers should have been loaded from last savepoint, if available
# Use these loaded classifiers by giving them to specific ensemble-Models
classifiers['nnall'] = nnall(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus)
classifiers['svmall'] = svmall(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus)
classifiers['allrandomforest'] = allrandomforest(readmeCorpus + descriptionCorpus, filetypeCorpus, filenameCorpus, foldernameCorpus)
for classifier in classifiers:
if classifier not in loadedClassifiers:
loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused")
if loaded_classifier is not None:
classifiers[classifier] = loaded_classifier
loadedClassifiers.append(classifier)
#classifiers['lrstacking'] = lrstacking([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']])
#classifiers['averageensemble'] = averageensemble([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']])
classifiers['nnstacking'] = nnstacking([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']])
#classifiers['lrstackingmeta'] = lrstackingmeta([classifiers['nnall'], classifiers['metaonlyrandomforest'], classifiers['svmall'], classifiers['metaonlysvc'], classifiers['allrandomforest'], classifiers['reponamelstm'], classifiers['gbrtdescriptionmeta'], classifiers['svmreadmemeta']])
# Finally load all meta-models such as lrstacking
for classifier in classifiers:
if classifier not in loadedClassifiers:
loaded_classifier = classifiers[classifier].loadClassificationModuleSavePoint(filename="lastused")
if loaded_classifier is not None:
classifiers[classifier] = loaded_classifier
# Order the classifiers for the final submission
orderedClassifiers = collections.OrderedDict()
order = ['nnstacking', 'gbrtdescriptionmeta', 'svmall', 'nnall', 'svmreadmemeta', 'allrandomforest', 'metaonlyrandomforest', 'metaonlysvc', 'reponamelstm']
for classifiername in order:
orderedClassifiers[classifiername] = classifiers[classifiername]
# Load classifiers into collection
for c in orderedClassifiers:
classifiercollection.addClassificationModule(classifiers[c])
# Pass ClassifierCollection to Controller
homesetclassifiercollection(classifiercollection)
# Wait a bit so the website doesn't get called before it's ready
time.sleep(3)
print 'Done. Starting Bottle...'
#Start Bottle
if __name__ == '__main__':
webbrowser.open("http://localhost:8080/")
rootApp.merge(homebottle)
rootApp.run(server=serverUsed, debug=True)
| mit | -8,548,725,733,669,870,000 | 50.919255 | 288 | 0.81158 | false | 4.506199 | false | false | false |
sajuptpm/manila | manila/tests/share/drivers/zfssa/test_zfssarest.py | 5 | 16891 | # Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Oracle's ZFSSA REST API.
"""
import mock
from oslo_log import log
from manila import exception
from manila.share.drivers.zfssa import restclient
from manila.share.drivers.zfssa import zfssarest
from manila import test
from manila.tests import fake_zfssa
LOG = log.getLogger(__name__)
class ZFSSAApiTestCase(test.TestCase):
"""Tests ZFSSAApi."""
@mock.patch.object(zfssarest, 'factory_restclient')
def setUp(self, _restclient):
super(ZFSSAApiTestCase, self).setUp()
self.host = 'fakehost'
self.user = 'fakeuser'
self.url = None
self.pool = 'fakepool'
self.project = 'fakeproject'
self.share = 'fakeshare'
self.snap = 'fakesnapshot'
_restclient.return_value = fake_zfssa.FakeRestClient()
self._zfssa = zfssarest.ZFSSAApi()
self._zfssa.set_host('fakehost')
def _create_response(self, status):
response = fake_zfssa.FakeResponse(status)
return response
def test_enable_service(self):
self.mock_object(self._zfssa.rclient, 'put')
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.ACCEPTED)
self._zfssa.enable_service('nfs')
self.assertEqual(1, self._zfssa.rclient.put.call_count)
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.OK)
self.assertRaises(exception.ShareBackendException,
self._zfssa.enable_service,
'nfs')
def test_verify_avail_space(self):
self.mock_object(self._zfssa, 'verify_project')
self.mock_object(self._zfssa, 'get_project_stats')
self._zfssa.get_project_stats.return_value = 2000
self._zfssa.verify_avail_space(self.pool,
self.project,
self.share,
1000)
self.assertEqual(1, self._zfssa.verify_project.call_count)
self.assertEqual(1, self._zfssa.get_project_stats.call_count)
self._zfssa.verify_project.assert_called_with(self.pool, self.project)
self._zfssa.get_project_stats.assert_called_with(self.pool,
self.project)
self._zfssa.get_project_stats.return_value = 900
self.assertRaises(exception.ShareBackendException,
self._zfssa.verify_avail_space,
self.pool,
self.project,
self.share,
1000)
def test_create_project(self):
self.mock_object(self._zfssa, 'verify_pool')
self.mock_object(self._zfssa.rclient, 'get')
self.mock_object(self._zfssa.rclient, 'post')
arg = {
'name': self.project,
'sharesmb': 'off',
'sharenfs': 'off',
'mountpoint': 'fakemnpt',
}
self._zfssa.rclient.get.return_value = self._create_response(
restclient.Status.NOT_FOUND)
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.CREATED)
self._zfssa.create_project(self.pool, self.project, arg)
self.assertEqual(1, self._zfssa.rclient.get.call_count)
self.assertEqual(1, self._zfssa.rclient.post.call_count)
self.assertEqual(1, self._zfssa.verify_pool.call_count)
self._zfssa.verify_pool.assert_called_with(self.pool)
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.NOT_FOUND)
self.assertRaises(exception.ShareBackendException,
self._zfssa.create_project,
self.pool,
self.project,
arg)
def test_create_share(self):
self.mock_object(self._zfssa, 'verify_avail_space')
self.mock_object(self._zfssa.rclient, 'get')
self.mock_object(self._zfssa.rclient, 'post')
self._zfssa.rclient.get.return_value = self._create_response(
restclient.Status.NOT_FOUND)
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.CREATED)
arg = {
"name": self.share,
"quota": 1,
}
self._zfssa.create_share(self.pool, self.project, arg)
self.assertEqual(1, self._zfssa.rclient.get.call_count)
self.assertEqual(1, self._zfssa.rclient.post.call_count)
self.assertEqual(1, self._zfssa.verify_avail_space.call_count)
self._zfssa.verify_avail_space.assert_called_with(self.pool,
self.project,
arg,
arg['quota'])
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.NOT_FOUND)
self.assertRaises(exception.ShareBackendException,
self._zfssa.create_share,
self.pool,
self.project,
arg)
self._zfssa.rclient.get.return_value = self._create_response(
restclient.Status.OK)
self.assertRaises(exception.ShareBackendException,
self._zfssa.create_share,
self.pool,
self.project,
arg)
def test_modify_share(self):
self.mock_object(self._zfssa.rclient, 'put')
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.ACCEPTED)
arg = {"name": "dummyname"}
svc = self._zfssa.share_path % (self.pool, self.project, self.share)
self._zfssa.modify_share(self.pool, self.project, self.share, arg)
self.assertEqual(1, self._zfssa.rclient.put.call_count)
self._zfssa.rclient.put.assert_called_with(svc, arg)
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.BAD_REQUEST)
self.assertRaises(exception.ShareBackendException,
self._zfssa.modify_share,
self.pool,
self.project,
self.share,
arg)
def test_delete_share(self):
self.mock_object(self._zfssa.rclient, 'delete')
self._zfssa.rclient.delete.return_value = self._create_response(
restclient.Status.NO_CONTENT)
svc = self._zfssa.share_path % (self.pool, self.project, self.share)
self._zfssa.delete_share(self.pool, self.project, self.share)
self.assertEqual(1, self._zfssa.rclient.delete.call_count)
self._zfssa.rclient.delete.assert_called_with(svc)
def test_create_snapshot(self):
self.mock_object(self._zfssa.rclient, 'post')
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.CREATED)
arg = {"name": self.snap}
svc = self._zfssa.snapshots_path % (self.pool,
self.project,
self.share)
self._zfssa.create_snapshot(self.pool,
self.project,
self.share,
self.snap)
self.assertEqual(1, self._zfssa.rclient.post.call_count)
self._zfssa.rclient.post.assert_called_with(svc, arg)
self._zfssa.rclient.post.return_value = self._create_response(
restclient.Status.BAD_REQUEST)
self.assertRaises(exception.ShareBackendException,
self._zfssa.create_snapshot,
self.pool,
self.project,
self.share,
self.snap)
def test_delete_snapshot(self):
self.mock_object(self._zfssa.rclient, 'delete')
self._zfssa.rclient.delete.return_value = self._create_response(
restclient.Status.NO_CONTENT)
svc = self._zfssa.snapshot_path % (self.pool,
self.project,
self.share,
self.snap)
self._zfssa.delete_snapshot(self.pool,
self.project,
self.share,
self.snap)
self.assertEqual(1, self._zfssa.rclient.delete.call_count)
self._zfssa.rclient.delete.assert_called_with(svc)
self._zfssa.rclient.delete.return_value = self._create_response(
restclient.Status.BAD_REQUEST)
self.assertRaises(exception.ShareBackendException,
self._zfssa.delete_snapshot,
self.pool,
self.project,
self.share,
self.snap)
def test_clone_snapshot(self):
self.mock_object(self._zfssa, 'verify_avail_space')
self.mock_object(self._zfssa.rclient, 'put')
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.CREATED)
snapshot = {
"id": self.snap,
"share_id": self.share,
}
clone = {
"id": "cloneid",
"size": 1,
}
arg = {
"name": "dummyname",
"quota": 1,
}
self._zfssa.clone_snapshot(self.pool,
self.project,
snapshot,
clone,
arg)
self.assertEqual(1, self._zfssa.rclient.put.call_count)
self.assertEqual(1, self._zfssa.verify_avail_space.call_count)
self._zfssa.verify_avail_space.assert_called_with(self.pool,
self.project,
clone['id'],
clone['size'])
self._zfssa.rclient.put.return_value = self._create_response(
restclient.Status.NOT_FOUND)
self.assertRaises(exception.ShareBackendException,
self._zfssa.clone_snapshot,
self.pool,
self.project,
snapshot,
clone,
arg)
def _create_entry(self, sharenfs, ip):
if sharenfs == 'off':
sharenfs = 'sec=sys'
entry = (',rw=@%s' % ip)
if '/' not in ip:
entry = entry + '/32'
arg = {'sharenfs': sharenfs + entry}
return arg
def test_allow_access_nfs(self):
self.mock_object(self._zfssa, 'get_share')
self.mock_object(self._zfssa, 'modify_share')
details = {"sharenfs": "off"}
access = {
"access_type": "nonip",
"access_to": "foo",
}
# invalid access type
self.assertRaises(exception.InvalidShareAccess,
self._zfssa.allow_access_nfs,
self.pool,
self.project,
self.share,
access)
# valid entry
access.update({"access_type": "ip"})
arg = self._create_entry("off", access['access_to'])
self._zfssa.get_share.return_value = details
self._zfssa.allow_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(1, self._zfssa.get_share.call_count)
self.assertEqual(1, self._zfssa.modify_share.call_count)
self._zfssa.get_share.assert_called_with(self.pool,
self.project,
self.share)
self._zfssa.modify_share.assert_called_with(self.pool,
self.project,
self.share,
arg)
# add another entry
access.update({"access_to": "10.0.0.1/24"})
arg = self._create_entry("off", access['access_to'])
self._zfssa.allow_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(2, self._zfssa.modify_share.call_count)
self._zfssa.modify_share.assert_called_with(self.pool,
self.project,
self.share,
arg)
# verify modify_share is not called if sharenfs='on'
details = {"sharenfs": "on"}
self._zfssa.get_share.return_value = details
self._zfssa.allow_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(2, self._zfssa.modify_share.call_count)
# verify modify_share is not called if ip is already in the list
access.update({"access_to": "10.0.0.1/24"})
details = self._create_entry("off", access['access_to'])
self._zfssa.get_share.return_value = details
self._zfssa.allow_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(2, self._zfssa.modify_share.call_count)
def test_deny_access_nfs(self):
self.mock_object(self._zfssa, 'get_share')
self.mock_object(self._zfssa, 'modify_share')
data1 = self._create_entry("off", "10.0.0.1")
access = {
"access_type": "nonip",
"access_to": "foo",
}
# invalid access_type
self.assertRaises(exception.InvalidShareAccess,
self._zfssa.deny_access_nfs,
self.pool,
self.project,
self.share,
access)
# valid entry
access.update({"access_type": "ip"})
self._zfssa.get_share.return_value = data1
self._zfssa.deny_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(1, self._zfssa.get_share.call_count)
self.assertEqual(0, self._zfssa.modify_share.call_count)
self._zfssa.get_share.assert_called_with(self.pool,
self.project,
self.share)
# another valid entry
data1 = self._create_entry(data1['sharenfs'], '10.0.0.2/24')
data2 = self._create_entry(data1['sharenfs'], access['access_to'])
self._zfssa.get_share.return_value = data2
self._zfssa.deny_access_nfs(self.pool,
self.project,
self.share,
access)
self.assertEqual(2, self._zfssa.get_share.call_count)
self.assertEqual(1, self._zfssa.modify_share.call_count)
self._zfssa.get_share.assert_called_with(self.pool,
self.project,
self.share)
self._zfssa.modify_share.assert_called_with(self.pool,
self.project,
self.share,
data1)
| apache-2.0 | -968,602,415,451,639,700 | 41.65404 | 78 | 0.503108 | false | 4.314432 | true | false | false |
oneminot/audacity | lib-src/lv2/sord/waflib/Tools/dbus.py | 318 | 1142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task,Errors
from waflib.TaskGen import taskgen_method,before_method
@taskgen_method
def add_dbus_file(self,filename,prefix,mode):
if not hasattr(self,'dbus_lst'):
self.dbus_lst=[]
if not'process_dbus'in self.meths:
self.meths.append('process_dbus')
self.dbus_lst.append([filename,prefix,mode])
@before_method('apply_core')
def process_dbus(self):
for filename,prefix,mode in getattr(self,'dbus_lst',[]):
node=self.path.find_resource(filename)
if not node:
raise Errors.WafError('file not found '+filename)
tsk=self.create_task('dbus_binding_tool',node,node.change_ext('.h'))
tsk.env.DBUS_BINDING_TOOL_PREFIX=prefix
tsk.env.DBUS_BINDING_TOOL_MODE=mode
class dbus_binding_tool(Task.Task):
color='BLUE'
ext_out=['.h']
run_str='${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
shell=True
def configure(conf):
dbus_binding_tool=conf.find_program('dbus-binding-tool',var='DBUS_BINDING_TOOL')
| gpl-2.0 | -6,612,690,221,675,248 | 38.37931 | 124 | 0.7338 | false | 2.819753 | false | false | false |
joshfriend/sqlalchemy-utils | tests/test_expressions.py | 1 | 4399 | from pytest import raises
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_utils.expressions import (
explain,
explain_analyze,
tsvector_match,
tsvector_concat,
to_tsquery,
plainto_tsquery
)
from tests import TestCase
class ExpressionTestCase(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
content = sa.Column(sa.UnicodeText)
search_vector = sa.Column(TSVectorType)
search_vector2 = sa.Column(TSVectorType)
self.Article = Article
def assert_startswith(self, query, query_part):
assert str(
query.compile(dialect=postgresql.dialect())
).startswith(query_part)
# Check that query executes properly
self.session.execute(query)
class TestExplain(ExpressionTestCase):
def test_render_explain(self):
self.assert_startswith(
explain(self.session.query(self.Article)),
'EXPLAIN SELECT'
)
def test_render_explain_with_analyze(self):
self.assert_startswith(
explain(self.session.query(self.Article), analyze=True),
'EXPLAIN (ANALYZE true) SELECT'
)
def test_with_string_as_stmt_param(self):
self.assert_startswith(
explain('SELECT 1 FROM article'),
'EXPLAIN SELECT'
)
def test_format(self):
self.assert_startswith(
explain('SELECT 1 FROM article', format='json'),
'EXPLAIN (FORMAT json) SELECT'
)
def test_timing(self):
self.assert_startswith(
explain('SELECT 1 FROM article', analyze=True, timing=False),
'EXPLAIN (ANALYZE true, TIMING false) SELECT'
)
def test_verbose(self):
self.assert_startswith(
explain('SELECT 1 FROM article', verbose=True),
'EXPLAIN (VERBOSE true) SELECT'
)
def test_buffers(self):
self.assert_startswith(
explain('SELECT 1 FROM article', analyze=True, buffers=True),
'EXPLAIN (ANALYZE true, BUFFERS true) SELECT'
)
def test_costs(self):
self.assert_startswith(
explain('SELECT 1 FROM article', costs=False),
'EXPLAIN (COSTS false) SELECT'
)
class TestExplainAnalyze(ExpressionTestCase):
def test_render_explain_analyze(self):
assert str(
explain_analyze(self.session.query(self.Article))
.compile(
dialect=postgresql.dialect()
)
).startswith('EXPLAIN (ANALYZE true) SELECT')
class TestMatchTSVector(ExpressionTestCase):
def test_raises_exception_if_less_than_2_parameters_given(self):
with raises(Exception):
str(
tsvector_match(
self.Article.search_vector,
)
)
def test_supports_postgres(self):
assert str(tsvector_match(
self.Article.search_vector,
to_tsquery('something'),
)) == '(article.search_vector) @@ to_tsquery(:to_tsquery_1)'
class TestToTSQuery(ExpressionTestCase):
def test_requires_atleast_one_parameter(self):
with raises(Exception):
str(to_tsquery())
def test_supports_postgres(self):
assert str(to_tsquery('something')) == 'to_tsquery(:to_tsquery_1)'
class TestPlainToTSQuery(ExpressionTestCase):
def test_requires_atleast_one_parameter(self):
with raises(Exception):
str(plainto_tsquery())
def test_supports_postgres(self):
assert str(plainto_tsquery('s')) == (
'plainto_tsquery(:plainto_tsquery_1)'
)
class TestConcatTSVector(ExpressionTestCase):
def test_concatenate_search_vectors(self):
assert str(tsvector_match(
tsvector_concat(
self.Article.search_vector,
self.Article.search_vector2
),
to_tsquery('finnish', 'something'),
)) == (
'(article.search_vector || article.search_vector2) '
'@@ to_tsquery(:to_tsquery_1, :to_tsquery_2)'
)
| bsd-3-clause | -1,616,012,066,129,236,500 | 29.337931 | 74 | 0.605138 | false | 4.032081 | true | false | false |
ideascube/pibox-installer | kiwix-hotspot/wipe.py | 1 | 1121 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
""" Flash a clean, single FAT partition MBR onto the specifief SD-card """
import os
import sys
import argparse
import multiprocessing
import data
from backend.util import flash_image_with_etcher
from util import CLILogger
def wipe_card(logger, sd_card):
logger.step("Wiping `{}` SD-card by flashing empty/clean MBR".format(sd_card))
retcode = multiprocessing.Value("i", -1)
flash_image_with_etcher(
os.path.join(data.data_dir, "mbr.img"), sd_card, retcode, True
)
if retcode.value == 0:
logger.succ("SD-card `{}` wiped successfuly".format(sd_card))
else:
logger.err("Unable to wipe SD-card at `{}`".format(sd_card))
return retcode.value
def main():
logger = CLILogger()
parser = argparse.ArgumentParser(description="SD-card Wiping Tool")
parser.add_argument("--sdcard", help="Device path for the SD-card", required=True)
# defaults to help
args = parser.parse_args(["--help"] if len(sys.argv) < 2 else None)
sys.exit(wipe_card(logger, sd_card=args.sdcard))
| gpl-3.0 | -3,246,676,374,475,777,000 | 28.5 | 86 | 0.67083 | false | 3.122563 | false | false | false |
csiu/promi2 | code/gff_unify_features.py | 1 | 4149 | #!/usr/bin/env python
# Author: csiu
# Created: 2015-02-17
import argparse
import os
from utils import get_value_from_keycolonvalue_list
usage = """Essentially running:
bedtools intersect -a <features.gff> -b <mirna_prox.gff> -s -f 1 -r -wao
"""
def bedtools_intersect(gff_a, gff_b, gff_out):
## unify
cmd = 'bedtools intersect -a '+gff_a+' -b '+gff_b+' -s -f 1 -r -wao >'+gff_out
print cmd
os.system(cmd)
return
def gff_unify_features(gff_a, gff_b, fname, dfvalue, f_out,
retainSourceFeature=False):
## unify
f_out_tmp = f_out+'.tmp'
bedtools_intersect(gff_a, gff_b, f_out_tmp)
## parse
with open(f_out, 'w') as out:
with open(f_out_tmp) as f:
for l in f:
l = l.strip().split('\t')
chrom = l[0]
start = l[3]
stop = l[4]
count = l[5]
strand = l[6]
features = l[7]
info_a = l[8]
_chrom = l[9]
if chrom == _chrom:
## yes overlap of features w/ mirna_proximity
x_b = l[14]
info_b = l[17]
mirbase_id = get_value_from_keycolonvalue_list('mirbase_id',
info_b.split(';'))
else:
x_b = dfvalue
info_b = ''
mirbase_id = '.'
features = '%s;%s:%s' % (features, fname, x_b)
new_info = info_a + '@' + info_b
if retainSourceFeature:
newline = '\t'.join([chrom, l[1], l[2], start, stop,
count, strand, features, new_info])
else:
newline = '\t'.join([chrom, 'putative_tss', mirbase_id, start, stop,
count, strand, features, new_info])
out.write(newline + '\n')
    os.remove(f_out_tmp)
return
def _verify_mirbaseID(gff_infile, gff_outfile):
with open(gff_outfile, 'w') as out:
with open(gff_infile) as f:
for l in f:
info = l.strip().split('\t')[8].split('@')
_x = info[-2].split(';')
_y = info[-1].split(';')
_x = get_value_from_keycolonvalue_list('mirbase_id', _x)
_y = get_value_from_keycolonvalue_list('mirbase_id', _y)
if _x == _y or _x == '' or _y == '':
out.write(l)
return
def main(gff_a, gff_b, fname, dfvalue, f_out, retainSourceFeature=False):
tmpfile = f_out + '.tmp'
gff_unify_features(gff_a, gff_b, fname, dfvalue, tmpfile, retainSourceFeature)
_verify_mirbaseID(tmpfile, f_out)
os.remove(tmpfile)
return f_out
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', dest='gff_a',
required=True,
help='''path to <features>.gff (this should contain
"cpg, cons, and tata" in the info column e.g. column 8)''')
parser.add_argument('-b', dest='gff_b',
required=True,
help='''path to <mirna_proximity>.gff (this should contain
the feature you want in add to <GFF_A> in column 6 )''')
parser.add_argument('-f', dest='fname',
required=True,
help='''name of feature''')
parser.add_argument('-d', dest='dfvalue',
default='na',
help='''default feature value''')
parser.add_argument('-o', dest='outfile',
default='all_features.gff',
help='specify outfile; default = "all_features.gff"')
    ## get the arguments
    args = parser.parse_args()
    ## run the unification
main(args.gff_a, args.gff_b, args.fname, args.dfvalue, args.outfile)
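    # Example invocation (file names are hypothetical):
    #   python gff_unify_features.py -a features.gff -b mirna_proximity.gff \
    #       -f mirna_prox -d na -o all_features.gff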
| mit | -5,052,340,892,532,687,000 | 34.461538 | 88 | 0.476018 | false | 3.570568 | false | false | false |
whitehorse-io/encarnia | Encarnia/typeclasses/npcs.py | 1 | 9579 | """
NPCs
Default non-player character typeclasses: combat mobs that defend
themselves when attacked, plus a merchant variant with the same combat logic.
"""
from evennia import DefaultObject
from evennia import default_cmds, CmdSet, utils
from commands.default_cmdsets import ChargenCmdset, ShopCmdset, BankCmdset, MerchantCmdset
from world import english_utils, npc_rules # , npc_rules
from random import randint
import time
from evennia import TICKER_HANDLER as tickerhandler
class Combat_Mob(DefaultObject): # This mob will not attack people but it will defend itself from attack.
"""
"""
def at_object_creation(self):
# Inherit the object properties.
super(Combat_Mob, self).at_object_creation()
self.aliases.add([])
#self.name = "a ruddy bronze broadsword" # not sure if I need this
self.db.live_name = "a giant rat"
        self.db.defeated_name = "the mangled remains of some large vermin" # must not have 'rat' in it or it can't be targeted!
self.db.alive = True
self.db.desc = ""
self.db.health = 35
self.db.max_health = 35 # NPC damage, (player level * strength) * weapon damage ratio.
# So a level 1 would do 10 damage a hit, then 20, then 30, up to 1,000 per hit at level 100.
self.db.damage_amount = 15
self.db.ticker_speed = 3 # how often it attempts to attack or move/attack if a target is not found. This will only fire so many times before they 'forget'.
self.db.counter_attack_chance = False # integer chance this npc will trigger a counter-attack. Defaults as false.
self.db.respawn_speed = 600 # SHOULD BE A MULTIPLE OF 100
self.db.tries = 3 # how long it will spend trying to find its attacker before shutting down.
self.db.exp_level = 10 # this is the relative level of the creature
self.db.exp_multiplier = 4 # If you're under the level, subtract player level from NPC level and multiply by the multiplier.
self.db.exp_max_level = 20 # At this level you won't gain any experience from killing this NPC.
self.db.home_location = "#2" # This should be set!
# So normally any kill is worth 1% exp.
# But if your level is under the npc's level, you get a bonus
# The bonus is level difference * multiplier.
# This multiplier equation could similarly be used when attacking people below your current level, so you might
# level up multiple times from killing a high-level person.
self.db.offended_by = []
self.db.lootable = False #can be LOOTed for silver.
self.db.looted_yet = False
self.db.silver_amount = 0
self.db.skinnable = True #can be SKINNED for a pelt or skin item.
self.db.skinned_yet = False
self.db.pelt_name = "a giant rat pelt"
self.db.attack_message_1 = "A giant rat hurls itself bodily into "
self.db.attack_message_2 = "A giant rat claws and bites at "
self.db.attack_message_3 = "With a resounding crunching sound, a giant rat bites into "
def npc_active_ticks(self, *args, **kwargs):
"Ticks after the NPC has been attacked."
targets = False # Any targets in the room?
# This should probably go below.
if self.db.tries_left > 0:
for i in self.location.contents:
if i in self.db.offended_by:
targets = True
npc_rules.attack(self, i)
                    self.db.tries_left = self.db.tries
return
if not targets:
for k, v in self.location.db.trails.iteritems():
target_name = str(self.db.offended_by[0])
if k == target_name:
destination = self.search(v)
self.move_to(destination)
for i in self.location.contents:
if i in self.db.offended_by:
targets = True
npc_rules.attack(self, i)
                            self.db.tries_left = self.db.tries
break
break
self.db.tries_left = self.db.tries_left - 1
if self.db.tries_left < 0:
self.db.offended_by = []
self.db.tries_left = self.db.tries
tickerhandler.remove(self.db.ticker_speed, self.npc_active_ticks)
return
def npc_revive_ticks(self, *args, **kwargs):
"ticked when "
self.db.alive = True
self.name = self.db.live_name
self.db.health = self.db.max_health
self.db.looted_yet = False
self.db.skinned_yet = False
destination = self.search(self.db.home_location, global_search=True)
self.move_to(destination)
tickerhandler.remove(self.db.respawn_speed, self.npc_revive_ticks)
return
class Combat_Merchant_Mob(DefaultObject): # This mob will not attack people but it will defend itself from attack.
"""
"""
def at_object_creation(self):
# Inherit the object properties.
super(Combat_Merchant_Mob, self).at_object_creation()
self.cmdset.add(MerchantCmdset, permanent=True)
self.aliases.add([])
#self.name = "a ruddy bronze broadsword" # not sure if I need this
self.db.live_name = "a giant rat"
        self.db.defeated_name = "the mangled remains of some large vermin" # must not have 'rat' in it or it can't be targeted!
self.db.alive = True
self.db.desc = ""
self.db.trade_item = "pelts"
self.db.health = 135
self.db.max_health = 135 # NPC damage, (player level * strength) * weapon damage ratio.
# So a level 1 would do 10 damage a hit, then 20, then 30, up to 1,000 per hit at level 100.
self.db.damage_amount = 30
self.db.ticker_speed = 3 # how often it attempts to attack or move/attack if a target is not found. This will only fire so many times before they 'forget'.
self.db.counter_attack_chance = False # integer chance this npc will trigger a counter-attack. Defaults as false.
self.db.respawn_speed = 600 # SHOULD BE A MULTIPLE OF 100
self.db.tries = 3 # how long it will spend trying to find its attacker before shutting down.
self.db.exp_level = 10 # this is the relative level of the creature
self.db.exp_multiplier = 4 # If you're under the level, subtract player level from NPC level and multiply by the multiplier.
self.db.exp_max_level = 20 # At this level you won't gain any experience from killing this NPC.
self.db.home_location = "#2" # This should be set!
# So normally any kill is worth 1% exp.
# But if your level is under the npc's level, you get a bonus
# The bonus is level difference * multiplier.
# This multiplier equation could similarly be used when attacking people below your current level, so you might
# level up multiple times from killing a high-level person.
self.db.offended_by = []
self.db.lootable = True #can be LOOTed for silver.
self.db.looted_yet = False
self.db.silver_amount = 10
self.db.skinnable = False #can be SKINNED for a pelt or skin item.
self.db.skinned_yet = False
self.db.pelt_name = "a giant rat pelt"
self.db.attack_message_1 = "A giant rat hurls itself bodily into "
self.db.attack_message_2 = "A giant rat claws and bites at "
self.db.attack_message_3 = "With a resounding crunching sound, a giant rat bites into "
def npc_active_ticks(self, *args, **kwargs):
"Ticks after the NPC has been attacked."
targets = False # Any targets in the room?
# This should probably go below.
if self.db.tries_left > 0:
for i in self.location.contents:
if i in self.db.offended_by:
targets = True
npc_rules.attack(self, i)
                    self.db.tries_left = self.db.tries
return
if not targets:
for k, v in self.location.db.trails.iteritems():
target_name = str(self.db.offended_by[0])
if k == target_name:
destination = self.search(v)
self.move_to(destination)
for i in self.location.contents:
if i in self.db.offended_by:
targets = True
npc_rules.attack(self, i)
                            self.db.tries_left = self.db.tries
break
break
self.db.tries_left = self.db.tries_left - 1
if self.db.tries_left < 0:
self.db.offended_by = []
self.db.tries_left = self.db.tries
tickerhandler.remove(self.db.ticker_speed, self.npc_active_ticks)
return
def npc_revive_ticks(self, *args, **kwargs):
"ticked when "
self.db.alive = True
self.name = self.db.live_name
self.db.health = self.db.max_health
self.db.looted_yet = False
self.db.skinned_yet = False
destination = self.search(self.db.home_location, global_search=True)
self.move_to(destination)
tickerhandler.remove(self.db.respawn_speed, self.npc_revive_ticks)
return | mit | 1,308,225,917,950,228,500 | 38.770213 | 164 | 0.590145 | false | 3.795166 | false | false | false |
djeshkov/guap | js.py | 1 | 10421 | from __future__ import absolute_import, print_function, division
from pony.py23compat import int_types, basestring, imap, iteritems
import json
from operator import attrgetter
from collections import defaultdict
from datetime import date, datetime
from decimal import Decimal
from pony.orm.core import Attribute, Set, Entity, EntityMeta, TransactionError, db_session, flush
# PermissionError, get_current_user, get_current_user_groups
# can_view, can_edit, can_delete
from pony.utils import throw, cut_traceback
__all__ = 'basic_converter', 'get_schema_dict', 'get_schema_json', 'to_json', 'save_changes'
def basic_converter(x):
if isinstance(x, (datetime, date, Decimal)):
return str(x)
if isinstance(x, dict):
return dict(x)
if isinstance(x, Entity):
pkval = x._get_raw_pkval_()
return pkval[0] if len(pkval) == 1 else pkval
try: iter(x)
except: raise TypeError(x)
return list(x)
def get_schema_dict(db):
result = []
for entity in sorted(db.entities.values(), key=attrgetter('_id_')):
# if not can_view(entity): continue
attrs = []
for attr in entity._new_attrs_:
d = dict(name = attr.name, type = attr.py_type.__name__, kind = attr.__class__.__name__)
if attr.auto: d['auto'] = True
if attr.reverse:
# if not can_view(attr.reverse.entity): continue
d['reverse'] = attr.reverse.name
if attr.lazy: d['lazy'] = True
if attr.nullable: d['nullable'] = True
if attr.default and issubclass(type(attr.default), (int_types, basestring)):
d['defaultValue'] = attr.default
attrs.append(d)
d = dict(name=entity.__name__, newAttrs=attrs, pkAttrs=[ attr.name for attr in entity._pk_attrs_ ])
if entity._all_bases_:
d['bases'] = [ base.__name__ for base in entity._all_bases_ ]
if entity._simple_keys_:
d['simpleKeys'] = [ attr.name for attr in entity._simple_keys_ ]
if entity._composite_keys_:
d['compositeKeys'] = [ [ attr.name for attr in attrs ] for attrs in entity._composite_keys_ ]
result.append(d)
return result
def get_schema_json(db):
return json.dumps(get_schema_dict(db), default=basic_converter)
@cut_traceback
def to_json(database, data, include=(), exclude=(), converter=None, with_schema=True):
for attrs, param_name in ((include, 'include'), (exclude, 'exclude')):
for attr in attrs:
if not isinstance(attr, Attribute): throw(TypeError,
"Each item of '%s' list should be attribute. Got: %s" % (param_name, attr))
include, exclude = set(include), set(exclude)
if converter is None: converter = basic_converter
# def user_has_no_rights_to_see(obj, attr=None):
# user_groups = get_current_user_groups()
# throw(PermissionError, 'The current user %s which belongs to groups %s '
# 'has no rights to see the object %s on the frontend'
# % (get_current_user(), sorted(user_groups), obj))
object_set = set()
caches = set()
def obj_converter(obj):
if not isinstance(obj, Entity): return converter(obj)
caches.add(obj._session_cache_)
if len(caches) > 1: throw(TransactionError,
'An attempt to serialize objects belonging to different transactions')
# if not can_view(obj):
# user_has_no_rights_to_see(obj)
object_set.add(obj)
pkval = obj._get_raw_pkval_()
if len(pkval) == 1: pkval = pkval[0]
return { 'class': obj.__class__.__name__, 'pk': pkval }
data_json = json.dumps(data, default=obj_converter)
objects = {}
if caches:
cache = caches.pop()
if cache.database is not database:
throw(TransactionError, 'An object does not belong to specified database')
object_list = list(object_set)
objects = {}
for obj in object_list:
if obj in cache.seeds[obj._pk_attrs_]: obj._load_()
entity = obj.__class__
# if not can_view(obj):
# user_has_no_rights_to_see(obj)
d = objects.setdefault(entity.__name__, {})
for val in obj._get_raw_pkval_(): d = d.setdefault(val, {})
assert not d, d
for attr in obj._attrs_:
if attr in exclude: continue
if attr in include: pass
# if attr not in entity_perms.can_read: user_has_no_rights_to_see(obj, attr)
elif attr.is_collection: continue
elif attr.lazy: continue
# elif attr not in entity_perms.can_read: continue
if attr.is_collection:
if not isinstance(attr, Set): throw(NotImplementedError)
value = []
for item in attr.__get__(obj):
if item not in object_set:
object_set.add(item)
object_list.append(item)
pkval = item._get_raw_pkval_()
value.append(pkval[0] if len(pkval) == 1 else pkval)
value.sort()
else:
value = attr.__get__(obj)
if value is not None and attr.is_relation:
if attr in include and value not in object_set:
object_set.add(value)
object_list.append(value)
pkval = value._get_raw_pkval_()
value = pkval[0] if len(pkval) == 1 else pkval
d[attr.name] = value
objects_json = json.dumps(objects, default=converter)
if not with_schema:
return '{"data": %s, "objects": %s}' % (data_json, objects_json)
schema_json = get_schema_json(database)
return '{"data": %s, "objects": %s, "schema": %s}' % (data_json, objects_json, schema_json)
@cut_traceback
@db_session
def save_changes(db, changes, observer=None):
changes = json.loads(changes)
# import pprint; pprint.pprint(changes)  # debug dump of the incoming diff
objmap = {}
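# First pass: map frontend _id_ values to existing objects looked up by primary
# key; newly created objects ('c') are instantiated in the next loop.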
for diff in changes['objects']:
if diff['_status_'] == 'c': continue
pk = diff['_pk_']
pk = (pk,) if type(pk) is not list else tuple(pk)
entity_name = diff['class']
entity = db.entities[entity_name]
obj = entity._get_by_raw_pkval_(pk, from_db=False)
oid = diff['_id_']
objmap[oid] = obj
def id2obj(attr, val):
return objmap[val] if attr.reverse and val is not None else val
# def user_has_no_rights_to(operation, obj):
# user_groups = get_current_user_groups()
# throw(PermissionError, 'The current user %s which belongs to groups %s '
# 'has no rights to %s the object %s on the frontend'
# % (get_current_user(), sorted(user_groups), operation, obj))
for diff in changes['objects']:
entity_name = diff['class']
entity = db.entities[entity_name]
dbvals = {}
newvals = {}
for name, val in diff.items():
if name not in ('class', '_pk_', '_id_', '_status_'):
attr = entity._adict_[name]
if not attr.is_collection:
if type(val) is dict:
if 'old' in val: dbvals[attr] = attr.validate(id2obj(attr, val['old']))
if 'new' in val: newvals[attr.name] = attr.validate(id2obj(attr, val['new']))
else: newvals[attr.name] = attr.validate(id2obj(attr, val))
oid = diff['_id_']
status = diff['_status_']
if status == 'c':
assert not dbvals
obj = entity(**newvals)
if observer:
flush() # in order to get obj.id
observer('create', obj, newvals)
objmap[oid] = obj
# if not can_edit(obj): user_has_no_rights_to('create', obj)
else:
obj = objmap[oid]
if status == 'd':
# if not can_delete(obj): user_has_no_rights_to('delete', obj)
if observer: observer('delete', obj)
obj.delete()
elif status == 'u':
# if not can_edit(obj): user_has_no_rights_to('update', obj)
if newvals:
assert dbvals
if observer:
oldvals = dict((attr.name, val) for attr, val in iteritems(dbvals))
observer('update', obj, newvals, oldvals)
obj._db_set_(dbvals) # dbvals can be modified here
for attr in dbvals: attr.__get__(obj)
obj.set(**newvals)
else: assert not dbvals
objmap[oid] = obj
flush()
for diff in changes['objects']:
if diff['_status_'] == 'd': continue
obj = objmap[diff['_id_']]
entity = obj.__class__
for name, val in diff.items():
if name not in ('class', '_pk_', '_id_', '_status_'):
attr = entity._adict_[name]
if attr.is_collection and attr.reverse.is_collection and attr < attr.reverse:
removed = [ objmap[oid] for oid in val.get('removed', ()) ]
added = [ objmap[oid] for oid in val.get('added', ()) ]
collection = attr.__get__(obj)
if removed:
if observer: observer('remove', obj, {name: removed})
collection.remove(removed)
if added:
if observer: observer('add', obj, {name: added})
collection.add(added)
flush()
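# Resolve {class, _pk_, _id_} references inside changes['data'] back to live
# entity instances, recursing through lists and dicts.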
def deserialize(x):
t = type(x)
if t is list: return list(imap(deserialize, x))
if t is dict:
if '_id_' not in x:
return dict((key, deserialize(val)) for key, val in iteritems(x))
obj = objmap.get(x['_id_'])
if obj is None:
entity_name = x['class']
entity = db.entities[entity_name]
pk = x['_pk_']
obj = entity[pk]
return obj
return x
return deserialize(changes['data'])
| gpl-2.0 | 3,923,570,456,196,481,500 | 42.061983 | 107 | 0.52922 | false | 4.003458 | false | false | false |
gibiansky/tensorflow | tensorflow/python/client/session_test.py | 3 | 64506 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[4])
self.assertEqual(63.0, sess.run(v))
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, dtype=a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
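# Round-trips every supported dtype and several shapes (including empty ones)
# through a placeholder feed and an identity fetch.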
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
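# partial_run_setup() returns a handle through which r1 and r2 can be evaluated
# incrementally over several partial_run() calls, feeding inputs as they
# become available.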
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
with session.Session() as sess:
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.mul(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(squared_tensor, feed_dict={
squared_tensor : np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
if __name__ == '__main__':
googletest.main()
| apache-2.0 | -1,216,164,355,176,531,000 | 39.366708 | 80 | 0.61709 | false | 3.319405 | true | false | false |
appi147/Jarvis | installer/steps/d_optional.py | 1 | 1996 | from helper import *
import optional
section("Check optional requirements")
requirements_failed = []
def check_optional_requirement(requirement):
    if 'pip' in requirement:
        return shell("pip install -U {}".format(" ".join(requirement['pip'])),
                     True).success()
    elif 'executable' in requirement:
        return all(executable_exists(executable)
                   for executable in requirement['executable'])
    return False
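# Shape of a requirement entry as consumed above (an illustrative example;
# the concrete values are assumptions, not taken from optional.py):
# {'name': 'ffmpeg', 'description': 'audio/video support',
#  'executable': ['ffmpeg'],
#  'instruction': 'Install ffmpeg with your system package manager'}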
for requirement in optional.OPTIONAL_REQUIREMENTS:
if check_optional_requirement(requirement):
printlog("* Success: {}".format(requirement['name']))
else:
printlog("* Fail: {}".format(requirement['name']))
text = "{} - {}".format(requirement['name'], requirement['description'])
requirements_failed.append((text, requirement))
section("Install *optional* non-python requirements")
requirements_failed.append(("Install nothing", 'exit'))
while True:
requirement = user_input(requirements_failed)
print('')
print('')
if requirement == 'exit':
break
guess = None
printlog(requirement['name'])
printlog(requirement['instruction'])
if 'package_guess' in requirement.keys():
package = optional.get_guess(requirement['package_guess'])
if package is not False:
package_manager = optional.get_guess(optional.PackageManager)
cmd = "{} {}".format(package_manager, package)
print("\nOur Guess how to install:\n>{}".format(cmd))
print('')
input('continue ')
print('')
print('')
print('')
if check_optional_requirement(requirement):
printlog('Success!')
        requirements_failed = [entry for entry in requirements_failed
                               if entry[1] is not requirement]
else:
printlog('Sorry; but looks like this did not work...')
print('')
| mit | -4,382,335,933,048,287,700 | 27.112676 | 91 | 0.617735 | false | 4.696471 | false | false | false |
jackellice2/Population-Simulator | popgen.py | 1 | 21723 | from random import random
from random import choice
from math import floor
import names
'''
Names is required to run this script.
download from https://pypi.python.org/pypi/names/.
'''
elder_population = 0
married_population = 0
single_population = 0
runtime = 0
born_this_year = 0
living = []
the_dead = []
'''
Global variables that store
statistical and simulation state.
'''
class Person(object):
'''
This creates a class called person.
'''
def __init__(self, name, gender, value, dad = "None", mom = "None"):
'''
Everything a person needs to be a person
is contained here.
'''
self.value = value
self.name = name
self.gender = gender
self.age = 0
self.status = "alive"
self.single = True
        self.love_value = 0
        self.love = ""
        # flagged for removal^
self.dad = dad
self.mom = mom
self.fertility = round(random() * 10)
self.want_child = round(random() * 5)
self.children = 0
self.child_name = []
self.friends = {}
self.mood = 0
self.health = 0
self.personality = round(random()*10)
self.antisocial = round(random()*10)
self.shallow = round(random()*20)-10
self.charisma = round(random()*20)-10
self.love_status = "None"
self.child_status = []
self.hunger = 0
def __str__(self):
'''
This displays all attributes of the person.
'''
        if self.single:
            return "My name is %s and I am %s and I am %s.\nI want to have %s children.\n" % (self.name, self.gender,
                                                                                              self.age, self.want_child)
elif self.children == 0:
return "My name is %s and I am %s and I am %s.\n I am married to %s and I want to have %s children.\n" % (self.name, self.gender, self.age,
self.love, self.want_child)
else:
return "My name is %s and I am %s and I am %s.\n I am married to %s and I have %s children.\n" % (self.name, self.gender, self.age,
self.love, self.children)
def get_age(self):
'''
This is a method to get the age
'''
return "%s is %s years old" % (self.name, self.age)
def get_family(self):
'''
This method gets the family members
'''
        if (self.dad == "None"):
if not self.single:
if self.children > 0:
return "%s is married to %s.\n%s has %s kids named %s." % (self.name, self.love, self.name,
self.children,self.child_name)
else:
return "%s is married to %s.\n%s has no kids" % (self.name, self.love, self.name)
else:
return "%s has no family!" % self.name
elif not self.single:
if self.children > 0:
return "%s parents are %s and %s\n%s is married to %s.\n%s has %s kids named %s." % (self.name, self.dad, self.mom,
self.name, self.love, self.name,
self.children,self.child_name)
else:
return "%s parents are %s and %s\n%s is married to %s.\n%s has no kids" % (self.name, self.dad, self.mom,
self.name, self.love, self.name)
else:
return "%s parents are %s and %s" % (self.name, self.dad, self.mom)
def get_friends(self):
'''
This method lists the friends the person has
'''
for key in self.friends:
print key, self.friends[key]
'''
functions that are called by elapse_time()
'''
def elapse_time(years=100):
'''
This moves time forward in the simulation.
'''
    global runtime
    global born_this_year
    global single_population
    global elder_population
    elder_population = 0
    single_population = 0
    born_this_year = 0
    print "running simulation for %s years..." % years
    print "Would you like to monitor the population? (y/n)"
    response = raw_input()
    for i in range(years):
        t = 0
        while t < len(living):
            person = living[t]
            time(person)
            # time() removes the dead from 'living', so only advance the
            # index while the current person is still in the list.
            if person.status != "dead":
                t += 1
        runtime += 1
        if response == "y":
            print "Population is %s in the year %s" % (len(living), runtime)
    sim_stats.present_stats(years)
class Statistics(object):
"""
statistical data & methods stored here.
"""
def __init__(self):
self.counter = 0
self.name = ""
    def most_popular(self):
        global single_population
        single_population = 0
        self.counter = 0
        for t in living:
            if t.single:
                single_population += 1
            if len(t.friends) > self.counter:
                self.counter = len(t.friends)
                self.name = t.name
    def present_stats(self, years):
        self.most_popular()
        print "\nSimulation ran for %s years this time. Total runtime %s" % (str(years), runtime)
        print "Population is %s" % len(living)
        print "\nOut of %s people, %s made it to their 80s" % (len(living), elder_population)
        print "%s babies were born in %s years" % (born_this_year, years)
        print "Out of %s people, %s married and have changed their surnames" % (len(living), married_population)
        print "Out of %s people, %s never married" % (len(living), single_population)
        print "%s have died since the beginning of this simulation." % len(the_dead)
        print "%s has the most friends" % self.name
    def get_info(self, name):
        '''
        A function that searches the list of living people for a match.
        '''
        if type(name) == str and len(living) > 0:
            for person in living:
                if person.name == name:
                    return person.__str__()
        else:
            return "Invalid entry. Please enter a string."
    def who_is(self, s):
        '''
        Lists people's names from the given list.
        '''
        if (s is living or s is the_dead):
            for person in s:
                print person.name
    def count_people(self, s):
        '''
        Counts people in the given list, or everyone ever created.
        '''
        if (s is the_dead or s is living):
            return len(s)
        else:
            return len(living) + len(the_dead)
    def who_is_married(self, s="all"):
        '''
        A function that lists married people.
        '''
        if s == "all":
            s = living + the_dead
        for person in s:
            if not person.single:
                print person.name
    def who_is_has_children(self, s="all", t=True):
        '''
        Lists who has children (t=True) or who has none (t=False).
        '''
        if s == "all":
            s = living + the_dead
        for person in s:
            if t and person.children > 0:
                print person.name
            elif not t and person.children < 1:
                print person.name
    def count_has_children(self, s="all", t=True):
        '''
        Counts parents (t=True) or the childless (t=False).
        '''
        if s == "all":
            s = living + the_dead
        counter = 0
        for person in s:
            if t and person.children > 0:
                counter += 1
            elif not t and person.children < 1:
                counter += 1
        return counter
    def count_married(self, s=0):
        '''
        Counts married people in the given list, or returns the running
        total when no list is given.
        '''
        if (s is the_dead or s is living):
            counter = 0
            for person in s:
                if not person.single:
                    counter += 1
            return counter
        else:
            return married_population
def time(your):
'''
This simulates a year of living for the person
and his likelihood of dying that year
'''
global living
global elder_population
global born_this_year
your.age += 1
    if your.age == 80:
        elder_population += 1
if round(random() * 100)/100 + float(your.age) / 800 > 1:
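        # random() + age/800 exceeding 1 happens with probability roughly
        # age/800 -- about 10% per year at age 80 (a reading of the formula
        # above, not an author's note).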
your.status = "dead"
the_dead.append(living[your.value])
        if not your.single:
            # Point the spouse (alive or already dead) at this person's new
            # index in the_dead.
            if your.love_status:
                partner = living[your.love_value]
            else:
                partner = the_dead[your.love_value]
            partner.love_status = False
            partner.love_value = len(the_dead) - 1
        # Deleting this person shifts everyone after them one slot left, so
        # fix up the stored indices first.
        for person in living[your.value + 1:]:
            person.value -= 1
        for person in living:
            if (not person.single and person.love_status and
                    person.love_value > your.value):
                person.love_value -= 1
        del living[your.value]
else:
make_friends(your) #Every year entities meet new people
if your.single:
get_love(your)
make_friends(your) #And have a chance to find love.
if not your.single and your.love_status:
born_this_year += repro(your, living[your.love_value])
def make_friends(your):
    '''
    Allows people to gain friends.
    '''
    random_factor = int(round(((your.age / 100.0) + random()) * 10))
    for i in range(random_factor):
        their = living[int(round(random() * (len(living) - 1)))]
        if their is your or their.name in your.friends:
            continue
        test_of_friendship(your, their)
def test_of_friendship(your, their):
    '''
    The initial test of friendship between strangers.
    '''
    personality_score = (your.personality + their.personality) - (your.antisocial + their.antisocial)
    attraction = (your.charisma + their.shallow + your.shallow + their.charisma)
    total_score = personality_score + attraction * random()
    if total_score > (your.antisocial + their.antisocial) * random():
        your.friends[their.name] = their.charisma + their.personality
        their.friends[your.name] = your.charisma + your.personality
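# A feel for the friendship test above (illustrative numbers): personalities
# 7 and 6 with antisocial scores 2 and 3 give a base score of 8, so unless
# the attraction roll is strongly negative it beats the antisocial roll of
# at most (2 + 3) * random() = 5.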
def get_love(your):
    '''
    This function searches for a partner.
    '''
    if (your.age > 18 and your.single):
        global married_population
        for i in range(5):
            y = int(round(random() * (len(living) - 1)))
            them = living[y]
            if (them is not your and your.gender != them.gender and
                    them.age > 18 and them.status == "alive" and
                    them.single):
                #print "%s courts %s" % (your.name, them.name)
                if (round(random() * your.age) / 40) > 0:
                    your.single = False
                    them.single = False
                    your.love_status = True
                    them.love_status = True
                    if your.gender == "female":
                        your.name = changeName(your.name, them.name)
                    else:
                        them.name = changeName(them.name, your.name)
                    married_population += 2
                    them.love_value = your.value
                    your.love_value = y
                    your.love = them.name
                    them.love = your.name
                    break
def changeName (hers, his):
"""
This changes the wife's surname
"""
    newName = ""
for i in range(len(hers)):
if hers [i] == " ":
newName = hers [:i]
break
for i in range (len(his)):
if his[i] == " ":
newName = newName + his [i:]
break
return newName
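# Worked example: changeName("Mary Smith", "John Doe") returns "Mary Doe" --
# everything before her first space is kept and his surname is appended.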
def repro(his, hers):
"""
This function tests if a couple will have a child.
"""
global born_this_year
    fertilityrate = ((his.fertility + hers.fertility) *
                     (1 - ((his.age + hers.age) / 100.0))) / 2
if (his.children < (round((his.want_child + hers.want_child) / 2)) and random()*fertilityrate > 1):
his.children += 1
hers.children += 1
gender = choice(["male", "female"])
child_name = changeName (str(names.get_first_name(gender))+" ",his.name)
living.append(Person(str(child_name), gender, len(living),his.name,hers.name))
his.child_name.append(child_name)
hers.child_name.append(child_name)
return 1
else:
return 0
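# A quick feel for the fertility math above (illustrative arithmetic): with
# fertility 5 + 5 and ages 25 + 25, fertilityrate = (10 * (1 - 0.5)) / 2 = 2.5,
# so random() * 2.5 > 1 succeeds about 60% of the time.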
"""
Simulation setup and restart functions below
"""
"""
Information gathering functions below
"""
'''
The menu activates on startup.
'''
def main_menu():
answer = ""
while answer != "0":
'''
This is the main menu where the simulation
is controlled from.
'''
print "\nWhat would you like to do?"
print "1. Start Simulation\n2. Elapse Simulation\n3. Population Information\n4. Quick Start\n5. Restart Simulation\n0. Quit"
answer = raw_input()
if answer == "1" or answer == "5":
print "\nhow large of a population would you like to simulate? 100 should be the max."
answer = raw_input()
            if not answer.isdigit():
print "\nApologies. You entered an invalid input.\n \n"
else:
restart(int(answer))
elif answer == "2":
print "\nhow long do you wish to elapse? no more than 300."
answer = raw_input()
            if not answer.isdigit():
print "\nApologies. You entered an invalid input.\n \n"
else:
elapse_time(int(answer))
elif answer == "4":
restart(20)
elapse_time(200)
elif answer == "3":
"""
This is where the crap starts.
Statistics galore!
God help me.
"""
while answer != "0":
print "\n1. Count alive\n2. Count dead\n3. Count married\n4. Name search \n5. List Alive \n6. List dead\n7. List Married\n8. List Married and Alive\n9. List Married and Dead\n10. Count Has Children and Alive\n0. Return"
answer = raw_input()
if answer == "1":
"""
Count alive
"""
print sim_stats.count_people(living)
elif answer == "2":
"""
Count dead
"""
print sim_stats.count_people(the_dead)
elif answer == "3":
print sim_stats.count_married()
elif answer == "4":
print "\nPlease enter his or her name."
answer = raw_input()
                if answer == "":
                    print "\nApologies. You entered an invalid input.\n \n"
                else:
                    search_value = "nothing"
                    found_name = ""
                    for index, person in enumerate(living):
                        if answer == person.name:
                            search_value = index
                            found_name = person.name
                            break
                    if search_value != "nothing":
                        print "found %s! What do you want to do?" % found_name
                        while answer != "0":
                            print "\n1. About %s\n2. Family\n3. Age\n4. Friends\n0. Return" % found_name
answer = raw_input()
                            if answer == "":
print "\nApologies. You entered an invalid input.\n \n"
elif answer == "1":
print "searching..."
print living[search_value].__str__()
elif answer == "2":
print living[search_value].get_family()
elif answer == "3":
print living[search_value].get_age()
elif answer == "4":
print living[search_value].get_friends()
elif answer == "0":
pass
else:
print "\nCould you repeat that? \n \n"
answer = 1
else:
print "Didn't find answer."
elif answer == "5":
                sim_stats.who_is(living)
elif answer == "6":
sim_stats.who_is(the_dead)
elif answer == "7":
sim_stats.who_is_married()
elif answer == "8":
sim_stats.who_is_married(living)
elif answer == "9":
sim_stats.who_is_married(the_dead)
elif answer == "10":
print sim_stats.count_has_children(living)
answer = 1
print "\nreturning to main menu"
else:
print "\nCould you repeat that? \n \n"
def sim_setup(p):
'''
This starts the simulation by preparing
the first group of people.
'''
print "\nJust a moment...\n\n"
    for i in range(p):
        gender = choice(["male", "female"])
        living.append(Person(str(names.get_full_name(gender)), gender, i))
print "%s people successfully created!\n" % len(living)
def restart(p):
'''
Restarts the simulation.
'''
global living
global the_dead
global runtime
global elder_population
global married_population
global single_population
runtime = 0
elder_population = 0
married_population = 0
single_population = 0
del living[:]
del the_dead[:]
sim_setup(p)
sim_stats = Statistics()
main_menu()
print "\nGood Bye!"
| mit | 6,692,986,190,573,323,000 | 31.617117 | 235 | 0.458086 | false | 4.244431 | false | false | false |
pellaeon/bsd-cloudinit | cloudbaseinit/tests/plugins/windows/test_extendvolumes.py | 5 | 9719 | # Copyright (c) 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import re
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.tests import testutils
class ExtendVolumesPluginTests(unittest.TestCase):
def setUp(self):
self._ctypes_mock = mock.MagicMock()
self._comtypes_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'comtypes': self._comtypes_mock,
'ctypes': self._ctypes_mock})
self._module_patcher.start()
extendvolumes = importlib.import_module('cloudbaseinit.plugins.'
'windows.extendvolumes')
self._extend_volumes = extendvolumes.ExtendVolumesPlugin()
def tearDown(self):
self._module_patcher.stop()
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes'
'.ExtendVolumesPlugin._get_volume_index')
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes'
'.ExtendVolumesPlugin._extend_volume')
@mock.patch('cloudbaseinit.utils.windows.vds.IVdsVolume')
def test_extend_volumes(self, _vds_mock, mock_extend_volume,
mock_get_volume_index):
mock_pack = mock.MagicMock()
mock_volume_idxs = mock.MagicMock()
mock_enum = mock.MagicMock()
mock_unk = mock.MagicMock()
mock_c = mock.MagicMock()
mock_volume = mock.MagicMock()
mock_properties = mock.MagicMock()
mock_pack.QueryVolumes.return_value = mock_enum
mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)]
mock_unk.QueryInterface.return_value = mock_volume
mock_volume.GetProperties.return_value = mock_properties
self._ctypes_mock.wstring_at.return_value = 'fake name'
mock_get_volume_index.return_value = mock_volume_idxs
self._extend_volumes._extend_volumes(mock_pack, [mock_volume_idxs])
mock_pack.QueryVolumes.assert_called_once_with()
mock_enum.Next.assert_called_with(1)
mock_unk.QueryInterface.assert_called_once_with(_vds_mock)
mock_volume.GetProperties.assert_called_once_with()
self._ctypes_mock.wstring_at.assert_called_with(
mock_properties.pwszName)
mock_get_volume_index.assert_called_once_with('fake name')
mock_extend_volume.assert_called_once_with(mock_pack, mock_volume,
mock_properties)
self._ctypes_mock.windll.ole32.CoTaskMemFree.assert_called_once_with(
mock_properties.pwszName)
def test_get_volume_index(self):
mock_value = mock.MagicMock()
re.match = mock.MagicMock(return_value=mock_value)
mock_value.group.return_value = '9999'
response = self._extend_volumes._get_volume_index('$2')
mock_value.group.assert_called_once_with(1)
self.assertTrue(response == 9999)
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes'
'.ExtendVolumesPlugin._get_volume_extents_to_resize')
@mock.patch('cloudbaseinit.utils.windows.vds.VDS_INPUT_DISK')
def test_extend_volume(self, mock_VDS_INPUT_DISK,
mock_get_volume_extents_to_resize):
mock_disk = mock.MagicMock()
mock_pack = mock.MagicMock()
mock_volume = mock.MagicMock()
mock_properties = mock.MagicMock()
mock_volume_extent = mock.MagicMock()
mock_async = mock.MagicMock()
mock_get_volume_extents_to_resize.return_value = [(mock_volume_extent,
9999)]
mock_VDS_INPUT_DISK.return_value = mock_disk
mock_volume.Extend.return_value = mock_async
self._extend_volumes._extend_volume(mock_pack, mock_volume,
mock_properties)
mock_get_volume_extents_to_resize.assert_called_once_with(
mock_pack, mock_properties.id)
self._ctypes_mock.wstring_at.assert_called_with(
mock_properties.pwszName)
mock_volume.Extend.assert_called_once_with(
mock_VDS_INPUT_DISK.__mul__()(), 1)
mock_async.Wait.assert_called_once_with()
@mock.patch('cloudbaseinit.utils.windows.vds.IVdsDisk')
@mock.patch('cloudbaseinit.utils.windows.vds.VDS_DISK_EXTENT')
def test_get_volume_extents_to_resize(self, mock_VDS_DISK_EXTENT,
mock_IVdsDisk):
mock_pack = mock.MagicMock()
mock_extents_p = mock.MagicMock()
mock_unk = mock.MagicMock()
mock_c = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_enum = mock.MagicMock()
fake_volume_id = '$1'
mock_array = mock.MagicMock()
mock_array.volumeId = fake_volume_id
mock_pack.QueryDisks.return_value = mock_enum
mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)]
mock_unk.QueryInterface.return_value = mock_disk
mock_disk.QueryExtents.return_value = (mock_extents_p,
1)
mock_VDS_DISK_EXTENT.__mul__().from_address.return_value = [mock_array]
response = self._extend_volumes._get_volume_extents_to_resize(
mock_pack, fake_volume_id)
mock_pack.QueryDisks.assert_called_once_with()
mock_enum.Next.assert_called_with(1)
mock_unk.QueryInterface.assert_called_once_with(mock_IVdsDisk)
self._ctypes_mock.addressof.assert_called_with(mock_extents_p.contents)
mock_VDS_DISK_EXTENT.__mul__().from_address.assert_called_with(
self._ctypes_mock.addressof(mock_extents_p.contents))
self._ctypes_mock.pointer.assert_called_once_with(
mock_VDS_DISK_EXTENT())
self.assertEqual([], response)
self._ctypes_mock.windll.ole32.CoTaskMemFree.assert_called_with(
mock_extents_p)
@mock.patch('cloudbaseinit.utils.windows.vds.'
'VDS_QUERY_SOFTWARE_PROVIDERS')
@mock.patch('cloudbaseinit.utils.windows.vds.IVdsSwProvider')
def test_query_providers(self, mock_IVdsSwProvider,
mock_VDS_QUERY_SOFTWARE_PROVIDERS):
mock_svc = mock.MagicMock()
mock_enum = mock.MagicMock()
mock_unk = mock.MagicMock()
mock_c = mock.MagicMock()
mock_svc.QueryProviders.return_value = mock_enum
mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)]
mock_unk.QueryInterface.return_value = 'fake providers'
response = self._extend_volumes._query_providers(mock_svc)
mock_svc.QueryProviders.assert_called_once_with(
mock_VDS_QUERY_SOFTWARE_PROVIDERS)
mock_enum.Next.assert_called_with(1)
mock_unk.QueryInterface.assert_called_once_with(mock_IVdsSwProvider)
self.assertEqual(['fake providers'], response)
@mock.patch('cloudbaseinit.utils.windows.vds.IVdsPack')
def test_query_packs(self, mock_IVdsPack):
mock_provider = mock.MagicMock()
mock_enum = mock.MagicMock()
mock_unk = mock.MagicMock()
mock_c = mock.MagicMock()
mock_provider.QueryPacks.return_value = mock_enum
mock_enum.Next.side_effect = [(mock_unk, mock_c), (None, None)]
mock_unk.QueryInterface.return_value = 'fake packs'
response = self._extend_volumes._query_packs(mock_provider)
mock_provider.QueryPacks.assert_called_once_with()
mock_enum.Next.assert_called_with(1)
mock_unk.QueryInterface.assert_called_once_with(mock_IVdsPack)
self.assertEqual(['fake packs'], response)
def test_get_volumes_to_extend(self):
with testutils.ConfPatcher('volumes_to_extend', '1'):
response = self._extend_volumes._get_volumes_to_extend()
self.assertEqual([1], response)
@mock.patch('cloudbaseinit.utils.windows.vds.load_vds_service')
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes.'
'ExtendVolumesPlugin._query_providers')
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes.'
'ExtendVolumesPlugin._query_packs')
@mock.patch('cloudbaseinit.plugins.windows.extendvolumes.'
'ExtendVolumesPlugin._extend_volumes')
def test_execute(self, mock_extend_volumes, mock_query_packs,
mock_query_providers, mock_load_vds_service):
mock_svc = mock.MagicMock()
fake_providers = ['fake providers']
fake_packs = ['fake packs']
mock_service = mock.MagicMock()
fake_data = 'fake data'
mock_load_vds_service.return_value = mock_svc
mock_query_providers.return_value = fake_providers
mock_query_packs.return_value = fake_packs
with testutils.ConfPatcher('volumes_to_extend', '1'):
self._extend_volumes.execute(mock_service, fake_data)
mock_query_providers.assert_called_once_with(mock_svc)
mock_query_packs.assert_called_once_with('fake providers')
mock_extend_volumes.assert_called_with('fake packs', [1])
| apache-2.0 | 3,875,034,955,939,940,000 | 43.99537 | 79 | 0.638646 | false | 3.692629 | true | false | false |
0x90sled/catapult | third_party/gsutil/third_party/boto/tests/unit/manage/test_ssh.py | 114 | 2004 | #!/usr/bin/env python
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import paramiko
from boto.manage.cmdshell import SSHClient
except ImportError:
paramiko = None
SSHClient = None
from tests.compat import mock, unittest
class TestSSHTimeout(unittest.TestCase):
@unittest.skipIf(not paramiko, 'Paramiko missing')
def test_timeout(self):
client_tmp = paramiko.SSHClient
def client_mock():
client = client_tmp()
client.connect = mock.Mock(name='connect')
return client
paramiko.SSHClient = client_mock
paramiko.RSAKey.from_private_key_file = mock.Mock()
server = mock.Mock()
test = SSHClient(server)
self.assertEqual(test._ssh_client.connect.call_args[1]['timeout'], None)
test2 = SSHClient(server, timeout=30)
self.assertEqual(test2._ssh_client.connect.call_args[1]['timeout'], 30)
| bsd-3-clause | 292,889,131,147,557,600 | 36.111111 | 80 | 0.717565 | false | 4.175 | true | false | false |
kanairen/RegularIcosahedronDict | src/view/qt_main.py | 1 | 12865 | #!/usr/bin/env python
# coding: utf-8
import sys
import time
import threading
import StringIO
from PyQt4 import QtGui, QtCore
from src.util.app_util import save_cache, load_cache
class MainWindow(QtGui.QMainWindow):
"""
    UI window for graphically operating the project.
"""
LABEL_TEXT_MODEL_PATH = "Model File (.off or .obj) Path"
LABEL_TEXT_GRID_PATH = "Grid Path"
LABEL_TEXT_CLA_PATH = ".cla File Path"
LABEL_TEXT_SAVE_PATH = "Save Path"
LABEL_TEXT_N_DIV_PATH = "N-Division"
LABEL_TEXT_GRID_SCALE_PATH = "Grid Scale"
LABEL_TEXT_GROUP = "Settings"
BUTTON_TEXT_FILE_DIALOG = "..."
BUTTON_TEXT_CREATE = "create"
DIALOG_TITLE_FILE = "open file"
DIALOG_TITLE_FOLDER = "choice folder"
FILE_DIALOG_INIT_PATH = "../res"
CACHE_PATH = "../.cache"
KEY_MODEL_PATH = "model_path"
KEY_GRID_PATH = "grid_path"
KEY_CLA_PATH = "cla_path"
KEY_SAVE_PATH = "save_path"
KEY_N_DIV = "n_div"
KEY_GRID_SCALE = "grid_scale"
class SygnalHost(QtCore.QObject):
"""
        An object holding a single signal.
"""
sygnal = QtCore.pyqtSignal()
def __init__(self, title, x, y, width, height, create_button_click_handler):
"""
        :type title: str
        :param title: window title
        :type x: int
        :param x: x coordinate of the window
        :type y: int
        :param y: y coordinate of the window
        :type width: int
        :param width: width of the window
        :type height: int
        :param height: height of the window
        :param create_button_click_handler: sub-handler called with the GUI
            input values when the create button is pressed
"""
super(MainWindow, self).__init__()
self.setGeometry(x, y, width, height)
self.setWindowTitle(title)
self.parent_widget = QtGui.QWidget()
self.create_button_click_handler = create_button_click_handler
### result layout ###
self.te_result = QtGui.QTextEdit(self)
vl_result = QtGui.QVBoxLayout()
vl_result.addWidget(self.te_result)
### path input layout ###
# text box
self.tb_model_path = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_MODEL_PATH)
self.tb_grid_path = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_GRID_PATH)
self.tb_cla_path = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_CLA_PATH)
self.tb_save_path = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_SAVE_PATH)
self.tb_n_div = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_N_DIV)
self.tb_grid_scale = self.get_cached_line_edit(MainWindow.CACHE_PATH,
MainWindow.KEY_GRID_SCALE)
# button
btn_fd_model_path = self.get_file_dialog_button(self.tb_model_path,
MainWindow.KEY_MODEL_PATH,
False)
btn_fd_grid_path = self.get_file_dialog_button(self.tb_grid_path,
MainWindow.KEY_GRID_PATH,
True)
btn_fd_cla_path = self.get_file_dialog_button(self.tb_cla_path,
MainWindow.KEY_CLA_PATH,
True)
btn_fd_save_path = self.get_file_dialog_button(self.tb_save_path,
MainWindow.KEY_SAVE_PATH,
False)
# path layout row
hl_model_path = self.get_file_path_layout(self.tb_model_path,
btn_fd_model_path)
hl_grid_path = self.get_file_path_layout(self.tb_grid_path,
btn_fd_grid_path)
hl_cla_path = self.get_file_path_layout(self.tb_cla_path,
btn_fd_cla_path)
hl_save_path = self.get_file_path_layout(self.tb_save_path,
btn_fd_save_path)
# path layout
vl_path = QtGui.QVBoxLayout()
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_MODEL_PATH))
vl_path.addLayout(hl_model_path)
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_GRID_PATH))
vl_path.addLayout(hl_grid_path)
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_CLA_PATH))
vl_path.addLayout(hl_cla_path)
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_SAVE_PATH))
vl_path.addLayout(hl_save_path)
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_N_DIV_PATH))
vl_path.addWidget(self.tb_n_div)
vl_path.addWidget(QtGui.QLabel(MainWindow.LABEL_TEXT_GRID_SCALE_PATH))
vl_path.addWidget(self.tb_grid_scale)
gw_path = QtGui.QGroupBox(MainWindow.LABEL_TEXT_GROUP)
gw_path.setLayout(vl_path)
vl_path_group = QtGui.QVBoxLayout()
vl_path_group.addWidget(gw_path)
### create button layout ###
self.btn_create = QtGui.QPushButton(self)
self.btn_create.setText(MainWindow.BUTTON_TEXT_CREATE)
vl_button = QtGui.QVBoxLayout()
vl_button.addWidget(self.btn_create)
self.connect(self.btn_create, QtCore.SIGNAL('clicked()'),
self.on_create_button_clicked)
# combine path input layout and create button layout.
vl_path_button = QtGui.QVBoxLayout()
vl_path_button.addLayout(vl_path_group)
vl_path_button.addLayout(vl_button)
# inflate
parent_layout = QtGui.QHBoxLayout()
parent_layout.addLayout(vl_result)
parent_layout.addLayout(vl_path_button)
# set layout
self.parent_widget.setLayout(parent_layout)
self.setCentralWidget(self.parent_widget)
self.show()
# start std-output to tb_result.
self.__show_stdout_as_result()
def get_cached_line_edit(self, cache_path, cache_key):
"""
        Returns a QLineEdit.
        The QLineEdit is pre-filled with the last value that was entered.
        :type cache_path: str
        :param cache_path: path where the cache is saved
        :type cache_key: str
        :param cache_key: key of the cached data
        :rtype: QtGui.QLineEdit
        :return: QLineEdit object loaded with the cached value
"""
line_edit = QtGui.QLineEdit(self)
cache = load_cache(cache_path, cache_key)
if cache is not None:
line_edit.setText(cache)
return line_edit
def get_file_dialog_button(self, line_edit, cache_key, is_file):
"""
        Returns a QPushButton that opens a file dialog.
        The path chosen in the file dialog is cached.
        :type line_edit: QtGui.QLineEdit
        :param line_edit: QLineEdit object the chosen path is written to
        :type cache_key: str
        :param cache_key: key of the cached data
        :type is_file: bool
        :param is_file: whether a file path is loaded (a directory path
            is loaded if False)
        :rtype: QtGui.QPushButton
        :return: QPushButton that opens a file dialog
"""
button = QtGui.QPushButton(self)
button.setText(MainWindow.BUTTON_TEXT_FILE_DIALOG)
def handler():
if is_file:
f_dialog = QtGui.QFileDialog.getOpenFileName
title = MainWindow.DIALOG_TITLE_FILE
else:
f_dialog = QtGui.QFileDialog.getExistingDirectory
title = MainWindow.DIALOG_TITLE_FOLDER
path = f_dialog(self, title, MainWindow.FILE_DIALOG_INIT_PATH)
line_edit.setText(path)
save_cache(MainWindow.CACHE_PATH, cache_key, path)
self.connect(button, QtCore.SIGNAL('clicked()'), handler)
return button
def get_file_path_layout(self, line_edit, button):
"""
        Returns a layout combining a QLineEdit for entering a file path
        with a QPushButton that opens a file dialog.
        :type line_edit: QtGui.QLineEdit
        :param line_edit: QLineEdit object
        :type button: QtGui.QPushButton
        :param button: QPushButton object
"""
hl_model_path = QtGui.QHBoxLayout()
hl_model_path.addWidget(line_edit)
hl_model_path.addWidget(button)
return hl_model_path
def set_on_create_button_click_handler(self, handler):
"""
        Sets the sub-handler called when the create button is pressed.
        :type handler: func(**kwarg)
        :param handler: sub-handler function
"""
self.create_button_click_handler = handler
def on_create_button_clicked(self):
"""
        Main handler called when the create button is pressed.
"""
try:
kwarg = {
MainWindow.KEY_MODEL_PATH: str(self.tb_model_path.text()),
MainWindow.KEY_GRID_PATH: str(self.tb_grid_path.text()),
MainWindow.KEY_CLA_PATH: str(self.tb_cla_path.text()),
MainWindow.KEY_SAVE_PATH: str(self.tb_save_path.text()),
MainWindow.KEY_N_DIV: int(str(self.tb_n_div.text())),
MainWindow.KEY_GRID_SCALE: float(
str(self.tb_grid_scale.text()))}
except (ValueError, TypeError), e:
if "n_div" in e.message:
QtGui.QMessageBox.critical(self, "",
"N-Division should be a number.")
elif MainWindow.KEY_GRID_SCALE in e.message:
QtGui.QMessageBox.critical(self, "",
"Grid Scale should be a number.")
else:
                QtGui.QMessageBox.critical(self, "",
                                           "Check paths in text boxes.")
            return
        # If the sub-handler is set, call it with the values entered in the GUI.
if self.create_button_click_handler is not None:
self.create_button_click_handler(kwarg)
def __show_stdout(self):
"""
        Fetches standard output as a string and displays it in the GUI.
"""
stdout_as_string_io = sys.stdout
stderr_as_string_io = sys.stderr
stdout_as_string_io.seek(0)
stderr_as_string_io.seek(0)
text_out = stdout_as_string_io.read()
text_err = stderr_as_string_io.read()
self.te_result.setText(
self.te_result.toPlainText() + text_out + text_err)
cursor = self.te_result.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
self.te_result.setTextCursor(cursor)
stdout_as_string_io.close()
stderr_as_string_io.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.stdout.write(text_out)
sys.stderr.write(text_err)
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
def __show_stdout_as_result(self, duration=1):
"""
        Periodically fetches standard output as a string.
"""
# pyqt signal to show standard output on te_result.
signal_stdout = MainWindow.SygnalHost()
# connect
signal_stdout.sygnal.connect(self.__show_stdout)
def emit_signal():
# change sys.stdout to StringIO.
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
while True:
signal_stdout.sygnal.emit()
time.sleep(duration)
threading.Thread(target=emit_signal).start()
def main(title, x, y, width, height, create_button_click_handler):
"""
    GUI main function.
    :type title: str
    :param title: window title
    :type x: int
    :param x: x coordinate of the window
    :type y: int
    :param y: y coordinate of the window
    :type width: int
    :param width: width of the window
    :type height: int
    :param height: height of the window
    :param create_button_click_handler: sub-handler called when the create
        button is pressed
"""
app = QtGui.QApplication(sys.argv)
window = MainWindow(title, x, y, width, height, create_button_click_handler)
sys.exit(app.exec_())
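# Minimal usage sketch (the handler and window title below are hypothetical
# examples, not part of this module):
# def on_create(kwarg):
#     print kwarg[MainWindow.KEY_MODEL_PATH]
# main("Shape Map Creator", 100, 100, 800, 600, on_create)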
| mit | -7,816,039,373,286,060,000 | 30.700265 | 82 | 0.558614 | false | 3.23 | false | false | false |
debugger06/MiroX | tv/lib/frontends/widgets/gtk/customcontrols.py | 2 | 20354 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
"""miro.frontends.widgets.gtk.controls -- Contains the ControlBox and
CustomControl classes. These handle the custom buttons/sliders used during
playback.
"""
from __future__ import division
import math
import gtk
import gobject
from miro.frontends.widgets.gtk import wrappermap
from miro.frontends.widgets.gtk.base import Widget
from miro.frontends.widgets.gtk.simple import Label, Image
from miro.frontends.widgets.gtk.drawing import (CustomDrawingMixin, Drawable,
ImageSurface)
from miro.plat.frontends.widgets import timer
from miro.frontends.widgets import widgetconst
class CustomControlMixin(CustomDrawingMixin):
def do_expose_event(self, event):
CustomDrawingMixin.do_expose_event(self, event)
if self.is_focus():
style = self.get_style()
style.paint_focus(self.window, self.state,
event.area, self, None, self.allocation.x,
self.allocation.y, self.allocation.width,
self.allocation.height)
class CustomButtonWidget(CustomControlMixin, gtk.Button):
def draw(self, wrapper, context):
if self.is_active():
wrapper.state = 'pressed'
elif self.state == gtk.STATE_PRELIGHT:
wrapper.state = 'hover'
else:
wrapper.state = 'normal'
wrapper.draw(context, wrapper.layout_manager)
self.set_focus_on_click(False)
def is_active(self):
return self.state == gtk.STATE_ACTIVE
class ContinuousCustomButtonWidget(CustomButtonWidget):
def is_active(self):
return (self.state == gtk.STATE_ACTIVE or
wrappermap.wrapper(self).button_down)
class DragableCustomButtonWidget(CustomButtonWidget):
def __init__(self):
CustomButtonWidget.__init__(self)
self.button_press_x = None
self.set_events(self.get_events() | gtk.gdk.POINTER_MOTION_MASK)
def do_button_press_event(self, event):
self.button_press_x = event.x
self.last_drag_event = None
gtk.Button.do_button_press_event(self, event)
def do_button_release_event(self, event):
self.button_press_x = None
gtk.Button.do_button_release_event(self, event)
def do_motion_notify_event(self, event):
DRAG_THRESHOLD = 15
if self.button_press_x is None:
# button not down
return
if (self.last_drag_event != 'right' and
event.x > self.button_press_x + DRAG_THRESHOLD):
wrappermap.wrapper(self).emit('dragged-right')
self.last_drag_event = 'right'
elif (self.last_drag_event != 'left' and
event.x < self.button_press_x - DRAG_THRESHOLD):
wrappermap.wrapper(self).emit('dragged-left')
self.last_drag_event = 'left'
def do_clicked(self):
# only emit clicked if we didn't emit dragged-left or dragged-right
if self.last_drag_event is None:
wrappermap.wrapper(self).emit('clicked')
class _DragInfo(object):
"""Info about the start of a drag.
Attributes:
- button: button that started the drag
- start_pos: position of the slider
- click_pos: position of the click
Note that start_pos and click_pos will be different if the user clicks
inside the slider.
"""
def __init__(self, button, start_pos, click_pos):
self.button = button
self.start_pos = start_pos
self.click_pos = click_pos
class CustomScaleMixin(CustomControlMixin):
def __init__(self):
CustomControlMixin.__init__(self)
self.drag_info = None
self.min = self.max = 0.0
def get_range(self):
return self.min, self.max
def set_range(self, min, max):
self.min = float(min)
self.max = float(max)
gtk.Range.set_range(self, min, max)
def is_continuous(self):
return wrappermap.wrapper(self).is_continuous()
def is_horizontal(self):
# this comes from a mixin
pass
def gtk_scale_class(self):
if self.is_horizontal():
return gtk.HScale
else:
return gtk.VScale
def get_slider_pos(self, value=None):
if value is None:
value = self.get_value()
if self.is_horizontal():
size = self.allocation.width
else:
size = self.allocation.height
ratio = (float(value) - self.min) / (self.max - self.min)
start_pos = self.slider_size() / 2.0
return start_pos + ratio * (size - self.slider_size())
def slider_size(self):
return wrappermap.wrapper(self).slider_size()
def _event_pos(self, event):
"""Get the position of an event.
If we are horizontal, this will be the x coordinate. If we are
vertical, the y.
"""
if self.is_horizontal():
return event.x
else:
return event.y
def do_button_press_event(self, event):
if self.drag_info is not None:
return
current_pos = self.get_slider_pos()
event_pos = self._event_pos(event)
pos_difference = abs(current_pos - event_pos)
# only move the slider if the click was outside its boundaries
# (#18840)
if pos_difference > self.slider_size() / 2.0:
self.move_slider(event_pos)
current_pos = event_pos
self.drag_info = _DragInfo(event.button, current_pos, event_pos)
self.grab_focus()
wrappermap.wrapper(self).emit('pressed')
def do_motion_notify_event(self, event):
if self.drag_info is not None:
event_pos = self._event_pos(event)
delta = event_pos - self.drag_info.click_pos
self.move_slider(self.drag_info.start_pos + delta)
def move_slider(self, new_pos):
"""Move the slider so that it's centered on new_pos."""
if self.is_horizontal():
size = self.allocation.width
else:
size = self.allocation.height
slider_size = self.slider_size()
new_pos -= slider_size / 2
size -= slider_size
ratio = max(0, min(1, float(new_pos) / size))
self.set_value(ratio * (self.max - self.min))
wrappermap.wrapper(self).emit('moved', self.get_value())
if self.is_continuous():
wrappermap.wrapper(self).emit('changed', self.get_value())
def handle_drag_out_of_bounds(self):
if not self.is_continuous():
self.set_value(self.start_value)
def do_button_release_event(self, event):
if self.drag_info is None or event.button != self.drag_info.button:
return
self.drag_info = None
        if (self.is_continuous() and
(0 <= event.x < self.allocation.width) and
(0 <= event.y < self.allocation.height)):
wrappermap.wrapper(self).emit('changed', self.get_value())
wrappermap.wrapper(self).emit('released')
def do_scroll_event(self, event):
wrapper = wrappermap.wrapper(self)
if self.is_horizontal():
if event.direction == gtk.gdk.SCROLL_UP:
event.direction = gtk.gdk.SCROLL_DOWN
elif event.direction == gtk.gdk.SCROLL_DOWN:
event.direction = gtk.gdk.SCROLL_UP
if (wrapper._scroll_step is not None and
event.direction in (gtk.gdk.SCROLL_UP, gtk.gdk.SCROLL_DOWN)):
# handle the scroll ourself
if event.direction == gtk.gdk.SCROLL_DOWN:
delta = wrapper._scroll_step
else:
delta = -wrapper._scroll_step
self.set_value(self.get_value() + delta)
else:
# let GTK handle the scroll
self.gtk_scale_class().do_scroll_event(self, event)
# Treat mouse scrolls as if the user clicked on the new position
wrapper.emit('pressed')
wrapper.emit('changed', self.get_value())
wrapper.emit('released')
def do_move_slider(self, scroll):
if self.is_horizontal():
if scroll == gtk.SCROLL_STEP_UP:
scroll = gtk.SCROLL_STEP_DOWN
elif scroll == gtk.SCROLL_STEP_DOWN:
scroll = gtk.SCROLL_STEP_UP
elif scroll == gtk.SCROLL_PAGE_UP:
scroll = gtk.SCROLL_PAGE_DOWN
elif scroll == gtk.SCROLL_PAGE_DOWN:
scroll = gtk.SCROLL_PAGE_UP
elif scroll == gtk.SCROLL_START:
scroll = gtk.SCROLL_END
elif scroll == gtk.SCROLL_END:
scroll = gtk.SCROLL_START
return self.gtk_scale_class().do_move_slider(self, scroll)
class CustomHScaleWidget(CustomScaleMixin, gtk.HScale):
def __init__(self):
CustomScaleMixin.__init__(self)
gtk.HScale.__init__(self)
def is_horizontal(self):
return True
class CustomVScaleWidget(CustomScaleMixin, gtk.VScale):
def __init__(self):
CustomScaleMixin.__init__(self)
gtk.VScale.__init__(self)
def is_horizontal(self):
return False
gobject.type_register(CustomButtonWidget)
gobject.type_register(ContinuousCustomButtonWidget)
gobject.type_register(DragableCustomButtonWidget)
gobject.type_register(CustomHScaleWidget)
gobject.type_register(CustomVScaleWidget)
class CustomControlBase(Drawable, Widget):
def __init__(self):
Widget.__init__(self)
Drawable.__init__(self)
self._gtk_cursor = None
self._entry_handlers = None
def _connect_enter_notify_handlers(self):
if self._entry_handlers is None:
self._entry_handlers = [
self.wrapped_widget_connect('enter-notify-event',
self.on_enter_notify),
self.wrapped_widget_connect('leave-notify-event',
self.on_leave_notify),
self.wrapped_widget_connect('button-release-event',
self.on_click)
]
def _disconnect_enter_notify_handlers(self):
if self._entry_handlers is not None:
for handle in self._entry_handlers:
self._widget.disconnect(handle)
self._entry_handlers = None
def set_cursor(self, cursor):
if cursor == widgetconst.CURSOR_NORMAL:
self._gtk_cursor = None
self._disconnect_enter_notify_handlers()
elif cursor == widgetconst.CURSOR_POINTING_HAND:
self._gtk_cursor = gtk.gdk.Cursor(gtk.gdk.HAND2)
self._connect_enter_notify_handlers()
else:
raise ValueError("Unknown cursor: %s" % cursor)
def on_enter_notify(self, widget, event):
self._widget.window.set_cursor(self._gtk_cursor)
def on_leave_notify(self, widget, event):
if self._widget.window:
self._widget.window.set_cursor(None)
def on_click(self, widget, event):
self.emit('clicked')
return True
class CustomButton(CustomControlBase):
def __init__(self):
"""Create a new CustomButton. active_image will be displayed while
the button is pressed. The image must have the same size.
"""
CustomControlBase.__init__(self)
self.set_widget(CustomButtonWidget())
self.create_signal('clicked')
self.forward_signal('clicked')
class ContinuousCustomButton(CustomControlBase):
def __init__(self):
CustomControlBase.__init__(self)
self.set_widget(ContinuousCustomButtonWidget())
self.button_down = False
self.button_held = False
self.timeout = None
self.create_signal('clicked')
self.create_signal('held-down')
self.create_signal('released')
self.wrapped_widget_connect('pressed', self.on_pressed)
self.wrapped_widget_connect('released', self.on_released)
self.wrapped_widget_connect('clicked', self.on_clicked)
self.initial_delay = 0.6
self.repeat_delay = 0.3
def set_delays(self, initial_delay, repeat_delay):
self.initial_delay = initial_delay
self.repeat_delay = repeat_delay
def on_pressed(self, widget):
if self.timeout:
timer.cancel(self.timeout)
self.button_down = True
self.button_held = False
self.timeout = timer.add(self.initial_delay, self.on_button_hold)
def on_button_hold(self):
self.button_held = True
self.emit('held-down')
self.timeout = timer.add(self.repeat_delay, self.on_button_hold)
def on_released(self, widget):
if self.timeout:
timer.cancel(self.timeout)
self.timeout = None
self.button_down = self.button_held = False
self.queue_redraw()
self.emit('released')
def on_clicked(self, widget):
if self.timeout:
timer.cancel(self.timeout)
if not self.button_held:
self.emit('clicked')
class DragableCustomButton(CustomControlBase):
def __init__(self):
CustomControlBase.__init__(self)
self.set_widget(DragableCustomButtonWidget())
self.create_signal('clicked')
self.create_signal('dragged-left')
self.create_signal('dragged-right')
class CustomSlider(CustomControlBase):
def __init__(self):
CustomControlBase.__init__(self)
self.create_signal('pressed')
self.create_signal('released')
self.create_signal('changed')
self.create_signal('moved')
self._scroll_step = None
if self.is_horizontal():
self.set_widget(CustomHScaleWidget())
else:
self.set_widget(CustomVScaleWidget())
self.wrapped_widget_connect('move-slider', self.on_slider_move)
def on_slider_move(self, widget, scrolltype):
self.emit('changed', widget.get_value())
self.emit('moved', widget.get_value())
def get_value(self):
return self._widget.get_value()
def set_value(self, value):
self._widget.set_value(value)
def get_range(self):
return self._widget.get_range()
def get_slider_pos(self, value=None):
"""Get the position for the slider for our current value.
        This will return the position that the slider should be centered on
        to display the value. It will be the x coordinate if is_horizontal()
        is True and the y coordinate otherwise.
        This method takes into account the size of the slider when calculating
the position. The slider position will start at (slider_size / 2) and
will end (slider_size / 2) px before the end of the widget.
:param value: value to get the position for. Defaults to the current
value
"""
return self._widget.get_slider_pos(value)
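        # Worked example for the formula above (illustrative numbers, not
        # from the original code): a 100 px horizontal slider with a 20 px
        # handle, at the midpoint of its range, is centered at
        # 20/2 + 0.5 * (100 - 20) = 50 px.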
def set_range(self, min_value, max_value):
self._widget.set_range(min_value, max_value)
# set_digits controls the precision of the scale by limiting changes
# to a certain number of digits. If the range is [0, 1], this code
# will give us 4 digits of precision, which seems reasonable.
range = max_value - min_value
self._widget.set_digits(int(round(math.log10(10000.0 / range))))
def set_increments(self, small_step, big_step, scroll_step=None):
"""Set the increments to scroll.
:param small_step: scroll amount for up/down
:param big_step: scroll amount for page up/page down.
:param scroll_step: scroll amount for mouse wheel, or None to make
this 2 times the small step
"""
self._widget.set_increments(small_step, big_step)
self._scroll_step = scroll_step
def to_miro_volume(value):
"""Convert from 0 to 1.0 to 0.0 to MAX_VOLUME.
"""
if value == 0:
return 0.0
return value * widgetconst.MAX_VOLUME
def to_gtk_volume(value):
"""Convert from 0.0 to MAX_VOLUME to 0 to 1.0.
"""
if value > 0.0:
value = (value / widgetconst.MAX_VOLUME)
return value
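# Worked example (using an assumed MAX_VOLUME of 2.0 for illustration):
# to_miro_volume(0.5) -> 1.0 and to_gtk_volume(1.0) -> 0.5.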
if hasattr(gtk.VolumeButton, "get_popup"):
# FIXME - Miro on Windows has an old version of gtk (2.16) and
# doesn't have the get_popup method. Once we upgrade and
# fix that, we can take out the hasattr check.
class VolumeMuter(Label):
"""Empty space that has a clicked signal so it can be dropped
in place of the VolumeMuter.
"""
def __init__(self):
Label.__init__(self)
self.create_signal("clicked")
class VolumeSlider(Widget):
"""VolumeSlider that uses the gtk.VolumeButton().
"""
def __init__(self):
Widget.__init__(self)
self.set_widget(gtk.VolumeButton())
self.wrapped_widget_connect('value-changed', self.on_value_changed)
self._widget.get_popup().connect("hide", self.on_hide)
self.create_signal('changed')
self.create_signal('released')
def on_value_changed(self, *args):
value = self.get_value()
self.emit('changed', value)
def on_hide(self, *args):
self.emit('released')
def get_value(self):
value = self._widget.get_property('value')
return to_miro_volume(value)
def set_value(self, value):
value = to_gtk_volume(value)
self._widget.set_property('value', value)
class ClickableImageButton(CustomButton):
"""Image that can send clicked events. If max_width and/or max_height are
specified, resizes the image proportionally such that all constraints are
met.
"""
def __init__(self, image_path, max_width=None, max_height=None):
CustomButton.__init__(self)
self.max_width = max_width
self.max_height = max_height
self.image = None
self._width, self._height = None, None
if image_path:
self.set_path(image_path)
self.set_cursor(widgetconst.CURSOR_POINTING_HAND)
def set_path(self, path):
image = Image(path)
if self.max_width:
image = image.resize_for_space(self.max_width, self.max_height)
self.image = ImageSurface(image)
self._width, self._height = image.width, image.height
def size_request(self, layout):
w = self._width
h = self._height
if not w:
w = self.max_width
if not h:
h = self.max_height
return w, h
def draw(self, context, layout):
if self.image:
self.image.draw(context, 0, 0, self._width, self._height)
w = self._width
h = self._height
if not w:
w = self.max_width
if not h:
h = self.max_height
w = min(context.width, w)
h = min(context.height, h)
context.rectangle(0, 0, w, h)
context.set_color((0, 0, 0)) # black
context.set_line_width(1)
context.stroke()
| gpl-2.0 | -1,318,889,697,418,349,800 | 35.088652 | 79 | 0.61639 | false | 3.876952 | false | false | false |
netoaraujjo/hal | clustering/kmeans.py | 1 | 1671 | #-*- coding: utf-8 -*-
from sklearn.cluster import KMeans as sk_KMeans
from .clustering import Clustering
class KMeans(Clustering):
    """Wrapper around scikit-learn's KMeans clustering."""
def __init__(self, data, n_clusters = 8, init = 'k-means++', n_init = 10,
max_iter = 300, tol = 0.0001, precompute_distances = 'auto',
verbose = 0, random_state = None, copy_x = True, n_jobs = 1,
algorithm = 'auto'):
super(KMeans, self).__init__()
self.data = data
self.n_clusters = n_clusters
self.init = init
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
    def execute(self):
        """Builds the clustering model."""
self.model = sk_KMeans(n_clusters = self.n_clusters,
init = self.init,
n_init = self.n_init,
max_iter = self.max_iter,
tol = self.tol,
precompute_distances = self.precompute_distances,
verbose = self.verbose,
random_state = self.random_state,
copy_x = self.copy_x,
n_jobs = self.n_jobs,
algorithm = self.algorithm).fit(self.data)
self.clusters = super().make_clusters(self.data, self.model.labels_)
@property
    def labels_(self):
        """Returns the labels of the dataset elements."""
return self.model.labels_
@property
    def clusters_(self):
        """Returns a dictionary whose keys are the cluster indices."""
return self.clusters
@property
    def model_(self):
        """Returns the clustering model."""
        return self.model
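# Minimal usage sketch (the sample data below is an assumption, not part of
# this module):
# import numpy as np
# km = KMeans(np.random.rand(100, 2), n_clusters=3)
# km.execute()
# print(km.labels_)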
| mit | 7,356,097,911,148,121,000 | 27.810345 | 74 | 0.636744 | false | 3.111732 | false | false | false |
ClearCorp/account-financial-reporting | mis_builder/report/mis_builder_xls.py | 5 | 4678 | # -*- coding: utf-8 -*-
# © 2014-2015 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import xlwt
from openerp.report import report_sxw
from openerp.addons.report_xls.report_xls import report_xls
import logging
_logger = logging.getLogger(__name__)
class MisBuilderXlsParser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(MisBuilderXlsParser, self).__init__(
cr, uid, name, context=context)
self.context = context
class MisBuilderXls(report_xls):
def __init__(self, name, table, rml=False, parser=False, header=True,
store=False):
super(MisBuilderXls, self).__init__(
name, table, rml, parser, header, store)
# Cell Styles
_xs = self.xls_styles
# header
rh_cell_format = _xs['bold'] + _xs['fill'] + \
_xs['borders_all'] + _xs['right']
self.rh_cell_style = xlwt.easyxf(rh_cell_format)
self.rh_cell_style_date = xlwt.easyxf(
rh_cell_format, num_format_str=report_xls.date_format)
# lines
self.mis_rh_cell_style = xlwt.easyxf(
_xs['borders_all'] + _xs['bold'] + _xs['fill'])
def generate_xls_report(self, _p, _xs, data, objects, wb):
report_name = objects[0].name
ws = wb.add_sheet(report_name[:31])
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
row_pos = 0
# set print header/footer
ws.header_str = self.xls_headers['standard']
ws.footer_str = self.xls_footers['standard']
# Title
c_specs = [
('report_name', 1, 0, 'text', report_name),
]
row_data = self.xls_row_template(c_specs, ['report_name'])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=xlwt.easyxf(_xs['xls_title']))
row_pos += 1
# get the computed result of the report
data = self.pool.get('mis.report.instance').compute(
self.cr, self.uid, objects[0].id, self.context)
# Column headers
header_name_list = ['']
col_specs_template = {'': {'header': [1, 30, 'text', ''],
'header_date': [1, 1, 'text', '']}}
for col in data['header'][0]['cols']:
col_specs_template[col['name']] = {'header': [1, 30, 'text',
col['name']],
'header_date': [1, 1, 'text',
col['date']]}
header_name_list.append(col['name'])
c_specs = map(
lambda x: self.render(x, col_specs_template, 'header'),
header_name_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rh_cell_style,
set_column_size=True)
c_specs = map(lambda x: self.render(
x, col_specs_template, 'header_date'), header_name_list)
row_data = self.xls_row_template(c_specs, [x[0] for x in c_specs])
row_pos = self.xls_write_row(
ws, row_pos, row_data, row_style=self.rh_cell_style_date)
ws.set_horz_split_pos(row_pos)
ws.set_vert_split_pos(1)
for line in data['content']:
col = 0
ws.write(row_pos, col, line['kpi_name'], self.mis_rh_cell_style)
for value in line['cols']:
col += 1
num_format_str = '#'
if value.get('dp'):
num_format_str += '.'
num_format_str += '0' * int(value['dp'])
if value.get('prefix'):
num_format_str = '"%s"' % value['prefix'] + num_format_str
if value.get('suffix'):
num_format_str += ' "%s"' % value['suffix']
kpi_cell_style = xlwt.easyxf(
_xs['borders_all'] + _xs['right'],
num_format_str=num_format_str)
if value.get('val'):
val = value['val']
if value.get('is_percentage'):
val = val / 0.01
ws.write(row_pos, col, val, kpi_cell_style)
else:
ws.write(row_pos, col, value['val_r'], kpi_cell_style)
row_pos += 1
MisBuilderXls('report.mis.report.instance.xls',
'mis.report.instance',
parser=MisBuilderXlsParser)
| agpl-3.0 | -3,678,627,083,664,979,500 | 37.975 | 78 | 0.504169 | false | 3.527149 | false | false | false |
inclement/python-for-android-revamp | pythonforandroid/toolchain.py | 1 | 83695 | #!/usr/bin/env python
"""
Tool for compiling Android toolchain
====================================
This tool intends to replace all of the previous shell scripts in tools/.
"""
from __future__ import print_function
import sys
from sys import stdout
from os.path import join, dirname, realpath, exists, isdir, basename
from os import listdir, unlink, makedirs, environ, chdir, getcwd, walk, uname
import os
import zipfile
import tarfile
import importlib
import io
import json
import glob
import shutil
import fnmatch
import re
from functools import wraps
from datetime import datetime
from distutils.spawn import find_executable
try:
from urllib.request import FancyURLopener
except ImportError:
from urllib import FancyURLopener
import argparse
from appdirs import user_data_dir
curdir = dirname(__file__)
sys.path.insert(0, join(curdir, "tools", "external"))
import sh
import logging
import contextlib
import imp
from colorama import Style, Fore
logger = logging.getLogger('p4a')
# logger.setLevel(logging.DEBUG)
if not hasattr(logger, 'touched'): # Necessary as importlib reloads
# this, which would add a second
# handler and reset the level
logger.setLevel(logging.INFO)
logger.touched = True
ch = logging.StreamHandler(stdout)
formatter = logging.Formatter('{}[%(levelname)s]{}: %(message)s'.format(
Style.BRIGHT, Style.RESET_ALL))
ch.setFormatter(formatter)
logger.addHandler(ch)
# logger.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
info = logger.info
debug = logger.debug
warning = logger.warning
IS_PY3 = sys.version_info[0] >= 3
def info_main(*args):
logger.info(''.join([Style.BRIGHT, Fore.GREEN] + list(args) +
[Style.RESET_ALL, Fore.RESET]))
def shprint(command, *args, **kwargs):
kwargs["_iter"] = True
kwargs["_out_bufsize"] = 1
kwargs["_err_to_out"] = True
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
command_path = str(command).split('/')
command_string = command_path[-1]
# if len(command_path) > 1:
# command_string = '.../' + command_string
    logger.debug('shprint arguments: {}'.format(args))
string = ' '.join(['running', Style.DIM, command_string] + list(args))
# If logging is not in DEBUG mode, trim the command if necessary
if logger.level > logging.DEBUG:
short_string = string
if len(string) > 100:
short_string = string[:100] + '... (and {} more)'.format(len(string) - 100)
logger.info(short_string + Style.RESET_ALL)
else:
logger.debug(string + Style.RESET_ALL)
output = command(*args, **kwargs)
need_closing_newline = False
for line in output:
if logger.level > logging.DEBUG:
string = '\r' + 'working ... ' + line[:100].replace('\n', '').rstrip() + ' ...'
if len(string) < 20:
continue
if len(string) < 120:
string = string + ' '*(120 - len(string))
sys.stdout.write(string)
sys.stdout.flush()
need_closing_newline = True
else:
logger.debug(''.join([Style.DIM, '\t', line.rstrip()]))
if logger.level > logging.DEBUG and need_closing_newline:
print()
return output
# shprint(sh.ls, '-lah')
# exit(1)
def require_prebuilt_dist(func):
'''Decorator for ToolchainCL methods. If present, the method will
automatically make sure a dist has been built before continuing
or, if no dists are present or can be obtained, will raise an
error.
'''
@wraps(func)
def wrapper_func(self, args):
ctx = self.ctx
ctx.prepare_build_environment()
dist = self._dist
if dist.needs_build:
info('No dist exists that meets your requirements, so one will '
'be built.')
args = build_dist_from_args(ctx, dist, args)
func(self, args)
return wrapper_func
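
# Usage sketch for the decorator above (the apk method is hypothetical):
#
#     class ToolchainCL(object):
#         @require_prebuilt_dist
#         def apk(self, args):
#             ...  # by now self._dist refers to a built distribution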
def get_directory(filename):
if filename.endswith('.tar.gz'):
return basename(filename[:-7])
elif filename.endswith('.tgz'):
return basename(filename[:-4])
elif filename.endswith('.tar.bz2'):
return basename(filename[:-8])
elif filename.endswith('.tbz2'):
return basename(filename[:-5])
elif filename.endswith('.zip'):
return basename(filename[:-4])
info('Unknown file extension for {}'.format(filename))
exit(1)
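
# For example (illustrative): get_directory('Python-2.7.2.tar.gz') returns
# 'Python-2.7.2', and get_directory('/tmp/foo-1.0.zip') returns 'foo-1.0'
# (the basename with the archive extension stripped).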
def which(program, path_env):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in path_env.split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
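
# For example (illustrative): which('gcc', environ['PATH']) returns the
# absolute path of the first executable gcc found on PATH, or None if
# nothing matches.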
@contextlib.contextmanager
def current_directory(new_dir):
cur_dir = getcwd()
logger.info(''.join((Fore.CYAN, '-> directory context ', new_dir,
Fore.RESET)))
chdir(new_dir)
yield
logger.info(''.join((Fore.CYAN, '<- directory context ', cur_dir,
Fore.RESET)))
chdir(cur_dir)
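
# Usage sketch for the context manager above (the path is hypothetical):
#
#     with current_directory('/tmp/some_build_dir'):
#         shprint(sh.make)  # runs with /tmp/some_build_dir as the cwd
#     # the previous working directory is restored on exit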
def cache_execution(f):
def _cache_execution(self, *args, **kwargs):
state = self.ctx.state
key = "{}.{}".format(self.name, f.__name__)
force = kwargs.pop("force", False)
if args:
for arg in args:
key += ".{}".format(arg)
key_time = "{}.at".format(key)
if key in state and not force:
print("# (ignored) {} {}".format(f.__name__.capitalize(), self.name))
return
print("{} {}".format(f.__name__.capitalize(), self.name))
f(self, *args, **kwargs)
state[key] = True
state[key_time] = str(datetime.utcnow())
return _cache_execution
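
# Usage sketch (hypothetical recipe method): decorating a method with
# cache_execution stores a '<recipe name>.<method name>' key in ctx.state,
# so later invocations are skipped unless force=True is passed:
#
#     class SomeRecipe(Recipe):
#         @cache_execution
#         def build_arch(self, arch):
#             ...  # runs once; repeat calls print '# (ignored) ...'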
class ChromeDownloader(FancyURLopener):
version = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')
urlretrieve = ChromeDownloader().retrieve
class JsonStore(object):
"""Replacement of shelve using json, needed for support python 2 and 3.
"""
def __init__(self, filename):
super(JsonStore, self).__init__()
self.filename = filename
self.data = {}
if exists(filename):
try:
with io.open(filename, encoding='utf-8') as fd:
self.data = json.load(fd)
except ValueError:
print("Unable to read the state.db, content will be replaced.")
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
self.sync()
def __delitem__(self, key):
del self.data[key]
self.sync()
def __contains__(self, item):
return item in self.data
def get(self, item, default=None):
return self.data.get(item, default)
def keys(self):
return self.data.keys()
def remove_all(self, prefix):
for key in self.data.keys()[:]:
if not key.startswith(prefix):
continue
del self.data[key]
self.sync()
def sync(self):
# http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531
if IS_PY3:
with open(self.filename, 'w') as fd:
json.dump(self.data, fd, ensure_ascii=False)
else:
with io.open(self.filename, 'w', encoding='utf-8') as fd:
fd.write(unicode(json.dumps(self.data, ensure_ascii=False)))
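
# Usage sketch for JsonStore (the file name is hypothetical):
#
#     state = JsonStore('/tmp/state.db')
#     state['python2.build_arch'] = True   # written to disk on assignment
#     'python2.build_arch' in state        # -> True
#     state.get('missing_key', 'default')  # -> 'default'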
class Arch(object):
def __init__(self, ctx):
super(Arch, self).__init__()
self.ctx = ctx
def __str__(self):
return self.arch
@property
def include_dirs(self):
return [
"{}/{}".format(
self.ctx.include_dir,
d.format(arch=self))
for d in self.ctx.include_dirs]
def get_env(self):
include_dirs = [
"-I{}/{}".format(
self.ctx.include_dir,
d.format(arch=self))
for d in self.ctx.include_dirs]
env = {}
env["CFLAGS"] = " ".join([
"-DANDROID", "-mandroid", "-fomit-frame-pointer",
"--sysroot", self.ctx.ndk_platform])
env["CXXFLAGS"] = env["CFLAGS"]
env["LDFLAGS"] = " ".join(['-lm'])
py_platform = sys.platform
if py_platform in ['linux2', 'linux3']:
py_platform = 'linux'
if self.ctx.ndk_ver == 'r5b':
toolchain_prefix = 'arm-eabi'
toolchain_version = '4.4.0'
elif self.ctx.ndk_ver[:2] in ('r7', 'r8'):
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.4.3'
elif self.ctx.ndk_ver[:2] == 'r9':
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.9'
elif self.ctx.ndk_ver[:3] == 'r10':
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.9'
else:
            warning('Error: NDK version {} is not supported by these '
                    'tools'.format(self.ctx.ndk_ver))
exit(1)
env['TOOLCHAIN_PREFIX'] = toolchain_prefix
env['TOOLCHAIN_VERSION'] = toolchain_version
cc = find_executable('{toolchain_prefix}-gcc'.format(
toolchain_prefix=toolchain_prefix), path=environ['PATH'])
if cc is None:
warning('Couldn\'t find executable for CC. Exiting.')
exit(1)
env['CC'] = '{toolchain_prefix}-gcc {cflags}'.format(
toolchain_prefix=toolchain_prefix,
cflags=env['CFLAGS'])
env['CXX'] = '{toolchain_prefix}-g++ {cxxflags}'.format(
toolchain_prefix=toolchain_prefix,
cxxflags=env['CXXFLAGS'])
# AND: Not sure if these are still important
env['AR'] = '{}-ar'.format(toolchain_prefix)
env['RANLIB'] = '{}-ranlib'.format(toolchain_prefix)
env['LD'] = '{}-ld'.format(toolchain_prefix)
env['STRIP'] = '{}-strip --strip-unneeded'.format(toolchain_prefix)
env['MAKE'] = 'make -j5'
env['READELF'] = '{}-readelf'.format(toolchain_prefix)
hostpython_recipe = Recipe.get_recipe('hostpython2', self.ctx)
# AND: This hardcodes python version 2.7, needs fixing
# AND: This also hardcodes armeabi, which isn't even correct, don't forget to fix!
env['BUILDLIB_PATH'] = join(hostpython_recipe.get_build_dir('armeabi'),
'build', 'lib.linux-{}-2.7'.format(uname()[-1]))
env['PATH'] = environ['PATH']
# AND: This stuff is set elsewhere in distribute.sh. Does that matter?
env['ARCH'] = self.arch
# env['LIBLINK_PATH'] = join(self.ctx.build_dir, 'other_builds', 'objects')
# ensure_dir(env['LIBLINK_PATH']) # AND: This should be elsewhere
return env
class ArchAndroid(Arch):
arch = "armeabi"
# class ArchSimulator(Arch):
# sdk = "iphonesimulator"
# arch = "i386"
# triple = "i386-apple-darwin11"
# version_min = "-miphoneos-version-min=6.0.0"
# sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()
# class Arch64Simulator(Arch):
# sdk = "iphonesimulator"
# arch = "x86_64"
# triple = "x86_64-apple-darwin13"
# version_min = "-miphoneos-version-min=7.0"
# sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()
# class ArchIOS(Arch):
# sdk = "iphoneos"
# arch = "armv7"
# triple = "arm-apple-darwin11"
# version_min = "-miphoneos-version-min=6.0.0"
# sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()
# class Arch64IOS(Arch):
# sdk = "iphoneos"
# arch = "arm64"
# triple = "aarch64-apple-darwin13"
# version_min = "-miphoneos-version-min=7.0"
# sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()
class Graph(object):
# Taken from python-for-android/depsort
def __init__(self):
# `graph`: dict that maps each package to a set of its dependencies.
self.graph = {}
def add(self, dependent, dependency):
"""Add a dependency relationship to the graph"""
self.graph.setdefault(dependent, set())
self.graph.setdefault(dependency, set())
if dependent != dependency:
self.graph[dependent].add(dependency)
def add_optional(self, dependent, dependency):
"""Add an optional (ordering only) dependency relationship to the graph
Only call this after all mandatory requirements are added
"""
if dependent in self.graph and dependency in self.graph:
self.add(dependent, dependency)
def find_order(self):
"""Do a topological sort on a dependency graph
:Parameters:
:Returns:
iterator, sorted items form first to last
"""
graph = dict((k, set(v)) for k, v in self.graph.items())
while graph:
# Find all items without a parent
leftmost = [l for l, s in graph.items() if not s]
if not leftmost:
raise ValueError('Dependency cycle detected! %s' % graph)
# If there is more than one, sort them for predictable order
leftmost.sort()
for result in leftmost:
# Yield and remove them from the graph
yield result
graph.pop(result)
for bset in graph.values():
bset.discard(result)
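
# Usage sketch for Graph (the recipe names are illustrative):
#
#     graph = Graph()
#     graph.add('kivy', 'python2')         # kivy depends on python2
#     graph.add('python2', 'hostpython2')
#     list(graph.find_order())  # -> ['hostpython2', 'python2', 'kivy']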
class Context(object):
'''A build context. If anything will be built, an instance this class
will be instantiated and used to hold all the build state.'''
env = environ.copy()
root_dir = None # the filepath of toolchain.py
storage_dir = None # the root dir where builds and dists will be stored
build_dir = None # in which bootstraps are copied for building and recipes are built
dist_dir = None # the Android project folder where everything ends up
libs_dir = None
javaclass_dir = None
ccache = None # whether to use ccache
cython = None # the cython interpreter name
sdk_dir = None # the directory of the android sdk
ndk_dir = None # the directory of the android ndk
ndk_platform = None # the ndk platform directory
ndk_ver = None # the ndk version, defaults to r9
android_api = None # the android api target, defaults to 14
dist_name = None
bootstrap = None
bootstrap_build_dir = None
recipe_build_order = None # Will hold the list of all built recipes
@property
def packages_path(self):
'''Where packages are downloaded before being unpacked'''
return join(self.storage_dir, 'packages')
@property
def templates_dir(self):
return join(self.root_dir, 'templates')
def setup_dirs(self):
'''Calculates all the storage and build dirs, and makes sure
the directories exist where necessary.'''
self.root_dir = realpath(dirname(__file__))
# AND: TODO: Allow the user to set the build_dir
self.storage_dir = user_data_dir('python-for-android')
# self.storage_dir = self.root_dir
self.build_dir = join(self.storage_dir, 'build')
self.libs_dir = join(self.build_dir, 'libs')
self.dist_dir = join(self.storage_dir, 'dists')
self.javaclass_dir = join(self.build_dir, 'java')
ensure_dir(self.storage_dir)
ensure_dir(self.build_dir)
ensure_dir(self.libs_dir)
ensure_dir(self.dist_dir)
ensure_dir(self.javaclass_dir)
@property
def android_api(self):
if self._android_api is None:
raise ValueError('Tried to access android_api but it has not '
'been set - this should not happen, something '
'went wrong!')
return self._android_api
@android_api.setter
def android_api(self, value):
self._android_api = value
@property
def ndk_ver(self):
if self._ndk_ver is None:
            raise ValueError('Tried to access ndk_ver but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
return self._ndk_ver
@ndk_ver.setter
def ndk_ver(self, value):
self._ndk_ver = value
@property
def sdk_dir(self):
if self._sdk_dir is None:
            raise ValueError('Tried to access sdk_dir but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
return self._sdk_dir
@sdk_dir.setter
def sdk_dir(self, value):
self._sdk_dir = value
@property
def ndk_dir(self):
if self._ndk_dir is None:
            raise ValueError('Tried to access ndk_dir but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
return self._ndk_dir
@ndk_dir.setter
def ndk_dir(self, value):
self._ndk_dir = value
def prepare_build_environment(self):
'''Checks that build dependencies exist and sets internal variables
for the Android SDK etc.
..warning:: This *must* be called before trying any build stuff
'''
if self._build_env_prepared:
return
ok = True
# AND: We should check for ndk-build and ant?
self.android_api = environ.get('ANDROIDAPI', '14')
self.ndk_ver = environ.get('ANDROIDNDKVER', 'r9')
self.sdk_dir = environ.get('ANDROIDSDK', None)
if self.sdk_dir is None:
ok = False
self.ndk_dir = environ.get('ANDROIDNDK', None)
if self.ndk_dir is None:
ok = False
else:
self.ndk_platform = join(
self.ndk_dir,
'platforms',
'android-{}'.format(self.android_api),
'arch-arm')
print('ndk platform', self.ndk_platform)
if not exists(self.ndk_platform):
warning('ndk_platform doesn\'t exist')
ok = False
virtualenv = None
if virtualenv is None:
virtualenv = sh.which('virtualenv2')
if virtualenv is None:
virtualenv = sh.which('virtualenv-2.7')
if virtualenv is None:
virtualenv = sh.which('virtualenv')
if virtualenv is None:
raise IOError('Couldn\'t find a virtualenv executable, '
'you must install this to use p4a.')
self.virtualenv = virtualenv
info('Found virtualenv at {}'.format(virtualenv))
# path to some tools
self.ccache = sh.which("ccache")
if not self.ccache:
info("ccache is missing, the build will not be optimized in the future.")
for cython_fn in ("cython2", "cython-2.7", "cython"):
cython = sh.which(cython_fn)
if cython:
self.cython = cython
break
if not self.cython:
ok = False
warning("Missing requirement: cython is not installed")
# Modify the path so that sh finds modules appropriately
py_platform = sys.platform
if py_platform in ['linux2', 'linux3']:
py_platform = 'linux'
if self.ndk_ver == 'r5b':
toolchain_prefix = 'arm-eabi'
toolchain_version = '4.4.0'
elif self.ndk_ver[:2] in ('r7', 'r8'):
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.4.3'
elif self.ndk_ver[:2] == 'r9':
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.9'
elif self.ndk_ver[:3] == 'r10':
toolchain_prefix = 'arm-linux-androideabi'
toolchain_version = '4.9'
else:
            warning('Error: NDK version {} is not supported by these '
                    'tools'.format(self.ndk_ver))
exit(1)
environ['PATH'] = ('{ndk_dir}/toolchains/{toolchain_prefix}-{toolchain_version}/'
'prebuilt/{py_platform}-x86/bin/:{ndk_dir}/toolchains/'
'{toolchain_prefix}-{toolchain_version}/prebuilt/'
'{py_platform}-x86_64/bin/:{ndk_dir}:{sdk_dir}/'
'tools:{path}').format(
sdk_dir=self.sdk_dir, ndk_dir=self.ndk_dir,
toolchain_prefix=toolchain_prefix,
toolchain_version=toolchain_version,
py_platform=py_platform, path=environ.get('PATH'))
        # AND: Are these necessary? Where to check for ant and ndk-build?
# check the basic tools
for tool in ("pkg-config", "autoconf", "automake", "libtool",
"tar", "bzip2", "unzip", "make", "gcc", "g++"):
if not sh.which(tool):
warning("Missing requirement: {} is not installed".format(
tool))
if not ok:
sys.exit(1)
def __init__(self):
super(Context, self).__init__()
self.include_dirs = []
self._build_env_prepared = False
# root of the toolchain
self.setup_dirs()
# AND: Currently only the Android architecture is supported
self.archs = (
ArchAndroid(self),
)
ensure_dir(join(self.build_dir, 'bootstrap_builds'))
ensure_dir(join(self.build_dir, 'other_builds')) # where everything else is built
# # remove the most obvious flags that can break the compilation
self.env.pop("LDFLAGS", None)
self.env.pop("ARCHFLAGS", None)
self.env.pop("CFLAGS", None)
# set the state
self.state = JsonStore(join(self.dist_dir, "state.db"))
def prepare_bootstrap(self, bs):
bs.ctx = self
self.bootstrap = bs
self.bootstrap.prepare_build_dir()
self.bootstrap_build_dir = self.bootstrap.build_dir
def prepare_dist(self, name):
self.dist_name = name
self.bootstrap.prepare_dist_dir(self.dist_name)
def get_site_packages_dir(self, arch=None):
'''Returns the location of site-packages in the python-install build
dir.
'''
# AND: This *must* be replaced with something more general in
# order to support multiple python versions and/or multiple
# archs.
return join(self.build_dir, 'python-install', 'lib', 'python2.7',
'site-packages')
def get_libs_dir(self, arch):
'''The libs dir for a given arch.'''
ensure_dir(join(self.libs_dir, arch))
# AND: See warning:
warning('Ensuring libs dir in get_libs_dir, should fix this '
'to ensure elsewhere')
return join(self.libs_dir, arch)
class Distribution(object):
'''State container for information about a distribution (i.e. an
Android project).
This is separate from a Bootstrap because the Bootstrap is
concerned with building and populating the dist directory, whereas
the dist itself could also come from e.g. a binary download.
'''
ctx = None
name = None # A name identifying the dist. May not be None.
needs_build = False # Whether the dist needs compiling
url = None
dist_dir = None # Where the dist dir ultimately is. Should not be None.
recipes = []
description = '' # A long description
def __init__(self, ctx):
self.ctx = ctx
def __str__(self):
return '<Distribution: name {} with recipes ({})>'.format(
# self.name, ', '.join([recipe.name for recipe in self.recipes]))
self.name, ', '.join(self.recipes))
def __repr__(self):
return str(self)
@classmethod
def get_distribution(cls, ctx, name=None, recipes=[], allow_download=True,
force_build=False,
allow_build=True, extra_dist_dirs=[],
require_perfect_match=False):
'''Takes information about the distribution, and decides what kind of
distribution it will be.
If parameters conflict (e.g. a dist with that name already
exists, but doesn't have the right set of recipes),
an error is thrown.
Parameters
----------
name : str
            The name of the distribution. If a dist with this name already
exists, it will be used.
recipes : list
The recipes that the distribution must contain.
allow_download : bool
Whether binary dists may be downloaded.
allow_build : bool
Whether the distribution may be built from scratch if necessary.
This is always False on e.g. Windows.
force_download: bool
If True, only downloaded dists are considered.
force_build : bool
If True, the dist is forced to be built locally.
extra_dist_dirs : list
Any extra directories in which to search for dists.
require_perfect_match : bool
If True, will only match distributions with precisely the
correct set of recipes.
'''
# AND: This whole function is a bit hacky, it needs checking
# properly to make sure it follows logically correct
# possibilities
existing_dists = Distribution.get_distributions(ctx)
needs_build = True # whether the dist needs building, will be returned
possible_dists = existing_dists
# 0) Check if a dist with that name already exists
if name is not None and name:
possible_dists = [d for d in possible_dists if d.name == name]
# 1) Check if any existing dists meet the requirements
_possible_dists = []
for dist in possible_dists:
for recipe in recipes:
if recipe not in dist.recipes:
break
else:
_possible_dists.append(dist)
possible_dists = _possible_dists
if possible_dists:
info('Of the existing distributions, the following meet '
'the given requirements:')
for dist in possible_dists:
info('\tname {}: recipes ({})'.format(dist.name, ', '.join(dist.recipes)))
else:
            info('No existing dists meet the given requirements!')
# If any dist has perfect recipes, return it
for dist in possible_dists:
if force_build:
continue
if (set(dist.recipes) == set(recipes) or
(set(recipes).issubset(set(dist.recipes)) and not require_perfect_match)):
info('{} has compatible recipes, using this one'.format(dist.name))
return dist
assert len(possible_dists) < 2
if not name and possible_dists:
info('Asked for dist with name {} with recipes ({}), but a dist '
'with this name already exists and has incompatible recipes '
'({})'.format(name, ', '.join(recipes), ', '.join(possible_dists[0].recipes)))
info('No compatible dist found, so exiting.')
exit(1)
# # 2) Check if any downloadable dists meet the requirements
# online_dists = [('testsdl2', ['hostpython2', 'sdl2_image',
# 'sdl2_mixer', 'sdl2_ttf',
# 'python2', 'sdl2',
# 'pyjniussdl2', 'kivysdl2'],
# 'https://github.com/inclement/sdl2-example-dist/archive/master.zip'),
# ]
# _possible_dists = []
# for dist_name, dist_recipes, dist_url in online_dists:
# for recipe in recipes:
# if recipe not in dist_recipes:
# break
# else:
# dist = Distribution(ctx)
# dist.name = dist_name
# dist.url = dist_url
# _possible_dists.append(dist)
# # if _possible_dists
# If we got this far, we need to build a new dist
dist = Distribution(ctx)
dist.needs_build = True
if not name:
filen = 'unnamed_dist_{}'
i = 1
while exists(join(ctx.dist_dir, filen.format(i))):
i += 1
name = filen.format(i)
dist.name = name
dist.dist_dir = join(ctx.dist_dir, dist.name)
dist.recipes = recipes
return dist
@classmethod
def get_distributions(cls, ctx, extra_dist_dirs=[]):
'''Returns all the distributions found locally.'''
if extra_dist_dirs:
warning('extra_dist_dirs argument to get_distributions is not yet implemented')
exit(1)
dist_dir = ctx.dist_dir
folders = glob.glob(join(dist_dir, '*'))
for dir in extra_dist_dirs:
folders.extend(glob.glob(join(dir, '*')))
dists = []
for folder in folders:
if exists(join(folder, 'dist_info.json')):
with open(join(folder, 'dist_info.json')) as fileh:
dist_info = json.load(fileh)
dist = cls(ctx)
dist.name = folder.split('/')[-1] # AND: also equal
# to
# dist_info['dist_name']...which
# one should we
# use?
dist.dist_dir = folder
dist.needs_build = False
dist.recipes = dist_info['recipes']
dists.append(dist)
return dists
def save_info(self):
'''
Save information about the distribution in its dist_dir.
'''
with current_directory(self.dist_dir):
info('Saving distribution info')
with open('dist_info.json', 'w') as fileh:
json.dump({'dist_name': self.name,
'recipes': self.ctx.recipe_build_order},
fileh)
def load_info(self):
with current_directory(self.dist_dir):
filen = 'dist_info.json'
if not exists(filen):
return None
with open('dist_info.json', 'r') as fileh:
dist_info = json.load(fileh)
return dist_info
class Bootstrap(object):
'''An Android project template, containing recipe stuff for
compilation and templated fields for APK info.
'''
name = ''
jni_subdir = '/jni'
ctx = None
build_dir = None
dist_dir = None
dist_name = None
distribution = None
recipe_depends = []
# Other things a Bootstrap might need to track (maybe separately):
# ndk_main.c
# whitelist.txt
# blacklist.txt
@property
def dist_dir(self):
if self.distribution is None:
warning('Tried to access {}.dist_dir, but {}.distribution '
'is None'.format(self, self))
exit(1)
return self.distribution.dist_dir
@property
def jni_dir(self):
return self.name + self.jni_subdir
def get_build_dir(self):
return join(self.ctx.build_dir, 'bootstrap_builds', self.name)
def get_dist_dir(self, name):
return join(self.ctx.dist_dir, name)
@property
def name(self):
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
def prepare_build_dir(self):
'''Ensure that a build dir exists for the recipe. This same single
dir will be used for building all different archs.'''
self.build_dir = self.get_build_dir()
shprint(sh.cp, '-r',
join(self.bootstrap_dir, 'build'),
# join(self.ctx.root_dir,
# 'bootstrap_templates',
# self.name),
self.build_dir)
def prepare_dist_dir(self, name):
# self.dist_dir = self.get_dist_dir(name)
ensure_dir(self.dist_dir)
def run_distribute(self):
# print('Default bootstrap being used doesn\'t know how to distribute...failing.')
# exit(1)
with current_directory(self.dist_dir):
info('Saving distribution info')
with open('dist_info.json', 'w') as fileh:
json.dump({'dist_name': self.ctx.dist_name,
'bootstrap': self.ctx.bootstrap.name,
'recipes': self.ctx.recipe_build_order},
fileh)
# AND: This method must be replaced by manual dir setting, in
# order to allow for user dirs
# def get_bootstrap_dir(self):
# return(dirname(__file__))
@classmethod
def list_bootstraps(cls):
forbidden_dirs = ('__pycache__', )
bootstraps_dir = join(dirname(__file__), 'bootstraps')
for name in listdir(bootstraps_dir):
if name in forbidden_dirs:
continue
filen = join(bootstraps_dir, name)
if isdir(filen):
yield name
@classmethod
def get_bootstrap(cls, name, ctx):
'''Returns an instance of a bootstrap with the given name.
This is the only way you should access a bootstrap class, as
it sets the bootstrap directory correctly.
'''
# AND: This method will need to check user dirs, and access
# bootstraps in a slightly different way
if not hasattr(cls, 'bootstraps'):
cls.bootstraps = {}
if name in cls.bootstraps:
return cls.bootstraps[name]
mod = importlib.import_module('pythonforandroid.bootstraps.{}'.format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
bootstrap = mod.bootstrap
bootstrap.bootstrap_dir = join(ctx.root_dir, 'bootstraps', name)
bootstrap.ctx = ctx
return bootstrap
class Recipe(object):
url = None
'''The address from which the recipe may be downloaded. This is not
essential, it may be omitted if the source is available some other
way, such as via the :class:`IncludedFilesBehaviour` mixin.
If the url includes the version, you may (and probably should)
replace this with ``{version}``, which will automatically be
replaced by the :attr:`version` string during download.
.. note:: Methods marked (internal) are used internally and you
probably don't need to call them, but they are available
if you want.
'''
version = None
'''A string giving the version of the software the recipe describes,
e.g. ``2.0.3`` or ``master``.'''
md5sum = None
'''The md5sum of the source from the :attr:`url`. Non-essential, but
you should try to include this, it is used to check that the download
finished correctly.
'''
depends = []
'''A list containing the names of any recipes that this recipe depends on.
'''
conflicts = []
# AND: Not currently used
'''A list containing the names of any recipes that are known to be
incompatible with this one.'''
# patches = []
    # '''Filepaths (relative to the recipe script) for any patches that are
# to be applied. By default, these are applied in prebuild_arch, so
# if you override this but want to use patches then don't forget to
# call super().
# name = None # name for the recipe dir
archs = ['armeabi'] # will android use this?
@property
def versioned_url(self):
'''A property returning the url of the recipe with ``{version}``
replaced by the :attr:`url`. If accessing the url, you should use this
property, *not* access the url directly.'''
if self.url is None:
return None
return self.url.format(version=self.version)
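
    # For example (illustrative): with url set to
    # 'http://example.com/foo-{version}.tar.gz' and version set to '1.0',
    # versioned_url evaluates to 'http://example.com/foo-1.0.tar.gz'.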
def download_file(self, url, filename, cwd=None):
"""
(internal) Download an ``url`` to a ``filename``.
"""
if not url:
return
def report_hook(index, blksize, size):
if size <= 0:
progression = '{0} bytes'.format(index * blksize)
else:
progression = '{0:.2f}%'.format(
index * blksize * 100. / float(size))
stdout.write('- Download {}\r'.format(progression))
stdout.flush()
if cwd:
filename = join(cwd, filename)
if exists(filename):
unlink(filename)
info('Downloading {} from {}'.format(self.name, url))
urlretrieve(url, filename, report_hook)
return filename
def extract_file(self, filename, cwd):
"""
(internal) Extract the `filename` into the directory `cwd`.
"""
if not filename:
return
info("Extract {} into {}".format(filename, cwd))
if filename.endswith(".tgz") or filename.endswith(".tar.gz"):
shprint(sh.tar, "-C", cwd, "-xvzf", filename)
elif filename.endswith(".tbz2") or filename.endswith(".tar.bz2"):
shprint(sh.tar, "-C", cwd, "-xvjf", filename)
elif filename.endswith(".zip"):
zf = zipfile.ZipFile(filename)
zf.extractall(path=cwd)
zf.close()
else:
warning("Error: cannot extract, unrecognized extension for {}".format(
filename))
raise Exception()
# def get_archive_rootdir(self, filename):
# if filename.endswith(".tgz") or filename.endswith(".tar.gz") or \
# filename.endswith(".tbz2") or filename.endswith(".tar.bz2"):
# archive = tarfile.open(filename)
# root = archive.next().path.split("/")
# return root[0]
# elif filename.endswith(".zip"):
# with zipfile.ZipFile(filename) as zf:
# return dirname(zf.namelist()[0])
# else:
# print("Error: cannot detect root directory")
# print("Unrecognized extension for {}".format(filename))
# raise Exception()
def apply_patch(self, filename):
"""
Apply a patch from the current recipe directory into the current
build directory.
"""
info("Applying patch {}".format(filename))
filename = join(self.recipe_dir, filename)
# AND: get_build_dir shouldn't need to hardcode armeabi
sh.patch("-t", "-d", self.get_build_dir('armeabi'), "-p1", "-i", filename)
def copy_file(self, filename, dest):
info("Copy {} to {}".format(filename, dest))
filename = join(self.recipe_dir, filename)
dest = join(self.build_dir, dest)
shutil.copy(filename, dest)
def append_file(self, filename, dest):
info("Append {} to {}".format(filename, dest))
filename = join(self.recipe_dir, filename)
dest = join(self.build_dir, dest)
with open(filename, "rb") as fd:
data = fd.read()
with open(dest, "ab") as fd:
fd.write(data)
# def has_marker(self, marker):
# """
# Return True if the current build directory has the marker set
# """
# return exists(join(self.build_dir, ".{}".format(marker)))
# def set_marker(self, marker):
# """
# Set a marker info the current build directory
# """
# with open(join(self.build_dir, ".{}".format(marker)), "w") as fd:
# fd.write("ok")
# def delete_marker(self, marker):
# """
# Delete a specific marker
# """
# try:
# unlink(join(self.build_dir, ".{}".format(marker)))
# except:
# pass
@property
def name(self):
modname = self.__class__.__module__
return modname.split(".", 2)[-1]
# @property
# def archive_fn(self):
# bfn = basename(self.url.format(version=self.version))
# fn = "{}/{}-{}".format(
# self.ctx.cache_dir,
# self.name, bfn)
# return fn
@property
def filtered_archs(self):
'''Return archs of self.ctx that are valid build archs for the Recipe.'''
result = []
for arch in self.ctx.archs:
if not self.archs or (arch.arch in self.archs):
result.append(arch)
return result
def get_build_container_dir(self, arch):
'''Given the arch name, returns the directory where it will be built.'''
return join(self.ctx.build_dir, 'other_builds', self.name, arch)
def get_build_dir(self, arch):
'''Given the arch name, returns the directory where the
downloaded/copied package will be built.'''
# if self.url is not None:
# return join(self.get_build_container_dir(arch),
# get_directory(self.versioned_url))
return join(self.get_build_container_dir(arch), self.name)
def get_recipe_dir(self):
# AND: Redundant, an equivalent property is already set by get_recipe
return join(self.ctx.root_dir, 'recipes', self.name)
# Public Recipe API to be subclassed if needed
def ensure_build_container_dir(self):
info_main('Preparing build dir for {}'.format(self.name))
build_dir = self.get_build_container_dir('armeabi')
ensure_dir(build_dir)
def download_if_necessary(self):
info_main('Downloading {}'.format(self.name))
user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
if user_dir is not None:
info('P4A_{}_DIR is set, skipping download for {}'.format(
self.name, self.name))
return
self.download()
def download(self):
if self.url is None:
info('Skipping {} download as no URL is set'.format(self.name))
return
url = self.versioned_url
shprint(sh.mkdir, '-p', join(self.ctx.packages_path, self.name))
with current_directory(join(self.ctx.packages_path, self.name)):
filename = shprint(sh.basename, url).stdout[:-1].decode('utf-8')
do_download = True
marker_filename = '.mark-{}'.format(filename)
if exists(filename):
if not exists(marker_filename):
shprint(sh.rm, filename)
elif self.md5sum:
current_md5 = shprint(sh.md5sum, filename)
print('downloaded md5: {}'.format(current_md5))
print('expected md5: {}'.format(self.md5sum))
print('md5 not handled yet, exiting')
exit(1)
else:
do_download = False
info('{} download already cached, skipping'.format(self.name))
# Should check headers here!
warning('Should check headers here! Skipping for now.')
# If we got this far, we will download
if do_download:
print('Downloading {} from {}'.format(self.name, url))
shprint(sh.rm, '-f', marker_filename)
self.download_file(url, filename)
shprint(sh.touch, marker_filename)
                if self.md5sum is not None:
                    current_md5 = shprint(sh.md5sum, filename)
                    print('downloaded md5: {}'.format(current_md5))
print('expected md5: {}'.format(self.md5sum))
print('md5 not handled yet, exiting')
exit(1)
def unpack(self, arch):
info_main('Unpacking {} for {}'.format(self.name, arch))
build_dir = self.get_build_container_dir(arch)
user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
if user_dir is not None:
info('P4A_{}_DIR exists, symlinking instead'.format(
self.name.lower()))
# AND: Currently there's something wrong if I use ln, fix this
warning('Using git clone instead of symlink...fix this!')
if exists(self.get_build_dir(arch)):
return
shprint(sh.rm, '-rf', build_dir)
shprint(sh.mkdir, '-p', build_dir)
shprint(sh.rmdir, build_dir)
ensure_dir(build_dir)
# shprint(sh.ln, '-s', user_dir, join(build_dir, get_directory(self.versioned_url)))
shprint(sh.git, 'clone', user_dir, self.get_build_dir('armeabi'))
return
if self.url is None:
info('Skipping {} unpack as no URL is set'.format(self.name))
return
filename = shprint(sh.basename, self.versioned_url).stdout[:-1].decode('utf-8')
# AND: TODO: Use tito's better unpacking method
with current_directory(build_dir):
directory_name = self.get_build_dir(arch)
# AND: Could use tito's get_archive_rootdir here
if not exists(directory_name) or not isdir(directory_name):
extraction_filename = join(self.ctx.packages_path, self.name, filename)
if (extraction_filename.endswith('.tar.gz') or
extraction_filename.endswith('.tgz')):
sh.tar('xzf', extraction_filename)
root_directory = shprint(
sh.tar, 'tzf', extraction_filename).stdout.decode(
'utf-8').split('\n')[0].strip('/')
if root_directory != directory_name:
shprint(sh.mv, root_directory, directory_name)
elif (extraction_filename.endswith('.tar.bz2') or
extraction_filename.endswith('.tbz2')):
info('Extracting {} at {}'.format(extraction_filename, filename))
sh.tar('xjf', extraction_filename)
root_directory = sh.tar('tjf', extraction_filename).stdout.decode(
'utf-8').split('\n')[0].strip('/')
if root_directory != directory_name:
shprint(sh.mv, root_directory, directory_name)
elif extraction_filename.endswith('.zip'):
sh.unzip(extraction_filename)
import zipfile
fileh = zipfile.ZipFile(extraction_filename, 'r')
root_directory = fileh.filelist[0].filename.strip('/')
if root_directory != directory_name:
shprint(sh.mv, root_directory, directory_name)
else:
                    raise Exception('Could not extract {} download, it must be '
                                    '.zip, .tar.gz or .tar.bz2'.format(
                                        extraction_filename))
else:
info('{} is already unpacked, skipping'.format(self.name))
def get_recipe_env(self, arch=None):
"""Return the env specialized for the recipe
"""
if arch is None:
arch = self.filtered_archs[0]
return arch.get_env()
# @property
# def archive_root(self):
# key = "{}.archive_root".format(self.name)
# value = self.ctx.state.get(key)
# if not key:
# value = self.get_archive_rootdir(self.archive_fn)
# self.ctx.state[key] = value
# return value
# def execute(self):
# if self.custom_dir:
# self.ctx.state.remove_all(self.name)
# self.download()
# self.extract()
# self.build_all()
# AND: Will need to change how this works
# @property
# def custom_dir(self):
# """Check if there is a variable name to specify a custom version /
# directory to use instead of the current url.
# """
# d = environ.get("P4A_{}_DIR".format(self.name.lower()))
# if not d:
# return
# if not exists(d):
# return
# return d
# def prebuild(self):
# self.prebuild_arch(self.ctx.archs[0]) # AND: Need to change
# # this to support
# # multiple archs
# def build(self):
# self.build_arch(self.ctx.archs[0]) # Same here!
# def postbuild(self):
# self.postbuild_arch(self.ctx.archs[0])
def prebuild_arch(self, arch):
prebuild = "prebuild_{}".format(arch.arch)
if hasattr(self, prebuild):
getattr(self, prebuild)()
else:
print('{} has no {}, skipping'.format(self.name, prebuild))
def should_build(self):
'''Should perform any necessary test and return True only if it needs
building again.
'''
return True
def build_arch(self, arch):
build = "build_{}".format(arch.arch)
if hasattr(self, build):
getattr(self, build)()
def postbuild_arch(self, arch):
postbuild = "postbuild_{}".format(arch.arch)
if hasattr(self, postbuild):
getattr(self, postbuild)()
def prepare_build_dir(self, arch):
'''Copies the recipe data into a build dir for the given arch. By
default, this unpacks a downloaded recipe. You should override
it (or use a Recipe subclass with different behaviour) if you
want to do something else.
'''
self.unpack(arch)
@classmethod
def list_recipes(cls):
forbidden_dirs = ('__pycache__', )
recipes_dir = join(dirname(__file__), "recipes")
for name in listdir(recipes_dir):
if name in forbidden_dirs:
continue
fn = join(recipes_dir, name)
if isdir(fn):
yield name
@classmethod
def get_recipe(cls, name, ctx):
if not hasattr(cls, "recipes"):
cls.recipes = {}
if name in cls.recipes:
return cls.recipes[name]
mod = importlib.import_module("pythonforandroid.recipes.{}".format(name))
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[1])
recipe = mod.recipe
recipe.recipe_dir = join(ctx.root_dir, "recipes", name)
recipe.ctx = ctx
return recipe
class IncludedFilesBehaviour(object):
'''Recipe mixin class that will automatically unpack files included in
the recipe directory.'''
src_filename = None
def prepare_build_dir(self, arch):
if self.src_filename is None:
print('IncludedFilesBehaviour failed: no src_filename specified')
exit(1)
shprint(sh.cp, '-a', join(self.get_recipe_dir(), self.src_filename),
self.get_build_dir(arch))
class NDKRecipe(Recipe):
'''A recipe class for recipes built in an Android project jni dir with
    an Android.mk. These are not cached separately, but built in the
bootstrap's own building directory.
In the future they should probably also copy their contents from a
standalone set of ndk recipes, but for now the bootstraps include
all their recipe code.
'''
dir_name = None # The name of the recipe build folder in the jni dir
def get_build_container_dir(self, arch):
return self.get_jni_dir()
def get_build_dir(self, arch):
if self.dir_name is None:
raise ValueError('{} recipe doesn\'t define a dir_name, but '
'this is necessary'.format(self.name))
return join(self.get_build_container_dir(arch), self.dir_name)
def get_jni_dir(self):
return join(self.ctx.bootstrap.build_dir, 'jni')
# def download_if_necessary(self):
# info_main('Downloading {}'.format(self.name))
    #     info('{} is an NDK recipe, it is already included in the '
# 'bootstrap (for now), so skipping'.format(self.name))
# # Do nothing; in the future an NDKRecipe can copy its
# # contents to the bootstrap build dir, but for now each
# # bootstrap already includes available recipes (as was
# # already the case in p4a)
# def prepare_build_dir(self, arch):
# info_main('Unpacking {} for {}'.format(self.name, arch))
# info('{} is included in the bootstrap, unpacking currently '
# 'unnecessary, so skipping'.format(self.name))
class PythonRecipe(Recipe):
site_packages_name = None # The name of the module in
# site_packages (i.e. as a python
# module)
def should_build(self):
# AND: This should be different for each arch and use some
# kind of data store to know what has been built in a given
# python env
print('name is', self.site_packages_name, type(self))
name = self.site_packages_name
if name is None:
name = self.name
if exists(join(self.ctx.get_site_packages_dir(), name)):
info('Python package already exists in site-packages')
return False
print('site packages', self.ctx.get_site_packages_dir())
info('{} apparently isn\'t already in site-packages'.format(name))
return True
def build_arch(self, arch):
'''Install the Python module by calling setup.py install with
the target Python dir.'''
super(PythonRecipe, self).build_arch(arch)
self.install_python_package()
# @cache_execution
# def install(self):
# self.install_python_package()
# self.reduce_python_package()
def install_python_package(self, name=None, env=None, is_dir=True):
'''Automate the installation of a Python package (or a cython
package where the cython components are pre-built).'''
arch = self.filtered_archs[0]
if name is None:
name = self.name
if env is None:
env = self.get_recipe_env(arch)
info('Installing {} into site-packages'.format(self.name))
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.ctx.hostpython)
shprint(hostpython, 'setup.py', 'install', '-O2', _env=env)
# def install_python_package(self, name=None, env=None, is_dir=True):
# """Automate the installation of a Python package into the target
# site-packages.
# It will works with the first filtered_archs, and the name of the recipe.
# """
# arch = self.filtered_archs[0]
# if name is None:
# name = self.name
# if env is None:
# env = self.get_recipe_env(arch)
# print("Install {} into the site-packages".format(name))
# build_dir = self.get_build_dir(arch.arch)
# chdir(build_dir)
# hostpython = sh.Command(self.ctx.hostpython)
# iosbuild = join(build_dir, "iosbuild")
# shprint(hostpython, "setup.py", "install", "-O2",
# "--prefix", iosbuild,
# _env=env)
# dest_dir = join(self.ctx.site_packages_dir, name)
# if is_dir:
# if exists(dest_dir):
# shutil.rmtree(dest_dir)
# func = shutil.copytree
# else:
# func = shutil.copy
# func(
# join(iosbuild, "lib",
# self.ctx.python_ver_dir, "site-packages", name),
# dest_dir)
# def reduce_python_package(self):
# """Feel free to remove things you don't want in the final
# site-packages.
# """
# pass
class CompiledComponentsPythonRecipe(PythonRecipe):
pre_build_ext = False
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch) # AND: Having to directly call the
# method like this is nasty...could
# use tito's method of having an
# install method that always runs
# after everything else but isn't
# used by a normal recipe.
self.build_compiled_components(arch)
self.install_python_package()
def build_compiled_components(self, arch):
info('Building compiled components in {}'.format(self.name))
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.ctx.hostpython)
shprint(hostpython, 'setup.py', 'build_ext', '-v')
build_dir = glob.glob('build/lib.*')[0]
shprint(sh.find, build_dir, '-name', '"*.o"', '-exec',
env['STRIP'], '{}', ';', _env=env)
class CythonRecipe(PythonRecipe):
pre_build_ext = False
cythonize = True
def build_arch(self, arch):
'''Build any cython components, then install the Python module by
calling setup.py install with the target Python dir.
'''
Recipe.build_arch(self, arch) # AND: Having to directly call the
# method like this is nasty...could
# use tito's method of having an
# install method that always runs
# after everything else but isn't
# used by a normal recipe.
self.build_cython_components(arch)
self.install_python_package()
def build_cython_components(self, arch):
# AND: Should we use tito's cythonize methods? How do they work?
info('Cythonizing anything necessary in {}'.format(self.name))
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.ctx.hostpython)
info('Trying first build of {} to get cython files: this is '
'expected to fail'.format(self.name))
try:
shprint(hostpython, 'setup.py', 'build_ext', _env=env)
except sh.ErrorReturnCode_1:
print()
info('{} first build failed (as expected)'.format(self.name))
info('Running cython where appropriate')
shprint(sh.find, self.get_build_dir('armeabi'), '-iname', '*.pyx', '-exec',
self.ctx.cython, '{}', ';', _env=env)
info('ran cython')
shprint(hostpython, 'setup.py', 'build_ext', '-v', _env=env)
print('stripping')
build_lib = glob.glob('./build/lib*')
shprint(sh.find, build_lib[0], '-name', '*.o', '-exec',
env['STRIP'], '{}', ';', _env=env)
print('stripped!?')
# exit(1)
# def cythonize_file(self, filename):
# if filename.startswith(self.build_dir):
# filename = filename[len(self.build_dir) + 1:]
# print("Cythonize {}".format(filename))
# cmd = sh.Command(join(self.ctx.root_dir, "tools", "cythonize.py"))
# shprint(cmd, filename)
# def cythonize_build(self):
# if not self.cythonize:
# return
# root_dir = self.build_dir
# for root, dirnames, filenames in walk(root_dir):
# for filename in fnmatch.filter(filenames, "*.pyx"):
# self.cythonize_file(join(root, filename))
# def biglink(self):
# dirs = []
# for root, dirnames, filenames in walk(self.build_dir):
# if fnmatch.filter(filenames, "*.so.libs"):
# dirs.append(root)
# cmd = sh.Command(join(self.ctx.root_dir, "tools", "biglink"))
# shprint(cmd, join(self.build_dir, "lib{}.a".format(self.name)), *dirs)
def get_recipe_env(self, arch):
env = super(CythonRecipe, self).get_recipe_env(arch)
env['LDFLAGS'] = env['LDFLAGS'] + ' -L{}'.format(
self.ctx.get_libs_dir(arch.arch))
env['LDSHARED'] = join(self.ctx.root_dir, 'tools', 'liblink')
env['LIBLINK'] = 'NOTNONE'
env['NDKPLATFORM'] = self.ctx.ndk_platform
# Every recipe uses its own liblink path, object files are collected and biglinked later
liblink_path = join(self.get_build_container_dir(arch.arch), 'objects_{}'.format(self.name))
env['LIBLINK_PATH'] = liblink_path
ensure_dir(liblink_path)
return env
def build_recipes(names, ctx):
# Put recipes in correct build order
graph = Graph()
recipe_to_load = set(names)
bs = ctx.bootstrap
if bs.recipe_depends:
info('Bootstrap requires recipes {}'.format(bs.recipe_depends))
recipe_to_load = recipe_to_load.union(set(bs.recipe_depends))
recipe_to_load = list(recipe_to_load)
recipe_loaded = []
python_modules = []
while recipe_to_load:
name = recipe_to_load.pop(0)
if name in recipe_loaded:
continue
try:
recipe = Recipe.get_recipe(name, ctx)
except ImportError:
info('No recipe named {}; will attempt to install with pip'.format(name))
python_modules.append(name)
continue
graph.add(name, name)
info('Loaded recipe {} (depends on {})'.format(name, recipe.depends))
for depend in recipe.depends:
graph.add(name, depend)
recipe_to_load += recipe.depends
recipe_loaded.append(name)
build_order = list(graph.find_order())
info("Recipe build order is {}".format(build_order))
ctx.recipe_build_order = build_order
recipes = [Recipe.get_recipe(name, ctx) for name in build_order]
# download is arch independent
info_main('# Downloading recipes ')
for recipe in recipes:
recipe.download_if_necessary()
for arch in ctx.archs:
info_main('# Building all recipes for arch {}'.format(arch.arch))
info_main('# Unpacking recipes')
for recipe in recipes:
ensure_dir(recipe.get_build_container_dir(arch.arch))
recipe.prepare_build_dir(arch.arch)
info_main('# Prebuilding recipes')
# 2) prebuild packages
for recipe in recipes:
info_main('Prebuilding {} for {}'.format(recipe.name, arch.arch))
recipe.prebuild_arch(arch)
# 3) build packages
info_main('# Building recipes')
for recipe in recipes:
info_main('Building {} for {}'.format(recipe.name, arch.arch))
if recipe.should_build():
recipe.build_arch(arch)
else:
info('{} said it is already built, skipping'.format(recipe.name))
# 4) biglink everything
# AND: Should make this optional (could use
info_main('# Biglinking object files')
biglink(ctx, arch)
# 5) postbuild packages
info_main('# Postbuilding recipes')
for recipe in recipes:
info_main('Postbuilding {} for {}'.format(recipe.name, arch.arch))
recipe.postbuild_arch(arch)
info_main('# Installing pure Python modules')
run_pymodules_install(ctx, python_modules)
return
def run_pymodules_install(ctx, modules):
if not modules:
info('There are no Python modules to install, skipping')
return
info('The requirements ({}) don\'t have recipes, attempting to install '
'them with pip'.format(', '.join(modules)))
info('If this fails, it may mean that the module has compiled '
'components and needs a recipe.')
venv = sh.Command(ctx.virtualenv)
with current_directory(join(ctx.build_dir)):
shprint(venv, '--python=python2.7', 'venv')
info('Creating a requirements.txt file for the Python modules')
with open('requirements.txt', 'w') as fileh:
for module in modules:
fileh.write('{}\n'.format(module))
info('Installing Python modules with pip')
# AND: This doesn't work yet
        shprint(sh.bash, '-c', (
            'source venv/bin/activate && env CC=/bin/false CXX=/bin/false '
            "pip install --target '{}' -r requirements.txt").format(
                ctx.get_site_packages_dir()))
def biglink(ctx, arch):
# First, collate object files from each recipe
info('Collating object files from each recipe')
obj_dir = join(ctx.bootstrap.build_dir, 'collated_objects')
ensure_dir(obj_dir)
recipes = [Recipe.get_recipe(name, ctx) for name in ctx.recipe_build_order]
for recipe in recipes:
recipe_obj_dir = join(recipe.get_build_container_dir(arch.arch),
'objects_{}'.format(recipe.name))
if not exists(recipe_obj_dir):
info('{} recipe has no biglinkable files dir, skipping'.format(recipe.name))
continue
files = glob.glob(join(recipe_obj_dir, '*'))
        if not len(files):
            info('{} recipe has no biglinkable files, skipping'.format(recipe.name))
            continue
info('{} recipe has object files, copying'.format(recipe.name))
files.append(obj_dir)
shprint(sh.cp, '-r', *files)
# AND: Shouldn't hardcode ArchAndroid! In reality need separate
# build dirs for each arch
arch = ArchAndroid(ctx)
env = ArchAndroid(ctx).get_env()
env['LDFLAGS'] = env['LDFLAGS'] + ' -L{}'.format(
join(ctx.bootstrap.build_dir, 'obj', 'local', 'armeabi'))
if not len(glob.glob(join(obj_dir, '*'))):
info('There seem to be no libraries to biglink, skipping.')
return
info('Biglinking')
# bl = sh.Command(join(ctx.root_dir, 'tools', 'biglink'))
print('ldflags are', env['LDFLAGS'])
# shprint(bl, join(ctx.libs_dir, 'libpymodules.so'),
# env['LIBLINK_PATH'], _env=env)
biglink_function(
join(ctx.libs_dir, 'libpymodules.so'),
obj_dir.split(' '),
# env['LIBLINK_PATH'].split(' '), # AND: This line should be obselete now
extra_link_dirs=[join(ctx.bootstrap.build_dir, 'obj', 'local', 'armeabi')],
env=env)
def biglink_function(soname, objs_paths, extra_link_dirs=[], env=None):
print('objs_paths are', objs_paths)
sofiles = []
for directory in objs_paths:
for fn in os.listdir(directory):
fn = os.path.join(directory, fn)
if not fn.endswith(".so.o"):
continue
if not os.path.exists(fn[:-2] + ".libs"):
continue
sofiles.append(fn[:-2])
# The raw argument list.
    args = []
for fn in sofiles:
afn = fn + ".o"
libsfn = fn + ".libs"
args.append(afn)
with open(libsfn) as fd:
data = fd.read()
args.extend(data.split(" "))
    unique_args = []
while args:
a = args.pop()
if a in ('-L', ):
continue
if a not in unique_args:
unique_args.insert(0, a)
for dir in extra_link_dirs:
link = '-L{}'.format(dir)
if link not in unique_args:
unique_args.append(link)
# print('Biglink create %s library' % soname)
# print('Biglink arguments:')
# for arg in unique_args:
# print(' %s' % arg)
cc_name = env['CC']
cc = sh.Command(cc_name.split()[0])
cc = cc.bake(*cc_name.split()[1:])
shprint(cc, '-shared', '-O3', '-o', soname, *unique_args, _env=env)
# args = os.environ['CC'].split() + \
# ['-shared', '-O3', '-o', soname] + \
# unique_args
# sys.exit(subprocess.call(args))
def ensure_dir(filename):
if not exists(filename):
makedirs(filename)
def dist_from_args(ctx, dist_args):
'''Parses out any distribution-related arguments, and uses them to
obtain a Distribution class instance for the build.
'''
return Distribution.get_distribution(
ctx,
name=dist_args.dist_name,
recipes=split_argument_list(dist_args.requirements),
allow_download=dist_args.allow_download,
allow_build=dist_args.allow_build,
extra_dist_dirs=split_argument_list(dist_args.extra_dist_dirs),
require_perfect_match=dist_args.require_perfect_match)
def build_dist_from_args(ctx, dist, args_list):
'''Parses out any bootstrap related arguments, and uses them to build
a dist.'''
parser = argparse.ArgumentParser(
        description='Create a new Android project')
parser.add_argument('--bootstrap', help=('The name of the bootstrap type, \'pygame\' '
'or \'sdl2\''),
default='sdl2')
args, unknown = parser.parse_known_args(args_list)
bs = Bootstrap.get_bootstrap(args.bootstrap, ctx)
    info_main('# Creating dist with {} bootstrap'.format(bs.name))
bs.distribution = dist
info('Dist will have name {} and recipes ({})'.format(
dist.name, ', '.join(dist.recipes)))
ctx.dist_name = bs.distribution.name
ctx.prepare_bootstrap(bs)
ctx.prepare_dist(ctx.dist_name)
recipes = dist.recipes
build_recipes(recipes, ctx)
ctx.bootstrap.run_distribute()
info_main('# Your distribution was created successfully, exiting.')
info('Dist can be found at (for now) {}'.format(join(ctx.dist_dir, ctx.dist_name)))
return unknown
def split_argument_list(l):
if not len(l):
return []
    return re.split(r'[ ,]+', l)
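# Illustrative behaviour of the splitter above (the argument value is a
# made-up example, not taken from this project):
#   split_argument_list('python2, kivy numpy')  ->  ['python2', 'kivy', 'numpy']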
# def create_dist
class ToolchainCL(object):
def __init__(self):
self._ctx = None
parser = argparse.ArgumentParser(
description="Tool for managing the iOS / Python toolchain",
usage="""toolchain <command> [<args>]
Currently available commands:
create Build an android project with all recipes
Available commands:
Not yet confirmed
Planned commands:
recipes
distributions
build_dist
symlink_dist
copy_dist
clean_all
status
clean_builds
clean_download_cache
clean_dists
""")
parser.add_argument("command", help="Command to run")
parser.add_argument('--debug', dest='debug', action='store_true',
help='Display debug output and all build info')
# Options for specifying the Distribution
parser.add_argument(
'--dist_name', help='The name of the distribution to use or create',
default='')
parser.add_argument(
'--requirements',
help='Dependencies of your app, should be recipe names or Python modules',
default='')
parser.add_argument(
'--allow_download', help='Allow binary dist download.',
default=False, type=bool)
parser.add_argument(
'--allow_build', help='Allow compilation of a new distribution.',
default=True, type=bool)
parser.add_argument(
'--force_build', help='Force compilation of a new distribution.',
default=False, type=bool)
parser.add_argument(
'--extra_dist_dirs', help='Directories in which to look for distributions',
default='')
parser.add_argument(
'--require_perfect_match', help=('Whether the dist recipes must '
'perfectly match those requested.'),
type=bool, default=False)
args, unknown = parser.parse_known_args(sys.argv[1:])
self.dist_args = args
if args.debug:
logger.setLevel(logging.DEBUG)
# import ipdb
# ipdb.set_trace()
# AND: Fail nicely if the args aren't handled yet
if args.extra_dist_dirs:
warning('Received --extra_dist_dirs but this arg currently is not '
'handled, exiting.')
exit(1)
if args.allow_download:
warning('Received --allow_download but this arg currently is not '
'handled, exiting.')
exit(1)
# if args.allow_build:
# warning('Received --allow_build but this arg currently is not '
# 'handled, exiting.')
# exit(1)
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
getattr(self, args.command)(unknown)
# def build(self):
# parser = argparse.ArgumentParser(
# description="Build the toolchain")
# parser.add_argument("recipe", nargs="+", help="Recipe to compile")
# parser.add_argument("--arch", help="Restrict compilation to this arch")
# args = parser.parse_args(sys.argv[2:])
# ctx = Context()
# # if args.arch:
# # archs = args.arch.split()
# # ctx.archs = [arch for arch in ctx.archs if arch.arch in archs]
# # print("Architectures restricted to: {}".format(archs))
# build_recipes(args.recipe, ctx)
@property
def ctx(self):
if self._ctx is None:
self._ctx = Context()
return self._ctx
def recipes(self, args):
parser = argparse.ArgumentParser(
description="List all the available recipes")
parser.add_argument(
"--compact", action="store_true",
help="Produce a compact list suitable for scripting")
parser.add_argument(
'--color', type=bool, default=True,
help='Whether the output should be coloured')
args = parser.parse_args(args)
if args.compact:
print(" ".join(list(Recipe.list_recipes())))
else:
ctx = self.ctx
for name in Recipe.list_recipes():
recipe = Recipe.get_recipe(name, ctx)
version = str(recipe.version)
if args.color:
print('{Fore.BLUE}{Style.BRIGHT}{recipe.name:<12} '
'{Style.RESET_ALL}{Fore.LIGHTBLUE_EX}'
'{version:<8}{Style.RESET_ALL}'.format(
recipe=recipe, Fore=Fore, Style=Style,
version=version))
print(' {Fore.GREEN}depends: {recipe.depends}'
'{Fore.RESET}'.format(recipe=recipe, Fore=Fore))
if recipe.conflicts:
print(' {Fore.RED}conflicts: {recipe.conflicts}'
'{Fore.RESET}'.format(recipe=recipe, Fore=Fore))
else:
print("{recipe.name:<12} {recipe.version:<8}".format(
recipe=recipe))
print(' depends: {recipe.depends}'.format(recipe=recipe))
print(' conflicts: {recipe.conflicts}'.format(recipe=recipe))
def bootstraps(self, args):
'''List all the bootstraps available to build with.'''
print(list(Bootstrap.list_bootstraps()))
def clean_all(self, args):
        '''Delete all build components: the package cache, package builds,
        bootstrap builds and distributions.'''
parser = argparse.ArgumentParser(
description="Clean the build cache, downloads and dists")
args = parser.parse_args(args)
ctx = Context()
if exists(ctx.build_dir):
shutil.rmtree(ctx.build_dir)
if exists(ctx.dist_dir):
shutil.rmtree(ctx.dist_dir)
if exists(ctx.packages_path):
shutil.rmtree(ctx.packages_path)
def clean_dists(self, args):
'''Delete all compiled distributions in the internal distribution
directory.'''
parser = argparse.ArgumentParser(
description="Delete any distributions that have been built.")
args = parser.parse_args(args)
ctx = Context()
if exists(ctx.dist_dir):
shutil.rmtree(ctx.dist_dir)
def clean_builds(self, args):
'''Delete all build caches for each recipe.
This does *not* delete the package download cache or the final distributions.
'''
parser = argparse.ArgumentParser(
description="Delete all build files (but not download caches)")
args = parser.parse_args(args)
ctx = Context()
        if exists(ctx.build_dir):
            shutil.rmtree(ctx.build_dir)
def clean_download_cache(self, args):
'''
Deletes any downloaded recipe packages.
This does *not* delete the build caches or final distributions.
'''
parser = argparse.ArgumentParser(
description="Delete all download caches")
args = parser.parse_args(args)
ctx = Context()
if exists(ctx.packages_path):
shutil.rmtree(ctx.packages_path)
# def status(self, args):
# parser = argparse.ArgumentParser(
# description="Give a status of the build")
# args = parser.parse_args(args)
# ctx = Context()
# # AND: TODO
# print('This isn\'t implemented yet, but should list all currently existing '
# 'distributions, the modules they include, and all the build caches.')
# exit(1)
@require_prebuilt_dist
def export_dist(self, args):
'''Copies a created dist to an output dir.
This makes it easy to navigate to the dist to investigate it
or call build.py, though you do not in general need to do this
and can use the apk command instead.
'''
parser = argparse.ArgumentParser(
description='Copy a created dist to a given directory')
parser.add_argument('--output', help=('The output dir to copy to'),
required=True)
args = parser.parse_args(args)
ctx = self.ctx
dist = dist_from_args(ctx, self.dist_args)
if dist.needs_build:
info('You asked to export a dist, but there is no dist with suitable '
'recipes available. For now, you must create one first with '
'the create argument.')
exit(1)
shprint(sh.cp, '-r', dist.dist_dir, args.output)
@require_prebuilt_dist
def symlink_dist(self, args):
'''Symlinks a created dist to an output dir.
This makes it easy to navigate to the dist to investigate it
or call build.py, though you do not in general need to do this
and can use the apk command instead.
'''
parser = argparse.ArgumentParser(
description='Symlink a created dist to a given directory')
parser.add_argument('--output', help=('The output dir to copy to'),
required=True)
args = parser.parse_args(args)
ctx = self.ctx
dist = dist_from_args(ctx, self.dist_args)
if dist.needs_build:
info('You asked to symlink a dist, but there is no dist with suitable '
'recipes available. For now, you must create one first with '
'the create argument.')
exit(1)
shprint(sh.ln, '-s', dist.dist_dir, args.output)
# def _get_dist(self):
# ctx = self.ctx
# dist = dist_from_args(ctx, self.dist_args)
@property
def _dist(self):
ctx = self.ctx
dist = dist_from_args(ctx, self.dist_args)
return dist
@require_prebuilt_dist
def apk(self, args):
'''Create an APK using the given distribution.'''
# AND: Need to add a parser here for any extra options
# parser = argparse.ArgumentParser(
# description='Build an APK')
# args = parser.parse_args(args)
ctx = self.ctx
dist = self._dist
# dist = dist_from_args(ctx, self.dist_args)
# if dist.needs_build:
# info('No dist exists that meets your requirements, so one will '
# 'be built.')
# args = build_dist_from_args(ctx, dist, args)
build = imp.load_source('build', join(dist.dist_dir, 'build.py'))
with current_directory(dist.dist_dir):
build.parse_args(args)
shprint(sh.ant, 'debug')
# AND: This is very crude, needs improving. Also only works
# for debug for now.
info_main('# Copying APK to current directory')
apks = glob.glob(join(dist.dist_dir, 'bin', '*-*-debug.apk'))
if len(apks) == 0:
raise ValueError('Couldn\'t find the built APK')
if len(apks) > 1:
info('More than one built APK found...guessing you '
'just built {}'.format(apks[-1]))
shprint(sh.cp, apks[-1], './')
@require_prebuilt_dist
def create(self, args):
'''Create a distribution directory if it doesn't already exist, run
any recipes if necessary, and build the apk.
'''
pass # The decorator does this for us
# ctx = self.ctx
# dist = dist_from_args(ctx, self.dist_args)
# if not dist.needs_build:
# info('You asked to create a distribution, but a dist with this name '
# 'already exists. If you don\'t want to use '
# 'it, you must delete it and rebuild, or create your '
# 'new dist with a different name.')
# exit(1)
# info('Ready to create dist {}, contains recipes {}'.format(
# dist.name, ', '.join(dist.recipes)))
# build_dist_from_args(ctx, dist, args)
def print_context_info(self, args):
'''Prints some debug information about which system paths
python-for-android will internally use for package building, along
with information about where the Android SDK and NDK will be called
from.'''
ctx = Context()
for attribute in ('root_dir', 'build_dir', 'dist_dir', 'libs_dir',
'ccache', 'cython', 'sdk_dir', 'ndk_dir', 'ndk_platform',
'ndk_ver', 'android_api'):
print('{} is {}'.format(attribute, getattr(ctx, attribute)))
def dists(self, args):
'''The same as :meth:`distributions`.'''
self.distributions(args)
def distributions(self, args):
'''Lists all distributions currently available (i.e. that have already
been built).'''
ctx = Context()
dists = Distribution.get_distributions(ctx)
infos = []
for dist in dists:
infos.append('{Fore.GREEN}{Style.BRIGHT}{name}{Style.RESET_ALL}: '
'includes recipes ({Fore.GREEN}{recipes}'
'{Style.RESET_ALL})'.format(
name=dist.name, recipes=', '.join(dist.recipes),
Fore=Fore, Style=Style))
print('{Style.BRIGHT}Distributions stored internally are:'
'{Style.RESET_ALL}'.format(Style=Style, Fore=Fore))
for line in infos:
print('\t' + line)
if __name__ == "__main__":
ToolchainCL()
| mit | -8,061,471,493,450,780,000 | 35.310195 | 182 | 0.563463 | false | 4.062075 | false | false | false |
classner/barrista | barrista/solver.py | 1 | 43153 | # -*- coding: utf-8 -*-
"""Exposes the caffe solvers."""
# pylint: disable=E1101, F0401, C0103, R0913, R0914, W0212, E1121, E0611, W0406
# pylint: disable=duplicate-code, too-many-lines
from __future__ import print_function
from . import monitoring as _monitoring
from . import parallel as _parallel
# CAREFUL! This must be imported pre any caffe-related import!
from .tools import pbufToPyEnum as _pbufToPyEnum
import time as _time
import logging as _logging
import hashlib
import copy
from tempfile import NamedTemporaryFile as _NamedTemporaryFile
import numpy as _np
import google.protobuf.text_format as _gprototext
import caffe as _caffe
import caffe.proto.caffe_pb2 as _caffe_pb2
#: Describes the type of the solver used. All solver types supported by caffe
#: are available.
SolverType = _pbufToPyEnum(_caffe_pb2.SolverParameter.SolverType)
#: Describes the Phase used. All solver types supported by caffe
#: are available.
_Phase = _pbufToPyEnum(_caffe_pb2.Phase)
_HAS_ITER_SIZE = hasattr(_caffe_pb2.SolverParameter, 'iter_size')
try:
_ADAM_SOLVER_CLASS = _caffe.AdamSolver
_ADAM_SOLVER_ENUM = SolverType.ADAM
except AttributeError: # pragma: no cover
_ADAM_SOLVER_CLASS = None
_ADAM_SOLVER_ENUM = None
try:
_ADADELTA_SOLVER_CLASS = _caffe.AdaDeltaSolver
_ADADELTA_SOLVER_ENUM = SolverType.ADADELTA
except AttributeError: # pragma: no cover
_ADADELTA_SOLVER_CLASS = None
_ADADELTA_SOLVER_ENUM = None
try:
_ADAGRAD_SOLVER_CLASS = _caffe.AdaGradSolver
_ADAGRAD_SOLVER_ENUM = SolverType.ADAGRAD
except AttributeError: # pragma: no cover
_ADAGRAD_SOLVER_CLASS = None
_ADAGRAD_SOLVER_ENUM = None
try:
_RMSPROP_SOLVER_CLASS = _caffe.RMSPropSolver
_RMSPROP_SOLVER_ENUM = SolverType.RMSPROP
except AttributeError: # pragma: no cover
_RMSPROP_SOLVER_CLASS = None
_RMSPROP_SOLVER_ENUM = None
_LOGGER = _logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class Solver(object):
"""Describes the Solver concept."""
_solver_types = {}
_caffe_solver_type = None
_solver_type = None
def __init__(self, **kwargs):
r"""
Constructor.
:param iter_size: int>0.
The number of batches the gradient is accumulated over (not
available in older caffe versions).
:param lr_policy: string in ['fixed', 'step', ...]
The policy to use to adjust the learning rate during fitting.
Taken from ``solver.cpp``:
* fixed: always return base_lr.
* step: return base_lr \* gamma ^ (floor(iter / step))
* exp: return base_lr \* gamma ^ iter
* inv: return base_lr \* (1 + gamma \* iter) ^ (- power)
        * multistep: similar to step but it allows non-uniform steps defined
          by stepvalue
        * poly: the effective learning rate follows a polynomial decay, to be
          zero by the max_iter. return base_lr \* (1 - iter/max_iter) ^ (power)
        * sigmoid: the effective learning rate follows a sigmoid decay
          return base_lr \* (1/(1 + exp(-gamma \* (iter - stepsize))))
:param base_lr: float or None.
The base learning rate to use.
:param gamma: float or None.
:param power: float or None.
:param weight_decay: float or None.
Use weight decay to reduce the weights at each step.
:param regularization_type: string in ['L1', 'L2'].
Specifies how the ``weight_decay`` is applied.
:param step_stepsize: float or None.
The stepsize for the step policy.
:param stepvalue: list(int) or None.
The stepvalue parameter for the multistep policy.
:param clip_gradients: float or None.
Clips the gradients to the specified value.
:param random_seed: int>0 or None.
If specified, seeds the solver for reproducible results. Otherwise,
it uses a time dependent seed.
:param snapshot_prefix: string or None.
If the ``Checkpointer`` monitor is used, this prefix is used to
create the snapshots.
:param debug_info: bool.
If set to ``True``, gives additional output in the logs.
"""
self._net = None
self._parameter_hash = None
self._parameter_dict = dict()
self.update_parameters(**kwargs)
# some default internal parameters
self._parameter_dict['snapshot_after_train'] = False
self._parameter_dict['solver_type'] = self._caffe_solver_type
        # every solver can append its own assertions or overwrite the given ones
self._asserts = []
if _HAS_ITER_SIZE:
self._asserts.append(self.Assert_iter_size)
self._asserts.append(self.Assert_regularization_types)
self._asserts.append(self.Assert_policy)
self._solver = None
self._print_warning = False
self._train_net_dummy = None
self._test_net_dummy = None
self._parallel_train_filler = None
self._parallel_test_filler = None
self._parallel_batch_res_train = None
self._parallel_batch_res_test = None
def restore(self, filename, net=None):
"""Restore the solverstate from a file."""
if self._net is None:
assert net is not None, ('you must specify a net on which the '
'restored solver will be used!')
if net is not None:
# The method self._Update_net must not be used here, since it
# is allowed to use a new net.
self._net = net
self._Update_solver()
self._solver.restore(filename)
@classmethod
def Get_required_arguments(cls):
"""The minimum number of required parameters."""
return ['base_lr']
@classmethod
def Get_optional_arguments(cls):
"""
Get the optional parameters.
        Some optional parameters default to None; not all combinations are
        possible. This is enforced by various asserts when calling
        Get_parameter_dict().
"""
ret_dict = {'debug_info': False,
'weight_decay': None,
'lr_policy': 'fixed',
'regularization_type': 'L2',
'power': None,
'gamma': None,
'snapshot_prefix': None,
'stepsize': None,
'stepvalue': None,
'clip_gradients': None,
'random_seed': None,
'net': None}
if _HAS_ITER_SIZE:
ret_dict['iter_size'] = 1
return ret_dict
def fit(self, # pylint: disable=too-many-statements, too-many-branches
iterations,
X=None,
X_val=None,
input_processing_flags=None,
test_iterations=0,
test_interval=0,
test_initialization=False,
train_callbacks=None,
test_callbacks=None,
net=None,
read_input_batch_size_from_blob_name=None,
use_fit_phase_for_validation=False,
allow_test_phase_for_train=False,
shuffle=False):
r"""
        Fit the network to specific data.
Use monitors from the module :py:mod:`barrista.monitoring` as
callbacks to monitor the state of the net and create checkpoints.
        This method offers the following kwargs to monitors (* indicates
        that the values are only available at test time, - indicates that
        the value is not necessarily available):
* max_iter,
* iter,
* batch_size,
* net,
* testnet\[only if there is a test phase, i.e., X_val is set]
* solver,
* callback_signal\[is automatically set by the fit function],
* X\-[only if provided by the user],
* X_val\-[only if provided by the user],
* [the following fields are only set if the corresponding
loss/accuracy layer exists for the train and/or test phase.
It can also be set by providing a custom ResultExtractor]
* loss\-,
* test_loss\*,
* accuracy\-,
* test_accuracy\*-,
:param iterations: int.
The number of training iterations to do. This is the plain number
of iterations, completely disregarding the batch size, i.e., for
``iterations`` being 10 and ``batch_size`` being 10, just one batch
is forward propagated.
:param X: dict of numpy.ndarray or None.
If specified, is used as input data. It is used sequentially, so
            shuffle it beforehand, if required. The keys of the dict have to have
a corresponding layer name in the net.
:param X_val: dict of numpy.ndarray or None.
If specified and ``test_interval>0``, it is used as input data.
            It is used sequentially, so shuffle it beforehand, if required. The
keys of the dict have to have a corresponding layer name in
the net.
:param input_processing_flags: dict(string, string) or None.
See ``CyclingDataMonitor.__init__`` for the ``input_processing_flags``
parameter. In short, if you specify your sample via list, you may
specify for each blob, whether they should be padded 'p', or
resized 'r' to match the network input size. If they fit perfectly,
you may specify 'n' or omit the parameter and use ``None``.
:param test_iterations: int.
The number of test iterations to determine the validation score,
if ``test_interval>0``.
:param test_interval: int.
The number of iterations between runs on the validation set. Is
specified in plain iterations, disregarding batch size. Hence, it
must be a multiple of the batch size.
:param test_initialization: bool.
            Whether to do a run on the validation set before the training is
started to get an initial score.
:param train_callbacks: list(barrista.monitoring.Monitor).
            List of callback callables. Will be called before and after each
            training batch is processed. This list will be processed
sequentially, meaning that monitors in the sequence can
provide information for later monitors as done with
ResultExtractor.
:param test_callbacks: list(callable).
            List of callback callables. Will be called before and after
            testing, and before and after each test batch is processed.
This list will be processed sequentially, meaning that
monitors in the sequence can provide information for later
monitors as done with ResultExtractor.
:param read_input_batch_size_from_blob_name: string.
The name of the layer to take the input batch size from (as the
first dimension of its first blob). Must be specified if the
network does not have explicit inputs (e.g., when trained from
an LMDB).
:param use_fit_phase_for_validation: bool.
If set to True, do not change the phase of the net for running
a validation step during training. This can be helpful to reduce
memory consumption. This ignores the TEST phase of the net completely,
but it's not necessary to use it if the data is provided by the
Python layers.
:param allow_test_phase_for_train: bool.
If set to True, allow using a network in its TEST phase to be trained.
            May make sense in exotic settings; the check exists to prevent
            bugs. If not set to True, an AssertionError is raised in this
            scenario.
Why is this so important? The ``DropoutLayer`` and ``PoolLayer`` (in
the case of stochastic pooling) are sensitive to this parameter and
results are very different for the two settings.
:param shuffle: bool.
If set to True, shuffle the training data every epoch. The test data
is not shuffled. Default: False.
"""
if net is not None:
from barrista import net as _net
assert isinstance(net, _net.Net), (
'net must be an instance of barrista.net.Net')
self._Update_net(net)
        assert self._net is not None, (
            'neither the solver was initialized with a net nor '
            'the fit function was called with one')
assert self._net._mode == _Phase.TRAIN or allow_test_phase_for_train, (
'The network must be in TRAIN phase for fitting! If you really '
'want to, you can override this requirement by setting '
'the optional parameter `allow_test_phase_for_train` to True.'
)
train_callbacks = self._Assert_callbacks(self._net,
train_callbacks,
'train')
testnet = self._Init_testnet(test_interval,
use_fit_phase_for_validation)
if testnet is not None:
test_callbacks = self._Assert_callbacks(testnet,
test_callbacks,
'test')
else:
test_callbacks = []
batch_size, test_iterations = self._Get_batch_size(
self._net,
testnet,
test_interval,
test_iterations,
X_val,
read_input_batch_size_from_blob_name)
self._Assert_iterations(
batch_size,
iterations,
test_interval,
test_iterations,
self._parameter_dict.get('stepvalue')
)
if self._parameter_dict.get('stepvalue') is not None:
self._parameter_dict['stepvalue'] = [
val / batch_size for val in self._parameter_dict['stepvalue']]
self._Init_cycling_monitor(X,
X_val,
input_processing_flags,
batch_size,
test_interval,
train_callbacks,
test_callbacks,
shuffle)
run_pre = True
iteration = 0
cbparams = dict()
cbparams['max_iter'] = iterations
cbparams['batch_size'] = batch_size
cbparams['iter'] = 0
cbparams['net'] = self._net
cbparams['testnet'] = testnet
cbparams['solver'] = self
cbparams['X'] = X
cbparams['X_val'] = X_val
cbparams['test_iterations'] = test_iterations
cbparams['test_interval'] = test_interval
cbparams['train_callbacks'] = train_callbacks
cbparams['test_callbacks'] = test_callbacks
cbparams['callback_signal'] = 'initialize_train'
for cb in train_callbacks:
cb(cbparams)
if test_interval > 0:
cbparams['callback_signal'] = 'initialize_test'
for cb in test_callbacks:
cb(cbparams)
try:
_parallel.init_prebatch(
self,
self._net,
train_callbacks,
True)
if test_interval > 0:
_parallel.init_prebatch(
self,
testnet,
test_callbacks,
False)
while iteration <= iterations:
cbparams['iter'] = iteration
# Check whether to test the net.
if (( # pylint: disable=too-many-boolean-expressions
test_interval > 0 and
iteration % test_interval == 0 and iteration > 0
) or (
iteration == 0 and test_initialization
) or (
test_interval > 0 and iteration + batch_size > iterations
)
):
###############################################################
# testing loop
###############################################################
test_iter = 0
run_pre = True
# Pretest gets called if necessary in `run_prebatch`.
while test_iter < test_iterations:
cbparams['callback_signal'] = 'pre_test_batch'
_parallel.run_prebatch(
self,
test_callbacks,
cbparams,
False,
cbparams['iter'],
run_pre)
# pylint: disable=W0212
testnet._forward(0, len(testnet.layers) - 1)
cbparams['callback_signal'] = 'post_test_batch'
for cb in test_callbacks:
cb(cbparams)
test_iter += batch_size
run_pre = False
cbparams['callback_signal'] = 'post_test'
for cb in test_callbacks:
cb(cbparams)
run_pre = True
if iteration == iterations:
break
###################################################################
# training loop
###################################################################
# `pre_fit` gets called if necessary in `run_prebatch`.
PRETRBATCH_BEGINPOINT = _time.time()
cbparams['callback_signal'] = 'pre_train_batch'
_parallel.run_prebatch(
self,
train_callbacks,
cbparams,
True,
cbparams['iter'] + batch_size,
run_pre)
run_pre = False
PRETRBATCH_DURATION = _time.time() - PRETRBATCH_BEGINPOINT
_LOGGER.debug("Pre-batch preparation time: %03.3fs.",
PRETRBATCH_DURATION)
TRBATCH_BEGINPOINT = _time.time()
self.step(1)
TRBATCH_DURATION = _time.time() - TRBATCH_BEGINPOINT
_LOGGER.debug("Batch processing time: %03.3fs.",
TRBATCH_DURATION)
POSTTRBATCH_BEGINPOINT = _time.time()
cbparams['callback_signal'] = 'post_train_batch'
for cb in train_callbacks:
cb(cbparams)
POSTTRBATCH_DURATION = _time.time() - POSTTRBATCH_BEGINPOINT
_LOGGER.debug("Post-batch processing time: %03.3fs.",
POSTTRBATCH_DURATION)
iteration += batch_size
finally:
for cb in set(train_callbacks + test_callbacks):
if not isinstance(cb, _monitoring.ParallelMonitor):
try:
cb.finalize(cbparams)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.fatal(str(ex))
continue
_parallel.finalize_prebatch(self, cbparams)
if self._parameter_dict.get('stepvalue') is not None:
self._parameter_dict['stepvalue'] = [
val * batch_size for val in self._parameter_dict['stepvalue']]
def step(self, number_of_batches):
"""Run ``number_of_batches`` solver steps."""
tmp_hash = self.Get_parameter_hash(self.Get_parameter_dict())
if self._parameter_hash != tmp_hash:
if self._print_warning: # pragma: no cover
_LOGGER.warn('WARNING: ---------------------------------------------')
_LOGGER.warn('you are re-initializing a new solver which will delete')
_LOGGER.warn('the weight history of the solver.')
_LOGGER.warn('Only use this option if you know what you are doing!')
self._print_warning = False
self._Update_solver()
return self._solver.step(number_of_batches)
def Get_parameter_dict(self):
"""Get the solver describing parameters in a dictionary."""
        # work off our stack of assertions, then return a shallow copy of the dict
for Tmp_assert in self._asserts:
assert Tmp_assert()
return copy.copy(self._parameter_dict)
def Assert_iter_size(self):
"""Enforce the parameter constraints."""
return self._parameter_dict['iter_size'] > 0
def Assert_regularization_types(self):
"""Enforce the parameter constraints."""
return self._parameter_dict['regularization_type'] in ['L1', 'L2']
def Assert_policy(self): # pylint: disable=R0911
"""Enforce the parameter constraints."""
        # although redundant, this allows a quick check of what is really
        # required without loading the actual net, which might take a bit
        # of time
if self._parameter_dict['lr_policy'] == 'fixed':
return 'base_lr' in self._parameter_dict
if self._parameter_dict['lr_policy'] == 'step':
return 'gamma' in self._parameter_dict
if self._parameter_dict['lr_policy'] == 'exp':
return 'gamma' in self._parameter_dict
if self._parameter_dict['lr_policy'] == 'inv':
return ('gamma' in self._parameter_dict and
'power' in self._parameter_dict)
if self._parameter_dict['lr_policy'] == 'multistep':
return ('stepvalue' in self._parameter_dict and
'base_lr' in self._parameter_dict and
'gamma' in self._parameter_dict)
if self._parameter_dict['lr_policy'] == 'poly':
return 'power' in self._parameter_dict
if self._parameter_dict['lr_policy'] == 'sigmoid':
return 'stepsize' in self._parameter_dict
return False
@classmethod
def Get_parameter_hash(cls, solver_parameter_dict):
"""Get a has of the parameter dict."""
hash_obj = hashlib.md5()
for key in sorted(solver_parameter_dict.keys()):
hash_obj.update(str(key).encode('utf-8'))
hash_obj.update(str(solver_parameter_dict[key]).encode('utf-8'))
return str(hash_obj.hexdigest())
@classmethod
def Get_caffe_solver_instance(cls, solver_parameter_dict, net):
"""Get a caffe solver object."""
        # now we actually create an instance of the solver
solver_message = _caffe_pb2.SolverParameter(**solver_parameter_dict)
messagestr = _gprototext.MessageToString(solver_message)
with _NamedTemporaryFile(mode='w+b', suffix='.prototxt') as tmpfile:
tmpfile.write(bytes(messagestr.encode('utf-8')))
tmpfile.flush()
try:
# Newer version of caffe with full solver init support.
return cls.Get_caffe_solver_class(
solver_parameter_dict['solver_type'])._caffe_solver_class(
tmpfile.name, net, _caffe._caffe.NetVec(), True)
except TypeError:
# Fallback for older, patched versions.
return cls.Get_caffe_solver_class(
solver_parameter_dict['solver_type'])._caffe_solver_class(
tmpfile.name, net)
raise Exception('could not initialize solver class')
@classmethod
def Get_solver_class(cls, solver_type):
"""Get the solver class as string."""
return cls._solver_types[solver_type]
@classmethod
def Get_caffe_solver_class(cls, caffe_solver_type):
"""Get the solver class as ``caffe_solver_type``."""
return cls._solver_types[caffe_solver_type]
@classmethod
def Register_solver(cls, solver_class):
"""Register a solver class."""
assert issubclass(solver_class, Solver)
        if solver_class._solver_type in cls._solver_types:
            raise Exception(
                ' '.join(['solver',
                          solver_class._solver_type,
                          'already defined']))
        if solver_class._caffe_solver_type in cls._solver_types:
            raise Exception(
                ' '.join(['solver',
                          solver_class._solver_type,
                          'already defined']))
# we register with both access types
cls._solver_types[solver_class._caffe_solver_type] = solver_class
cls._solver_types[solver_class._solver_type] = solver_class
def _Update_solver(self):
"""Re-initialize the solver."""
# we (re-)initialize the solver
self._solver = self.Get_caffe_solver_instance(
self.Get_parameter_dict(),
self._net)
self._parameter_hash = self.Get_parameter_hash(
self.Get_parameter_dict())
# we only want to see the warning once
self._print_warning = True
def update_parameters(self, **kwargs):
"""Update the solver parameters."""
# adding the default keys if they are not yet set
for argument, default in list(self.Get_optional_arguments().items()):
if argument not in self._parameter_dict and default is not None:
self._parameter_dict[argument] = default
# first add all parameters which are actually required
for arg_key, arg_value in list(kwargs.items()):
if arg_key in self.Get_required_arguments():
self._parameter_dict[arg_key] = arg_value
# make sure that all required arguments are set
tmp_required_arguments = set(self.Get_required_arguments())
intersection = tmp_required_arguments.intersection(set(kwargs.keys()))
if intersection != tmp_required_arguments:
raise Exception(' '.join(
['we are missing required arguments',
str(list(kwargs.keys())),
'vs',
str(self.Get_required_arguments())]))
for arg_key, arg_value in list(kwargs.items()):
# the very special case of passing the net
# this will not be passed as a parameter to the parameter dict
# but we will ensure that the net is always the same
# as the one used for initialization
if arg_key == 'net':
self._Update_net(arg_value)
continue
if arg_key in list(self.Get_optional_arguments().keys()):
self._parameter_dict[arg_key] = arg_value
# we make sure that there is no spelling mistake in the kwargs
total_arguments = set(self.Get_required_arguments())
total_arguments = total_arguments.union(
list(self.Get_optional_arguments().keys()))
for argument in list(kwargs.keys()):
if argument not in total_arguments:
raise Exception(' '.join(
['argument', argument, 'is not supported']))
def _Update_net(self, net):
"""Check that the net remains the same."""
# since the user could potentially provide two different nets to
# the solver, which is not supported, thus we check that the net
# has not changed
if net is None:
return
if self._net is not None:
if id(self._net) != id(net):
raise Exception(' '.join(
['a solver works only with one network',
'the network has to remain the same']))
self._net = net
def _Get_batch_size(self, # pylint: disable=R0201
net,
testnet,
test_interval,
test_iterations,
X_val,
read_input_batch_size_from_blob_name):
"""Get the batch size and the test iterations."""
if len(net.inputs) > 0:
# Otherwise, a DB backend is used.
batch_size = net.blobs[net.inputs[0]].data.shape[0]
if testnet is not None:
assert (testnet.blobs[net.inputs[0]].data.shape[0] ==
batch_size), ("Validation and fit network batch size "
"must agree!")
if (test_interval != 0 and
test_iterations == 0 and
X_val is not None):
if isinstance(X_val, dict):
if len(X_val.values()[0]) % batch_size != 0:
_LOGGER.warn(
"The number of test samples is not a multiple "
"of the batch size. Test performance estimates "
"will be slightly off.")
test_iterations = _np.ceil(float(len(X_val.values()[0])) /
float(batch_size)) * batch_size
else:
if len(X_val) % batch_size != 0:
_LOGGER.warn(
"The number of test samples is not a multiple "
"of the batch size. Test performance estimates "
"will be slightly off.")
test_iterations = _np.ceil(float(len(X_val)) /
float(batch_size)) * batch_size
if read_input_batch_size_from_blob_name is not None:
tmp_batch_size = net.blobs[
read_input_batch_size_from_blob_name].data.shape[0]
assert (tmp_batch_size == batch_size), (
"The input size and the first dimension of "
"the blob to read the batch size from don't "
"match: {}, {}.".format(tmp_batch_size, batch_size))
return batch_size, test_iterations
# some kind of backend is used
assert read_input_batch_size_from_blob_name is not None, (
'no inputs thus the batch_size must be determined from a blob')
batch_size = net.blobs[
read_input_batch_size_from_blob_name].data.shape[0]
return batch_size, test_iterations
@classmethod
def _Assert_iterations(cls,
batch_size,
iterations,
test_interval,
test_iterations,
multistep_stepvalue):
"""Make sure iterations follow all of our rules."""
# namely being a multiple of the batch_size
assert iterations % batch_size == 0, (
'Error: iterations do not match {} {}'.format(iterations,
batch_size))
if test_interval > 0:
assert test_iterations > 0, (
'Test iterations must be > 0 but is {}'.format(
test_iterations))
# Set the configurable arguments.
assert test_iterations >= 0, (
'Test iterations must be >= 0 but is {}'.format(
test_iterations))
assert test_interval >= 0, (
'Test interval must be >= 0 but is {}'.format(
                test_interval))
        assert test_interval % batch_size == 0, (
            'The test interval must be a multiple of the batch size: '
            '{}, {}'.format(test_interval, batch_size))
if multistep_stepvalue is not None:
for val in multistep_stepvalue:
assert val % batch_size == 0, (
"The step values must be multiples of the batch size "
"(is given in sample iterations)! Is %d, batch size %d." % (
val, batch_size))
@classmethod
def _Assert_callbacks(cls, net, callbacks, phase):
"""Assert the callbacks work properly."""
if callbacks is None:
callbacks = []
assert isinstance(callbacks, list), (
'callbacks have to be in a list {} {}'.format(
str(callbacks), type(callbacks)))
for callback in callbacks:
assert isinstance(callback, _monitoring.Monitor), (
                'a callback has to derive from monitoring.Monitor')
if 'loss' in list(net.blobs.keys()):
callbacks.insert(0, _monitoring.ResultExtractor(
phase + '_loss', 'loss'))
if 'accuracy' in list(net.blobs.keys()):
callbacks.insert(0, _monitoring.ResultExtractor(
phase + '_accuracy', 'accuracy'))
return callbacks
@classmethod
def _Init_cycling_monitor(cls,
X,
X_val,
input_processing_flags,
batch_size,
test_interval,
train_callbacks,
test_callbacks,
shuffle):
"""
        Convenience initialization function.
...such that the user can
simply provide X, X_val dicts and we internally create
the CyclingDataMonitors.
"""
if X is not None:
assert len(list(X.values())[0]) >= batch_size
# safety measure, we do not want to have two different data
# monitors in the same callback list
for callback in train_callbacks:
assert not isinstance(callback, _monitoring.DataMonitor), (
'if we use X we cannot use a data monitor')
tmp_data_monitor = _monitoring.CyclingDataMonitor(
X=X,
input_processing_flags=input_processing_flags,
shuffle=shuffle)
train_callbacks.insert(0, tmp_data_monitor)
if test_interval > 0 and X_val is not None:
assert X_val is not None
if X is not None:
assert len(list(X_val.values())) == len(list(X.values()))
# safety measure, we do not want to have two different data
# monitors in the same callback list
for callback in test_callbacks:
assert not isinstance(callback, _monitoring.DataMonitor), (
'if we use X_val we cannot use a data monitor')
tmp_data_monitor = _monitoring.CyclingDataMonitor(
X=X_val,
input_processing_flags=input_processing_flags)
test_callbacks.insert(0, tmp_data_monitor)
def _Init_testnet(self, test_interval, use_fit_phase_for_validation):
"""Initialize the test phase network."""
testnet = None
if test_interval > 0:
if use_fit_phase_for_validation:
testnet = self._net
else:
# Setup the test net.
test_netspec = self._net._specification.copy()
test_netspec.phase = _Phase.TEST
test_netspec.predict_inputs = None
test_netspec.predict_input_shapes = None
testnet = test_netspec.instantiate()
testnet.share_with(self._net)
return testnet
class SGDSolver(Solver):
r"""
Thin wrapper for the vanilla SGD solver provided by the caffe framework.
:param momentum: float or None.
The momentum to use. Multiplies the former gradient with this factor
and adds it to the gradient in the following step.
"""
_solver_type = 'sgd'
_caffe_solver_type = SolverType.SGD
_caffe_solver_class = _caffe.SGDSolver
def __init__(self, **kwargs):
"""Constructor."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
return Solver.Get_required_arguments()
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
optional_arguments = Solver.Get_optional_arguments()
optional_arguments['momentum'] = 0.0
return optional_arguments
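# Minimal usage sketch for the SGD solver (illustrative values only; `net`
# and the blob names are assumed to come from the caller):
#
#   solver = SGDSolver(base_lr=0.01, momentum=0.9,
#                      lr_policy='step', gamma=0.1, stepsize=10000)
#   solver.fit(10000, X={'data': X_train, 'annotations': y_train}, net=net)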
class AdagradSolver(Solver):
r"""
Thin wrapper for the Adagrad solver provided by the caffe framework.
    To understand how this solver works, please inspect the
    C++ implementation in solver.cpp.
The corresponding publication is called 'Adaptive Subgradient
Methods for Online Learning and Stochastic Optimization' by
John Duchi, Elad Hazan, Yoram Singer
    :param delta: float.
        Numerical stability term added to the adaptive denominator
        [useful choice 1E-8].
"""
_solver_type = 'adagrad'
_caffe_solver_type = _ADAGRAD_SOLVER_ENUM
_caffe_solver_class = _ADAGRAD_SOLVER_CLASS
def __init__(self, **kwargs):
"""See :py:class:`barrista.solver.Solver`."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
required_arguments = Solver.Get_required_arguments()
required_arguments.append('delta')
return required_arguments
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
return Solver.Get_optional_arguments()
class NesterovSolver(Solver):
r"""
Thin wrapper for the Nesterov solver provided by the caffe framework.
    To understand how this solver works, please inspect the
    C++ implementation in solver.cpp.
:param momentum: float or None.
The momentum to use. Multiplies the former gradient with this factor
and adds it to the gradient in the following step.
"""
_solver_type = 'nesterov'
_caffe_solver_type = SolverType.NESTEROV
_caffe_solver_class = _caffe.NesterovSolver
def __init__(self, **kwargs):
"""See :py:class:`barrista.solver.Solver`."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
return Solver.Get_required_arguments()
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
optional_arguments = Solver.Get_optional_arguments()
optional_arguments['momentum'] = 0.0
return optional_arguments
class RMSPropSolver(Solver):
r"""
Thin wrapper for the RMSProp solver provided by the caffe framework.
    To understand how this solver works, please inspect the
    C++ implementation in solver.cpp.
This solver has been discussed in a lecture given by Hinton.
www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
:param rms_decay: float
MeanSquare(t) = rms_decay*MeanSquare(t-1)+(1-rms_decay)*SquareGradient(t)
:param delta: float
numerical stability [useful choice 1E-8]
"""
_solver_type = 'rmsprop'
_caffe_solver_type = _RMSPROP_SOLVER_ENUM
_caffe_solver_class = _RMSPROP_SOLVER_CLASS
def __init__(self, **kwargs):
"""See :py:class:`barrista.solver.Solver`."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
required_arguments = Solver.Get_required_arguments()
required_arguments.append('rms_decay')
required_arguments.append('delta')
return required_arguments
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
return Solver.Get_optional_arguments()
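# The update rule from the docstring above, spelled out as a numpy sketch
# (illustration only; the variable names are hypothetical and this is not
# how caffe applies the update internally):
#
#   mean_square = rms_decay * mean_square + (1. - rms_decay) * grad ** 2
#   weight -= base_lr * grad / (np.sqrt(mean_square) + delta)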
class AdaDeltaSolver(Solver):
r"""
Thin wrapper for the AdaDelta solver provided by the caffe framework.
    To understand how this solver works, please inspect the
    C++ implementation in solver.cpp.
The corresponding arxiv paper is called 'ADADELTA: An Adaptive
Learning Rate Method' by Matthew D. Zeiler.
:param delta: float
numerical stability [useful choice 1E-8]
:param momentum: float or None.
The momentum to use. Multiplies the former gradient with this factor
and adds it to the gradient in the following step.
"""
_solver_type = 'adadelta'
_caffe_solver_type = _ADADELTA_SOLVER_ENUM
_caffe_solver_class = _ADADELTA_SOLVER_CLASS
def __init__(self, **kwargs):
"""See :py:class:`barrista.solver.Solver`."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
required_arguments = Solver.Get_required_arguments()
required_arguments.append('momentum')
return required_arguments
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
optional_arguments = Solver.Get_optional_arguments()
# epsilon
optional_arguments['delta'] = 1E-8
return optional_arguments
class AdamSolver(Solver):
r"""
Thin wrapper for the Adam solver provided by the caffe framework.
    To understand how this solver works, please inspect the
    C++ implementation in solver.cpp.
    The corresponding arxiv paper is called 'Adam: A Method for
    Stochastic Optimization' by Diederik Kingma, Jimmy Ba
:param base_lr: float
[useful choice 0.001]
:param momentum: float.
beta 1 useful default 0.9
:param momentum2: float.
beta 2 useful default 0.999
:param delta: float
numerical stability [useful choice 1E-8]
"""
_solver_type = 'adam'
_caffe_solver_type = _ADAM_SOLVER_ENUM
_caffe_solver_class = _ADAM_SOLVER_CLASS
def __init__(self, **kwargs):
"""See :py:class:`barrista.solver.Solver`."""
Solver.__init__(self, **kwargs)
@classmethod
def Get_required_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
return Solver.Get_required_arguments()
@classmethod
def Get_optional_arguments(cls):
"""See :py:class:`barrista.solver.Solver`."""
optional_arguments = Solver.Get_optional_arguments()
# beta 1
optional_arguments['momentum'] = 0.9
# beta 2
optional_arguments['momentum2'] = 0.999
# epsilon
optional_arguments['delta'] = 1E-8
return optional_arguments
# register the locally specified solvers
Solver.Register_solver(SGDSolver)
Solver.Register_solver(AdagradSolver)
Solver.Register_solver(NesterovSolver)
if _RMSPROP_SOLVER_CLASS is not None:
Solver.Register_solver(RMSPropSolver)
if _ADADELTA_SOLVER_CLASS is not None:
Solver.Register_solver(AdaDeltaSolver)
if _ADAM_SOLVER_CLASS is not None:
Solver.Register_solver(AdamSolver)
Get_solver_class = Solver.Get_solver_class
Get_caffe_solver_class = Solver.Get_caffe_solver_class
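# Registry lookup sketch (illustrative; 'adam' is one of the type strings
# registered above and is only available if the caffe build provides it):
#
#   SolverClass = Get_solver_class('adam')
#   solver = SolverClass(base_lr=0.001)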
| mit | 3,881,279,440,033,057,000 | 38.993513 | 86 | 0.567006 | false | 4.413726 | true | false | false |
shubham1810/aquabrim_project | machine/migrations/0002_auto__add_field_device_user.py | 2 | 4307 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Device.user'
db.add_column(u'machine_device', 'user',
self.gf('django.db.models.fields.related.OneToOneField')(default='', to=orm['auth.User'], unique=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Device.user'
db.delete_column(u'machine_device', 'user_id')
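    # Typical South invocations for this migration (illustrative commands;
    # the migration numbers follow the usual South naming scheme):
    #
    #   python manage.py migrate machine 0002   # apply forwards()
    #   python manage.py migrate machine 0001   # roll back via backwards()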
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'machine.device': {
'Meta': {'object_name': 'Device'},
'field_1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'field_2': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
    complete_apps = ['machine']
| mit | 679,957,161,165,323,500 | 62.352941 | 195 | 0.557465 | false | 3.674915 | false | false | false
litvinchuck/python-workout | ftp/ftptracker.py | 2 | 2541 | import sys
from datetime import datetime
from utils.readable import readable_size, readable_time
class FTPTracker:
"""Tracks ftp upload and download progress. Displays progress bar
Args:
file_size (int): size of tracked file
bar_length (int, optional): length of output bar. Defaults to 50
Attributes:
size_written (int): number of bytes that are already written
file_size (int): size of tracked file
bar_length (int): length of output bar. Defaults to 50
start_time (datetime): ftp transfer start time
"""
def __init__(self, file_size, bar_length=50):
self.size_written = 0
self.file_size = file_size
self.bar_length = bar_length
self.start_time = datetime.now()
def percentage(self):
"""
Returns:
str: completeness percentage in string form.
"""
return '{0:.1f}'.format(100 * (self.size_written / float(self.file_size)))
def bar_filled(self):
"""
Returns:
int: rounded value of how much bar is filled
"""
return round(self.bar_length * self.size_written / float(self.file_size))
def rate(self):
"""
Returns:
float: transfer rate measured in bytes per second
"""
return self.size_written / (datetime.now() - self.start_time).total_seconds()
def eta(self):
"""
Returns:
float: approximately how much time is left
"""
return (self.file_size - self.size_written) / self.rate()
def bar_string(self):
"""
Returns:
str: bar string format
"""
bar_filled = self.bar_filled()
bar = '#' * bar_filled + '-' * (self.bar_length - bar_filled)
return '\r |{bar}| {percentage}% {size_written}/{file_size} {rate}/s {eta}'.format(
bar=bar,
percentage=self.percentage(),
size_written=readable_size(self.size_written).split()[0],
file_size=readable_size(self.file_size).split()[0],
rate=readable_size(self.rate()).split()[0],
eta=readable_time(self.eta())
)
def handle(self, block):
"""Handles bar output"""
self.size_written += len(block)
sys.stdout.write(self.bar_string())
if self.size_written == self.file_size:
sys.stdout.write('\n')
sys.stdout.flush()
sys.stdout.write('\033[K') # Clears the end of the line to prevent output overlapping
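# Usage sketch (added for illustration; the host, credentials and file names
# are hypothetical, and `FTP.size` may require switching the connection to
# binary mode first on some servers):
if __name__ == '__main__':
    from ftplib import FTP
    ftp = FTP('ftp.example.com')
    ftp.login('user', 'password')
    remote_name = 'remote.bin'
    tracker = FTPTracker(ftp.size(remote_name))
    with open('local.bin', 'wb') as fh:
        def writer(block):
            fh.write(block)        # persist the chunk locally
            tracker.handle(block)  # advance the progress bar
        ftp.retrbinary('RETR ' + remote_name, writer)
    ftp.quit()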
| mit | 8,415,032,502,189,293,000 | 31.576923 | 94 | 0.575364 | false | 4.052632 | false | false | false |
neuront/redis-ctl | daemonutils/node_polling.py | 1 | 4016 | import time
import logging
import random
import threading
from socket import error as SocketError
import file_ipc
import stats
from config import NODES_EACH_THREAD
from stats_models import RedisNodeStatus, ProxyStatus
from models.base import db
from models.polling_stat import PollingStat
class Poller(threading.Thread):
def __init__(self, nodes, algalon_client):
threading.Thread.__init__(self)
self.daemon = True
self.nodes = nodes
logging.debug('Poller %x distributed %d nodes',
id(self), len(self.nodes))
self.algalon_client = algalon_client
def run(self):
for node in self.nodes:
logging.debug('Poller %x collect for %s:%d',
id(self), node['host'], node['port'])
node.collect_stats(self._emit_data, self._send_alarm)
def _send_alarm(self, message, trace):
if self.algalon_client is not None:
self.algalon_client.send_alarm(message, trace)
def _emit_data(self, addr, points):
try:
stats.client.write_points(addr, points)
except (SocketError, stats.StatisticError, StandardError), e:
logging.exception(e)
CACHING_NODES = {}
def _load_from(cls, nodes):
def update_node_settings(node, file_settings):
node.suppress_alert = file_settings.get('suppress_alert')
node.balance_plan = file_settings.get('balance_plan')
r = []
for n in nodes:
if (n['host'], n['port']) in CACHING_NODES:
cache_node = CACHING_NODES[(n['host'], n['port'])]
r.append(cache_node)
update_node_settings(cache_node, n)
continue
loaded_node = cls.get_by(n['host'], n['port'])
CACHING_NODES[(n['host'], n['port'])] = loaded_node
update_node_settings(loaded_node, n)
r.append(loaded_node)
return r
def save_polling_stat(nodes, proxies):
nodes_ok = []
nodes_fail = []
proxies_ok = []
proxies_fail = []
for n in nodes:
if n.details['stat']:
nodes_ok.append(n.addr)
else:
nodes_fail.append(n.addr)
for p in proxies:
if p.details['stat']:
proxies_ok.append(p.addr)
else:
proxies_fail.append(p.addr)
db.session.add(PollingStat(nodes_ok, nodes_fail, proxies_ok, proxies_fail))
class NodeStatCollector(threading.Thread):
def __init__(self, app, interval, algalon_client):
threading.Thread.__init__(self)
self.daemon = True
self.app = app
self.interval = interval
self.algalon_client = algalon_client
def _shot(self):
poll = file_ipc.read_poll()
nodes = _load_from(RedisNodeStatus, poll['nodes'])
proxies = _load_from(ProxyStatus, poll['proxies'])
        # commit because `get_by` may create new nodes;
        # to reattach the session they must be persisted
db.session.commit()
all_nodes = nodes + proxies
random.shuffle(all_nodes)
pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD],
self.algalon_client)
for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)]
for p in pollers:
p.start()
time.sleep(self.interval)
for p in pollers:
p.join()
for p in pollers:
for n in p.nodes:
n.add_to_db()
save_polling_stat(nodes, proxies)
db.session.commit()
logging.debug('Total %d nodes, %d proxies', len(nodes), len(proxies))
try:
file_ipc.write_details({n.addr: n.details for n in nodes},
{p.addr: p.details for p in proxies})
except StandardError, e:
logging.exception(e)
def run(self):
with self.app.app_context():
while True:
try:
self._shot()
except Exception as e:
logging.exception(e)
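# Startup sketch (illustrative; `app` would be the Flask application object,
# the interval is in seconds, and the algalon alarm client is optional):
#
#   collector = NodeStatCollector(app, 10, algalon_client=None)
#   collector.start()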
| mit | -7,240,690,717,303,977,000 | 29.892308 | 79 | 0.573456 | false | 3.763824 | false | false | false |
hamelg/Diamond | src/collectors/mysqlstat/mysql55.py | 28 | 8243 | # coding=utf-8
"""
Diamond collector that monitors relevant MySQL performance_schema values
For now only monitors replication load
[Blog](http://bit.ly/PbSkbN) announcement.
[Snippet](http://bit.ly/SHwYhT) to build example graph.
#### Dependencies
* MySQLdb
* MySQL 5.5.3+
"""
from __future__ import division
try:
import MySQLdb
from MySQLdb import MySQLError
except ImportError:
MySQLdb = None
import diamond
import time
import re
class MySQLPerfCollector(diamond.collector.Collector):
def process_config(self):
super(MySQLPerfCollector, self).process_config()
self.db = None
self.last_wait_count = {}
self.last_wait_sum = {}
self.last_timestamp = {}
self.last_data = {}
self.monitors = {
'slave_sql': {
'wait/synch/cond/sql/MYSQL_RELAY_LOG::update_cond':
'wait_for_update',
'wait/io/file/innodb/innodb_data_file':
'innodb_data_file',
'wait/io/file/innodb/innodb_log_file':
'innodb_log_file',
'wait/io/file/myisam/dfile':
'myisam_dfile',
'wait/io/file/myisam/kfile':
'myisam_kfile',
'wait/io/file/sql/binlog':
'binlog',
'wait/io/file/sql/relay_log_info':
'relaylog_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex/innodb':
'innodb_mutex',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
},
'slave_io': {
'wait/io/file/sql/relaylog_index':
'relaylog_index',
'wait/synch/mutex/sql/MYSQL_RELAY_LOG::LOCK_index':
'relaylog_index_lock',
'wait/synch/mutex/sql/Master_info::data_lock':
'master_info_lock',
'wait/synch/mutex/mysys/IO_CACHE::append_buffer_lock':
'append_buffer_lock',
'wait/synch/mutex/sql/LOG::LOCK_log':
'log_lock',
'wait/io/file/sql/master_info':
'master_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
}
}
if self.config['hosts'].__class__.__name__ != 'list':
self.config['hosts'] = [self.config['hosts']]
# Move legacy config format to new format
if 'host' in self.config:
hoststr = "%s:%s@%s:%s/%s" % (
self.config['user'],
self.config['passwd'],
self.config['host'],
self.config['port'],
self.config['db'],
)
self.config['hosts'].append(hoststr)
def get_default_config_help(self):
config_help = super(MySQLPerfCollector, self).get_default_config_help()
config_help.update({
'hosts': 'List of hosts to collect from. Format is ' +
'yourusername:yourpassword@host:' +
'port/performance_schema[/nickname]',
'slave': 'Collect Slave Replication Metrics',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLPerfCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
'slave': 'False',
})
return config
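    # Example 'hosts' entry (hypothetical credentials and nickname):
    #   hosts = monitor:secret@127.0.0.1:3306/performance_schema/master1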
def connect(self, params):
if MySQLdb is None:
self.log.error('Unable to import MySQLdb')
return
try:
self.db = MySQLdb.connect(**params)
except MySQLError, e:
            self.log.error("MySQLPerfCollector couldn't connect to database %s",
                           e)
return {}
self.log.debug('MySQLPerfCollector: Connected to database.')
def query_list(self, query, params):
cursor = self.db.cursor()
cursor.execute(query, params)
return list(cursor.fetchall())
def slave_load(self, nickname, thread):
data = self.query_list("""
SELECT
his.event_name,
his.sum_timer_wait,
his.count_star,
cur.event_name,
UNIX_TIMESTAMP(SYSDATE())
FROM
events_waits_summary_by_thread_by_event_name his
JOIN threads thr USING (thread_id)
JOIN events_waits_current cur USING (thread_id)
WHERE
name = %s
ORDER BY
his.event_name
""", (thread,))
wait_sum = sum([x[1] for x in data])
wait_count = sum([x[2] for x in data])
timestamp = int(time.time())
        if data and len(data[0]) >= 5:
            cur_event_name, timestamp = data[0][3:5]
if thread not in self.last_wait_sum:
# Avoid bogus data
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
return
wait_delta = wait_sum - self.last_wait_sum[thread]
time_delta = (timestamp - self.last_timestamp[thread]) * 1000000000000
if time_delta == 0:
return
        # Append aggregate buckets: innodb mutexes, remaining mutexes,
        # rwlocks, and remaining io waits not covered by named monitors
thread_name = thread[thread.rfind('/') + 1:]
data.append(
['wait/synch/mutex/innodb',
sum([x[1] for x in data
if x[0].startswith('wait/synch/mutex/innodb')])])
data.append(
['wait/synch/mutex',
sum([x[1] for x in data
if (x[0].startswith('wait/synch/mutex') and
x[0] not in self.monitors[thread_name])]) - data[-1][1]])
data.append(
['wait/synch/rwlock',
sum([x[1] for x in data
if x[0].startswith('wait/synch/rwlock')])])
data.append(
['wait/io',
sum([x[1] for x in data
if (x[0].startswith('wait/io') and
x[0] not in self.monitors[thread_name])])])
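        # performance_schema timer columns are expressed in picoseconds, so
        # time_delta above is scaled from seconds to picoseconds; each
        # per-event delta below is published as a percentage of wall time.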
for d in zip(self.last_data[thread], data):
if d[0][0] in self.monitors[thread_name]:
self.publish(nickname + thread_name + '.' +
self.monitors[thread_name][d[0][0]],
(d[1][1] - d[0][1]) / time_delta * 100)
# Also log what's unaccounted for. This is where Actual Work gets done
self.publish(nickname + thread_name + '.other_work',
float(time_delta - wait_delta) / time_delta * 100)
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
def collect(self):
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host)
if not matches:
continue
params = {}
params['host'] = matches.group(3)
try:
params['port'] = int(matches.group(4))
except ValueError:
params['port'] = 3306
params['db'] = matches.group(5)
params['user'] = matches.group(1)
params['passwd'] = matches.group(2)
nickname = matches.group(6)
if len(nickname):
nickname += '.'
self.connect(params=params)
if self.config['slave']:
self.slave_load(nickname, 'thread/sql/slave_io')
self.slave_load(nickname, 'thread/sql/slave_sql')
self.db.close()
| mit | -3,602,694,593,909,790,700 | 32.104418 | 79 | 0.491569 | false | 3.961076 | true | false | false |
pengyuan/markov2tensor | poirank/self_correlation.py | 1 | 2204 | #!/usr/bin/env python
# coding: UTF-8
from __future__ import division
from poirank.transition import init_data, trans
import pylab
poi_axis, axis_poi, data = init_data(tuple(range(2,30)))
#tensor = trans(data, len(axis_poi))
# print "data: ", data
data_length = len(data)
print "data_length: ", data_length
def poi_correlation(order):
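    """Sample autocorrelation of the POI sequence at lag `order`:
    r(order) = sum_{t=0}^{N-order-1} (x[t] - mean) * (x[t+order] - mean)
               / sum_{t=0}^{N-1} (x[t] - mean)**2
    """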
esum = 0.0
for i in range(data_length):
esum = esum + data[i]
expectation = esum / data_length
print "expectation: ", expectation
vsum = 0.0
for j in range(data_length):
vsum += pow((data[j] - expectation), 2)
print "vsum: ", vsum
variance = vsum / data_length
tsum = 0.0
for k in range(data_length - order):
tsum += (data[k] - expectation) * (data[k + order] - expectation)
print "tsum: ", tsum
ar = tsum / vsum
print "ar: ", ar
return ar
order = 1
y_values = []
x_values = []
while order <= 40:
    ar = poi_correlation(order)
y_values.append(ar)
x_values.append(order)
order += 1
pylab.plot(x_values, y_values, 'rs', linewidth=1, linestyle="-",
           label=u"autocorrelation")
pylab.xlabel(u"Markov chain order")
pylab.ylabel(u"Autocorrelation coefficient")
pylab.title(u"Autocorrelation vs. Markov chain order (POI sequence length 3868)")
pylab.legend(loc='center right')
pylab.show()
#(2,3) 26,29
#(2,10) 494,2600
#(2,20) 853,3868
# from scipy import stats
# def measure(n):
# "Measurement model, return two coupled measurements."
# m1 = np.random.normal(size=n)
# m2 = np.random.normal(scale=0.5, size=n)
# return m1+m2, m1-m2
# m1, m2 = measure(2000)
# xmin = m1.min()
# xmax = m1.max()
# ymin = m2.min()
# ymax = m2.max()
# Perform a kernel density estimation on the data:
# X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
# positions = np.vstack([X.ravel(), Y.ravel()])
# values = np.vstack([m1, m2])
# kernel = stats.gaussian_kde(values)
# Z = np.reshape(kernel(positions).T, X.shape)
# Plot the results:
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
# extent=[xmin, xmax, ymin, ymax])
# ax.plot(m1, m2, 'k.', markersize=2)
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# plt.show() | mit | 5,267,185,298,583,716,000 | 23.916667 | 73 | 0.633365 | false | 2.377273 | false | false | false |
byuphamerator/phamerator-dev | phamerator/config.py | 1 | 3155 | #!/usr/bin/python
import gconf, os
class PhameratorConfiguration:
def __init__(self):
# add keys if needed
self.client = gconf.client_get_default()
self.gconf_dir = '/apps/phamerator'
if not self.client.dir_exists(self.gconf_dir):
self.client.add_dir(self.gconf_dir, gconf.CLIENT_PRELOAD_NONE)
entries = self.client.all_entries(self.gconf_dir)
self.gconf_strings = [
'/apps/phamerator/gene_color',
'/apps/phamerator/default_save_folder',
'/apps/phamerator/pham_circle_color_scheme'
]
self.gconf_bools = [
'/apps/phamerator/show_pham_names',
'/apps/phamerator/show_alignment_text',
'/apps/phamerator/show_alignment',
'/apps/phamerator/show_domains'
]
self.gconf_ints = []
self.gconf_floats = ['/apps/phamerator/transparency_adjustment']
entries = self.client.all_entries('/apps/phamerator')
#for entry in entries: print entry.get_key()
# for each bool, check if it's in the gconf database
# and add it if needed
keys = []
for entry in entries:
keys.append(entry.get_key())
        for key in self.gconf_bools:
            if key not in keys:
                print "can't find %s in gconf, setting default True" % key
                self.client.set_bool(key, True)
# for each float, check if it's in the gconf database
        # and add a default value if needed
keys = []
for entry in entries:
keys.append(entry.get_key())
for flt in self.gconf_floats:
if flt not in keys:
if flt == '/apps/phamerator/transparency_adjustment':
self.client.set_float(flt, 1.0)
# for each string, check if it's in the gconf database
# and, if not, add a reasonable default value
keys = []
for entry in entries:
keys.append(entry.get_key())
for s in self.gconf_strings:
try:
if s not in keys:
if s == '/apps/phamerator/gene_color':
self.client.set_string('/apps/phamerator/gene_color', 'pham')
elif s == '/apps/phamerator/default_save_folder':
self.client.set_string('/apps/phamerator/default_save_folder', os.environ['HOME'])
elif s == '/apps/phamerator/pham_circle_color_scheme':
self.client.set_string('/apps/phamerator/pham_circle_color_scheme', 'alignmentType')
except:
pass
def set_bool(self, key, param):
print 'setting bool %s:%s' % (key, param)
self.client.set_bool(key, param)
def get_bool(self, key):
print 'getting bool %s' % (key)
return self.client.get_bool(key)
def set_float(self, key, param):
print 'setting float %s:%s' % (key, param)
self.client.set_float(key, param)
def get_float(self, key):
print 'getting float %s' % (key)
return self.client.get_float(key)
def set(self, key, param):
print 'setting string %s:%s' % (key, param)
self.client.set_string(key, param)
def get(self, key):
print 'getting string %s' % (key)
return self.client.get_string(key)
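# Hypothetical usage sketch (key names taken from the lists above):
#
#   config = PhameratorConfiguration()
#   config.set_bool('/apps/phamerator/show_domains', True)
#   save_dir = config.get('/apps/phamerator/default_save_folder')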
| gpl-2.0 | -2,609,633,916,455,646,700 | 33.293478 | 96 | 0.598732 | false | 3.388829 | false | false | false |
julienmalard/Tikon | pruebas/test_central/rcrs/vars_interés.py | 1 | 1179 | from datetime import date
import pandas as pd
import xarray as xr
from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela
from tikon.central.res import Resultado
from tikon.datos import Obs
from tikon.utils import EJE_TIEMPO
class Res1_1(Resultado):
nombre = 'res 1_1'
unids = None
class Res1_2(Resultado):
nombre = 'res 1_2'
unids = None
class Res2_1(Resultado):
nombre = 'res 2_1'
unids = None
class Res2_2(Resultado):
nombre = 'res 2_2'
unids = None
class SimulMódulo1(SimulMódulo):
resultados = [Res1_1, Res1_2]
class Módulo1(Módulo):
nombre = 'módulo 1'
cls_simul = SimulMódulo1
class SimulMódulo2(SimulMódulo):
resultados = [Res2_1, Res2_2]
class Módulo2(Módulo):
nombre = 'módulo 2'
cls_simul = SimulMódulo2
class MiObs(Obs):
mód = 'módulo 1'
var = 'res 1_1'
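# A constant observed value of 1.5 for variable 'res 1_1' of 'módulo 1',
# spanning ten daily time steps that start today.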
obs_1_1 = MiObs(
datos=xr.DataArray(
1.5, coords={EJE_TIEMPO: pd.date_range(date.today(), periods=10, freq='D')}, dims=[EJE_TIEMPO]
)
)
exper = Exper('exper', Parcela('parcela'))
exper_obs_1_1 = Exper('exper', Parcela('parcela'), obs=obs_1_1)
modelo = Modelo([Módulo1, Módulo2])
| agpl-3.0 | 1,028,831,009,600,824,700 | 17.725806 | 102 | 0.66236 | false | 2.350202 | false | false | false |
midlgxdev/cdecimal-2.3 | python/test_cdecimal3.py | 2 | 172013 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from the command line with one parameter (Arithmetic
or Behaviour) to test each part, or without a parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import run_unittest, run_doctest, is_resource_enabled
from test.support import check_warnings, TestFailed
import random
import time
import warnings
import fractions
try:
import threading
except ImportError:
threading = None
import cdecimal as C
import decimal as P
orig_sys_decimal = sys.modules['decimal']
py_minor = sys.version_info[1]
# Testing all failures of API functions for _decimal. First,
# the number of API calls in a test case is determined. Then,
# the testcase is run with all possible API failures, checking
# that FailAPIException is properly raised.
def assertRaises(expEx, func, *args, **kwargs):
"""assertRaises has to reraise FailAPIException."""
try:
func(*args, **kwargs)
except Exception as e:
if e.__class__ is C.FailAPIException:
raise C.FailAPIException
        if e.__class__ is not expEx:
raise e
def withFailpoint(func):
"""Wrap a function for testing all possible API failures."""
def iter_failpoint(testcase, *args):
        # Replace assertRaises so that FailAPIException is re-raised
        # instead of being swallowed as an expected exception.
setattr(testcase, 'assertRaises', assertRaises)
# Determine number of API calls.
C.setapicalls(0)
C.setfailpoint(0)
func(testcase, *args)
n = C.getapicalls()
# Fail at each possible API call.
for i in range(1, n+1):
C.setapicalls(0)
C.setfailpoint(i)
try:
func(testcase, *args)
except C.FailAPIException:
continue
# Error: FailAPIException was not raised
raise TestFailed("FailAPIException not raised in: %s" % func)
C.setapicalls(0)
C.setfailpoint(0)
return iter_failpoint
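# Hypothetical usage sketch (illustrative only; applied to a test method,
# the wrapper counts the C-API calls the test makes and then re-runs it
# once per call site with an injected failure):
#
#   class CFailpointTest(unittest.TestCase):
#       @withFailpoint
#       def test_addition(self):
#           C.getcontext().add(C.Decimal("1"), C.Decimal("2"))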
class ProtectFail(object):
"""Protect code regions that modify global state (e.g. lines
that set or restore global context values). Otherwise it
would not be possible to rerun a test case several times."""
def __enter__(self):
if hasattr(C, 'setfailpoint'):
self.calls = C.getapicalls()
self.fpoint = C.getfailpoint()
C.setfailpoint(0)
def __exit__(self, *_):
if hasattr(C, 'setfailpoint'):
C.setfailpoint(self.fpoint)
if self.fpoint:
if self.calls < self.fpoint <= C.getapicalls():
# Pretend that API calls in the protected block failed.
raise C.FailAPIException
def protectfail():
return ProtectFail()
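# protectfail() is used below as a context manager around lines that mutate
# global state, e.g. (from eval_equation):
#
#   with protectfail():
#       self.context.clear_flags()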
if hasattr(C, 'setfailpoint'):
# Functions that are iterated several times must use
# the same random sequence each time.
randseed = int(time.time())
# Implicit initialization of the module context must
# be tested first.
for i in range(1, 100):
C.setapicalls(0)
C.setfailpoint(i)
try:
C.getcontext()
except C.FailAPIException as e:
continue
C.setapicalls(0)
C.setfailpoint(0)
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
RoundingModes = {
C: (C.ROUND_UP, C.ROUND_DOWN, C.ROUND_CEILING, C.ROUND_FLOOR,
C.ROUND_HALF_UP, C.ROUND_HALF_DOWN, C.ROUND_HALF_EVEN,
C.ROUND_05UP) if C else None,
P: (P.ROUND_UP, P.ROUND_DOWN, P.ROUND_CEILING, P.ROUND_FLOOR,
P.ROUND_HALF_UP, P.ROUND_HALF_DOWN, P.ROUND_HALF_EVEN,
P.ROUND_05UP)
}
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=m.ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# If set, also check that operations raise the expected exceptions when
# their traps are enabled, not merely that the corresponding flags are set.
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# Skip large powers for _power_exact:
'extr1700',
'extr1701',
'extr1702',
'extr1703',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
elif py_minor < 3:
self.skipped_test_ids.add('minx1030')
self.skipped_test_ids.add('plux1031')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : self.decimal.ROUND_CEILING,
'down' : self.decimal.ROUND_DOWN,
'floor' : self.decimal.ROUND_FLOOR,
'half_down' : self.decimal.ROUND_HALF_DOWN,
'half_even' : self.decimal.ROUND_HALF_EVEN,
'half_up' : self.decimal.ROUND_HALF_UP,
'up' : self.decimal.ROUND_UP,
'05up' : self.decimal.ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext.unsafe_setprec(1070000000)
self.readcontext.unsafe_setemax(1070000000)
self.readcontext.unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
            raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
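        # Directive lines from the .decTest files look like "precision: 9" or
        # "rounding: half_even"; the directive name is dispatched through
        # ChangeDict, and rounding values are mapped through RoundingDict.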
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and not hasattr(C, 'setfailpoint') and \
random.random() < 0.90:
return
with protectfail():
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
# three argument power/powmod (deprecated)
if self.decimal == C:
if fname == 'power' and len(vals) == 3:
# name is different
fname = 'powmod'
funct = getattr(self.context, fname)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except Exception as e: #Catch any error long enough to state the test case.
# Errors are expected with failpoints.
if not hasattr(C, 'setfailpoint'):
print("ERROR:", s)
raise e.__class__
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context.unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context.unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context.unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
if py_minor <= 2:
self.context._clamp = clamp
else:
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
if self.decimal == C:
FloatOperation = self.decimal.FloatOperation
if py_minor <= 1:
with localcontext() as c:
# TypeError in 3.1
c.clear_flags()
self.assertRaises(TypeError, Decimal, 7.5)
self.assertFalse(c.flags[FloatOperation])
c.traps[FloatOperation] = True
self.assertRaises(TypeError, Decimal, 7.5)
self.assertFalse(c.flags[FloatOperation])
return
else:
with localcontext() as c:
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.traps[FloatOperation] = True
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
r = Decimal.from_float(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal.from_float(float('nan')).is_qnan())
self.assertTrue(Decimal.from_float(float('inf')).is_infinite())
self.assertTrue(Decimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(Decimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal.from_float(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal.from_float(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = self.decimal.Decimal
Context = self.decimal.Context
nc = Context()
if self.decimal == C:
FloatOperation = self.decimal.FloatOperation
if py_minor <= 1:
# TypeError in 3.1
nc.clear_flags()
self.assertRaises(TypeError, nc.create_decimal, 7.5)
self.assertFalse(nc.flags[FloatOperation])
nc.traps[FloatOperation] = True
self.assertRaises(TypeError, nc.create_decimal, 7.5)
self.assertFalse(nc.flags[FloatOperation])
nc.traps[FloatOperation] = False
return
else:
nc.clear_flags()
self.assertEqual(nc.create_decimal(7.5), 7.5)
self.assertTrue(nc.flags[FloatOperation])
nc.traps[FloatOperation] = True
self.assertRaises(FloatOperation, nc.create_decimal, 7.5)
self.assertTrue(nc.flags[FloatOperation])
nc.traps[FloatOperation] = False
r = nc.create_decimal_from_float(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal_from_float(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal_from_float(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal_from_float(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal_from_float(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal_from_float(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal_from_float(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal_from_float(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal_from_float(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
if py_minor <= 1 and self.decimal == P:
raise unittest.SkipTest("requires Python 3.2")
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
return
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0´000´000´000´001¿5')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
try:
locale.setlocale(locale.LC_ALL, 'ps_AF')
except locale.Error:
return
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100٬000٬000٫123')
locale.resetlocale()
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
if py_minor <= 1 and self.decimal == P:
raise unittest.SkipTest("requires Python 3.2")
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
if py_minor <= 1 and self.decimal == P:
raise unittest.SkipTest("requires Python 3.2")
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
return
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
return
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
    # Take care when executing this test from IDLE; there's an issue in
    # threading that hangs IDLE and I couldn't find it.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
with protectfail():
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
return
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if self.decimal == C:
if signal == C.FloatOperation:
self.assertRaises(signal, f, b)
elif py_minor <= 1:
# The actual TypeError is raised by
# the caller of the comparison methods.
self.assertIs(f(b), NotImplemented)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[C.FloatOperation])
else:
if py_minor <= 1:
self.assertIs(f(b), NotImplemented)
else:
self.assertIs(f(b), True)
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
expected_len = 2 if py_minor <= 1 else 1
self.assertEqual(len(s), expected_len)
self.assertTrue(c.flags[C.FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[C.FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[C.FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[C.FloatOperation])
if self.decimal == C:
nc = Context()
with localcontext(nc) as c:
sig = TypeError if py_minor <= 1 else None
self.assertFalse(c.traps[C.FloatOperation])
doit(c, signal=sig)
test_containers(c, sig)
c.traps[C.FloatOperation] = True
doit(c, signal=C.FloatOperation)
test_containers(c, C.FloatOperation)
else:
# decimal.py does not have the FloatOperation signal.
nc = Context()
with localcontext(nc) as c:
doit(c, signal=False)
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
if py_minor >= 2:
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
else:
self.assertNotEqual(da, (0.25+0j))
self.assertNotEqual((0.25+0j), da)
self.assertNotEqual((3.0+0j), db)
self.assertNotEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions.Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
if py_minor >= 2:
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
else:
self.assertIs(NotImplemented, D(0).__lt__(F(1,1)))
self.assertIs(NotImplemented, D(0).__le__(F(1,1)))
self.assertIs(NotImplemented, D(0).__gt__(F(1,1)))
self.assertIs(NotImplemented, D(0).__ge__(F(1,1)))
if py_minor >= 2:
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
else:
self.assertNotEqual(D("0.1"), F(1,10))
self.assertNotEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
if py_minor >= 2:
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
if py_minor >= 2:
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
if hasattr(C, 'setfailpoint'):
test_values = random.sample(test_values, 10)
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        # the same hash as the equivalent int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
if py_minor >= 2:
# check that the hashes of a Decimal float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
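        # The invariants exercised above, in brief (sketch):
        #
        #   hash(Decimal(123)) == hash(123)      # integral values
        #   hash(Decimal('1E+2')) == hash(100)   # independent of exponent
        #   # and, per issue #1757, independent of the context precision.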
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
        # between a Decimal and an int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
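        # e.g. (sketch) round(Decimal('123.456'), 1) gives the same result
        # as Decimal('123.456').quantize(Decimal('1E-1')), i.e. 123.5
        # under the default ROUND_HALF_EVEN.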
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# XXX non-compliant infinity payload.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# XXX coefficient in infinity should raise an error
if self.decimal == P:
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertTrue(isinstance(Decimal(0), numbers.Number))
self.assertFalse(isinstance(Decimal(0), numbers.Real))
def test_pickle(self):
Decimal = self.decimal.Decimal
# Round trip
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
Decimal = self.decimal.Decimal
ROUND_DOWN = self.decimal.ROUND_DOWN
lim = 10 if hasattr(C, 'setfailpoint') else 250
for x in range(-lim, lim):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
ROUND_DOWN = self.decimal.ROUND_DOWN
lim = 10 if hasattr(C, 'setfailpoint') else 250
for x in range(-lim, lim):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
pass
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
lim = 10 if hasattr(C, 'setfailpoint') else 200
for i in range(lim):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
ROUND_DOWN = self.decimal.ROUND_DOWN
ROUND_UP = self.decimal.ROUND_UP
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
ROUND_DOWN = self.decimal.ROUND_DOWN
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
Context = self.decimal.Context
c = Context()
e = pickle.loads(pickle.dumps(c))
if self.decimal == C:
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.traps, e.traps)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c._clamp, e._clamp)
else:
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
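        # i.e. (sketch) mutating the copy must not be visible in the
        # original:
        #
        #   d.flags[self.decimal.Inexact] = True
        #   # ...c.flags[self.decimal.Inexact] is still False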
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
if self.decimal == C or py_minor >= 3:
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
@unittest.skipIf(py_minor <= 1, "requires Python 3.2")
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
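        # Equivalently (sketch): if fn(*args) computed from a clean
        # context yields `ans` with flag set `raised`, then fn(*args)
        # computed with `extra` flags already set must yield the same
        # `ans`, with flags equal to the union raised | extra.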
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
ROUND_HALF_EVEN = self.decimal.ROUND_HALF_EVEN
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
                # set the extra flags before calling the operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
with protectfail():
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
with protectfail():
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
# rounding: always raise TypeError in order to get consistent
# exceptions across implementations. In decimal, rounding
# modes are strings, in _decimal they are integers. The idea
# is to view rounding as an abstract type and not mind the
# implementation details.
# Hence, a user should view the rounding modes as if they
# had been defined in a language that supports abstract
# data types, e.g. ocaml:
#
# type rounding = ROUND_DOWN | ROUND_HALF_UP | ... ;;
#
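        # In client code that means assigning only the module's named
        # constants and treating the mode as opaque, e.g. (sketch):
        #
        #   c.rounding = self.decimal.ROUND_HALF_EVEN   # OK
        #   c.rounding = 'xyz'                          # TypeError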
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(TypeError, setattr, c, 'flags', {})
self.assertRaises(TypeError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(TypeError, Context, flags=["P"])
self.assertRaises(TypeError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(unittest.TestCase):
# No context input validation in decimal.py
pass
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
if py_minor <= 1 and self.decimal == P:
raise unittest.SkipTest("requires Python 3.2")
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = self.decimal.ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
if py_minor <= 1 and self.decimal == P:
raise unittest.SkipTest("requires Python 3.2")
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_quantize_watchexp(self):
# watchexp functionality
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
x = Decimal(99999).quantize(Decimal("1e3"), watchexp=False)
self.assertEqual(x, Decimal('1.00E+5'))
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
if py_minor <= 1:
raise unittest.SkipTest("requires Python 3.2")
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
        # Do operations and check that they didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
ROUND_UP = P.ROUND_UP
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
ROUND_UP = P.ROUND_UP
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
def test_apply(self):
# Decimal("9.9999999").apply() applies the current context.
Decimal = C.Decimal
localcontext = C.localcontext
with localcontext() as c:
c.prec = 5
c.Emax = 99999
c.Emin = -99999
d = c.copy()
d.prec = 4
x = Decimal("123456")
self.assertEqual(str(x.apply()), "1.2346E+5")
self.assertEqual(str(c.apply(x)), "1.2346E+5")
self.assertEqual(str(x.apply(d)), "1.235E+5")
self.assertEqual(str(d.apply(x)), "1.235E+5")
self.assertRaises(TypeError, x.apply, "p")
self.assertRaises(TypeError, x.apply, "p", "q")
self.assertRaises(TypeError, c.apply, "p")
x = Decimal(1171**2221)
self.assertEqual(str(x.apply()), "1.8402E+6815")
self.assertEqual(str(c.apply(x)), "1.8402E+6815")
self.assertEqual(str(d.apply(x)), "1.840E+6815")
def test_c_float_operation_default(self):
Decimal = C.Decimal
Context = C.Context
Inexact = C.Inexact
DecInexact = C.DecInexact
        FloatOperation = C.FloatOperation
        DecFloatOperation = C.DecFloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
self.assertFalse(context._flags&DecFloatOperation)
self.assertFalse(context._traps&DecFloatOperation)
context.settraps([Inexact, FloatOperation])
self.assertEqual(context._traps, DecInexact|DecFloatOperation)
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
def test_c_powmod(self):
Decimal = C.Decimal
Context = C.Context
c = Context()
d = c.powmod(Decimal(1), Decimal(4), Decimal(2))
self.assertEqual(c.powmod(1, 4, 2), d)
self.assertEqual(c.powmod(Decimal(1), 4, 2), d)
self.assertEqual(c.powmod(1, Decimal(4), 2), d)
self.assertEqual(c.powmod(1, 4, Decimal(2)), d)
self.assertEqual(c.powmod(Decimal(1), Decimal(4), 2), d)
self.assertRaises(TypeError, c.powmod, '1', 4, 2)
self.assertRaises(TypeError, c.powmod, 1, '4', 2)
self.assertRaises(TypeError, c.powmod, 1, 4, '2')
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
def test_sundry(self):
Decimal = C.Decimal
# mpd_isinteger
self.assertTrue(Decimal("1.234e5").is_integer())
self.assertTrue(Decimal("snan").is_special())
# Extra functions
self.assertEqual(Decimal(-1).abs(), 1)
self.assertEqual(Decimal(1).minus(), -1)
self.assertEqual(Decimal(1).plus(), 1)
self.assertEqual(Decimal(1).add(1), 2)
self.assertEqual(Decimal(12).div(2), 6)
self.assertEqual(Decimal(10).divint(7), 1)
self.assertEqual(Decimal(10).mul(12), 120)
self.assertEqual(Decimal(10).rem(7), 3)
self.assertEqual(Decimal(10).sub(7), 3)
self.assertEqual(Decimal(10).divmod(7), (1, 3))
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# Architecture dependent context limits
if C.MAX_EMAX > 425000000:
self.assertEqual(C.MAX_PREC, 999999999999999999)
self.assertEqual(C.MAX_EMAX, 999999999999999999)
self.assertEqual(C.MIN_EMIN, -999999999999999999)
self.assertEqual(C.MIN_ETINY, -1999999999999999997)
else:
self.assertEqual(C.MAX_PREC, 425000000)
self.assertEqual(C.MAX_EMAX, 425000000)
self.assertEqual(C.MIN_EMIN, -425000000)
self.assertEqual(C.MIN_ETINY, -849999999)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Rounding modes
for i, v in enumerate(RoundingModes[C]):
self.assertEqual(v, i)
self.assertEqual(C.ROUND_TRUNC, 8)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
r = 1 if hasattr(C, 'setfailpoint') else 5
for i in range(r):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
ROUND_HALF_DOWN = C.ROUND_HALF_DOWN
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(TypeError, c.flags.__setitem__, 801, 0)
self.assertRaises(TypeError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(TypeError, setattr, c, 'flags', {0:1})
self.assertRaises(TypeError, setattr, c, 'traps', {0:1})
self.assertRaises(TypeError, c.setflags, ['x'])
self.assertRaises(TypeError, c.settraps, ['y'])
self.assertRaises(TypeError, c.setflags, 'x')
self.assertRaises(TypeError, c.settraps, 'y')
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, rounding=int_max+1)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
self.assertRaises(OverflowError, Context, _allcr=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp', '_allcr'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError, general TypeError
for attr in ('rounding', '_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# OverflowError: unsafe_prec, unsafe_emin, unsafe_emax
self.assertRaises(OverflowError, getattr(c, 'unsafe_setprec'), int_max+1)
self.assertRaises(OverflowError, getattr(c, 'unsafe_setemax'), int_max+1)
self.assertRaises(OverflowError, getattr(c, 'unsafe_setemin'), -int_max-2)
# capitals, clamp, _allcr
for attr in ['capitals', 'clamp', '_allcr']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
self.assertRaises(ValueError, Context, _allcr=2)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
ROUND_HALF_UP = C.ROUND_HALF_UP
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
DecClamped = C.DecClamped
DecUnderflow = C.DecUnderflow
DecInexact = C.DecInexact
DecRounded = C.DecRounded
DecSubnormal = C.DecSubnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
c._flags = DecUnderflow
c._traps = DecClamped
c._allcr = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c._flags, DecUnderflow)
self.assertEqual(c._traps, DecClamped)
self.assertEqual(c._allcr, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Set traps/flags from list
c.settraps([Clamped, Underflow])
self.assertEqual(c._traps, DecClamped|DecUnderflow)
c.setflags([Inexact, Rounded, Subnormal])
self.assertEqual(c._flags, DecInexact|DecRounded|DecSubnormal)
# Exercise all unsafe setters
c.unsafe_setprec(999999999)
c.unsafe_setemax(999999999)
c.unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Rounded = C.Rounded
localcontext = C.localcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Rounded] = True
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(InvalidOperation, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
ROUND_UP = C.ROUND_UP
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
ROUND_UP = C.ROUND_UP
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_sci_string(), '9.99E+10')
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).divmod, 8, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(TypeError, Decimal(9).to_sci, 3, "x", "y")
self.assertRaises(TypeError, Decimal(9).to_eng, 3, "x", "y")
self.assertEqual(Decimal("1.234e2007").sign(), 1)
self.assertEqual(Decimal("-1.234e2007").sign(), -1)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, Decimal(9).divmod, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, Decimal(9).divmod, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
c.prec = 10
x = Decimal(2).invroot()
self.assertEqual(str(x), '0.7071067812')
x = c.invroot(3)
self.assertEqual(str(x), '0.5773502692')
c.prec = 28
x = Decimal(2).power(8)
self.assertEqual(str(x), '256')
x = Decimal(2).powmod(8, 31)
self.assertEqual(str(x), '8')
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
def test_c_signal_dict(self):
if hasattr(C, 'setfailpoint'):
random.seed(randseed)
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
self.assertTrue(c.traps.has_key(Overflow))
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = 1 if hasattr(C, 'setfailpoint') else len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes[C]:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps), _allcr=cr)
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
self.assertEqual(c._allcr, cr)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
if py_minor <= 1:
all_tests = all_tests[::2]
# Wrap test functions for testing api failures. Doing this in
# test_main() causes spurious refleaks, so it is done here.
if hasattr(C, 'setapicalls'):
for cls in all_tests:
if cls == CIBMTestCases or cls == PyIBMTestCases:
newfunc = withFailpoint(getattr(cls, 'eval_equation'))
setattr(cls, 'eval_equation', newfunc)
else:
for attr in dir(cls):
if attr.startswith('test_'):
if attr == 'test_threading':
continue
newfunc = withFailpoint(getattr(cls, attr))
setattr(cls, attr, newfunc)
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
run_doctest(C, verbose)
run_doctest(P, verbose)
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
| bsd-2-clause | -1,927,077,917,711,028,000 | 35.249737 | 113 | 0.54981 | false | 3.751063 | true | false | false |
jotajunior/scrapers | riot/src/riot.py | 1 | 3422 | import requests
class Riot:
api_key = None
region = None
valid_regions = None
summoner_version = 'v1.4'
base_api_url = 'https://{0}.api.pvp.net'
base_summoner_suffix = '/api/lol/{0}/{1}/summoner'
base_summoner_url = None
MAX_ID_LIST = 40
def check_region(self):
if not self.region or self.region not in self.valid_regions:
raise Exception('You need to provide a valid region for this call.')
def init_valid_regions(self):
self.valid_regions = ['br', 'eune', 'euw', 'kr', 'lan', \
'las', 'na', 'oce', 'ru', 'tr' \
]
def init_base_url(self):
self.check_region()
self.base_api_url = self.base_api_url.format(self.region)
def init_summoner_url(self):
if self.base_summoner_url:
return True
self.check_region()
base_summoner_suffix = self.base_summoner_suffix.format(self.region \
,self.summoner_version \
)
self.base_summoner_url = self.base_api_url + base_summoner_suffix
def get_api_key_query_string(self):
return '?api_key={0}'.format(self.api_key)
def set_region(self, region):
self.region = self.standardize_name(region)
def __init__(self, api_key, region=None):
self.api_key = api_key
self.set_region(region)
self.init_valid_regions()
self.init_base_url()
def standardize_name(self, name):
if not name or not isinstance(name, str):
return False
return name.replace(' ', '').lower()
def parse_name_list(self, names):
if not names:
return False
if isinstance(names, list):
names = ','.join(names)
return self.standardize_name(names)
def parse_id_list(self, ids):
if not ids:
return False
        exceeded_exception = Exception('You are querying the server for more than ' \
                                      + str(self.MAX_ID_LIST) + ' ids.')
if isinstance(ids, list):
if len(ids) > self.MAX_ID_LIST:
raise exceeded_exception
ids = [str(_id) for _id in ids]
return ','.join(ids)
elif isinstance(ids, str):
if ids.count(',') > (self.MAX_ID_LIST - 1):
raise exceeded_exception
return ids.replace(' ', '')
def get_summoner_by_name(self, names):
self.init_summoner_url()
names = self.parse_name_list(names)
if not names:
raise Exception('Riot: No name provided.')
url = self.base_summoner_url + '/by-name'
url += '/' + names + self.get_api_key_query_string()
return requests.get(url).text
def user_exists_by_name(self, name):
result = self.get_summoner_by_name(name)
if result.find('HTTP ERROR 404') == -1:
return True
else:
return False
def get_summoner_by_id(self, ids):
self.init_summoner_url()
ids = self.parse_id_list(ids)
if not ids:
raise Exception('Id list provided not valid.')
url = self.base_summoner_url
url += '/' + ids + self.get_api_key_query_string()
return requests.get(url).text
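# Usage sketch (added illustration; the key and name below are placeholders,
# not part of the original module):
#   riot = Riot('your-api-key', region='br')
#   if riot.user_exists_by_name('somesummoner'):
#       print riot.get_summoner_by_name('somesummoner')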
| mit | 6,683,860,858,705,820,000 | 28.756522 | 87 | 0.532437 | false | 3.735808 | false | false | false |
fetox74/wormboard-backend | scripts/python/StatsUpdater.py | 1 | 23828 | #!/usr/bin/env python
import gzip
import json
import re
import urllib2
import psycopg2
import time
from StringIO import StringIO
from multiprocessing.pool import ThreadPool
from itertools import islice
from functools import wraps
# See LICENSE.md in /scripts/python folder
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
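# Illustrative note (added, not original code): with tries=4, delay=3 and
# backoff=2 a persistently failing call sleeps 3s, 6s and 12s between the
# guarded attempts, and the final attempt runs unguarded so its exception
# propagates to the caller, e.g.
#   @retry(urllib2.URLError, tries=4, delay=3, backoff=2)
#   def fetch(url):
#       return urllib2.urlopen(url).read()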
@retry(urllib2.URLError, tries=4, delay=3, backoff=2)
def urlopen_with_retry(request):
return urllib2.urlopen(request)
def partition(data, SIZE=100):
it = iter(data)
for i in xrange(0, len(data), SIZE):
yield dict((k, data[k]) for k in islice(it, SIZE))
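# Usage sketch (added, not original code): partition() yields sub-dictionaries
# of at most SIZE items, e.g.
#   >>> list(partition({1: 'a', 2: 'b', 3: 'c'}, SIZE=2))
#   [{1: 'a', 2: 'b'}, {3: 'c'}]
# (grouping follows dict iteration order); below it batches the killmail
# id->hash map into chunks of 100 for the thread pool.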
def getSolarSystemIdNameDict():
result = {}
cur.execute('SELECT * FROM "mapSolarSystems"')
solarSystems = cur.fetchall()
for solarSystem in solarSystems:
result[solarSystem[2]] = solarSystem[3]
return result
def getCharacterIdNameDict():
result = {}
cur.execute('SELECT DISTINCT characterid, character FROM "zwbAggregateChar"')
characters = cur.fetchall()
for character in characters:
result[character[0]] = character[1]
return result
def getCorporationIdNameDict():
result = {}
cur.execute('SELECT DISTINCT corporationid, corporation FROM "zwbAggregateCorp"')
corporations = cur.fetchall()
for corporation in corporations:
result[corporation[0]] = corporation[1]
return result
def getCharacterNameESI(characterId):
request = urllib2.Request("https://esi.evetech.net/latest/universe/names/?datasource=tranquility")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
query = "[" + str(characterId) + "]"
response = urllib2.urlopen(url=request, data=query)
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
return json.loads(data)[0]["name"]
def getCorporationNameESI(corporationId):
request = urllib2.Request("https://esi.evetech.net/latest/universe/names/?datasource=tranquility")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
query = "[" + str(corporationId) + "]"
response = urllib2.urlopen(url=request, data=query)
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
return json.loads(data)[0]["name"]
def getCharacterNameForId(charId):
characterId = int(charId)
if characterId not in dictCharacterIdName:
dictCharacterIdName[characterId] = getCharacterNameESI(characterId)
return dictCharacterIdName[characterId]
def getCorporationNameForId(corpId):
corporationId = int(corpId)
if corporationId not in dictCorporationIdName:
dictCorporationIdName[corporationId] = getCorporationNameESI(corporationId)
return dictCorporationIdName[corporationId]
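# Note (added): both helpers memoize id->name lookups in module-level dicts
# seeded from the database, so ESI is only queried for ids not seen before.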
def getKillmailHashes(date):
request = urllib2.Request("https://zkillboard.com/api/history/" + date + "/")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: [email protected]")
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as err:
print err
return []
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
return json.loads(data)
def getESI(tupleIdHash):
request = urllib2.Request("https://esi.tech.ccp.is/latest/killmails/" + tupleIdHash[0] + "/" + tupleIdHash[1] + "/?datasource=tranquility")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
try:
response = urlopen_with_retry(request)
except urllib2.HTTPError as err:
print err
return []
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
return json.loads(data)
def getZKB(id, solarSystemId):
if id in mapIdKillmail:
return mapIdKillmail[id]
# todo: this should actually only be done if the solar system has not been read for the current date already (add set of solarsystemid's, make sure to reset for next date)
for page in range(1, 11):
request = urllib2.Request(
"https://zkillboard.com/api/no-items/no-attackers/solarSystemID/" + str(solarSystemId) + "/startTime/" + str(date) + "0000/endTime/" + str(date) +
"2400/page/" + str(page) + "/")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: [email protected]")
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as err:
print err
return None
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
killmails = json.loads(data)
if len(killmails) > 0:
for killmail in killmails:
mapIdKillmail[killmail["killmail_id"]] = killmail["zkb"]
else:
break
if id in mapIdKillmail:
return mapIdKillmail[id]
else:
return getSingleKillmailZKB(id)
def getSingleKillmailZKB(id):
request = urllib2.Request("https://zkillboard.com/api/no-items/no-attackers/killID/" + str(id) + "/")
request.add_header("Accept-Encoding", "gzip")
request.add_header("Cache-Control", "1")
request.add_header("User-Agent", "http://fetox-developments.com/wormboard/ Maintainer: fetox74 EMail: [email protected]")
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as err:
print err.headers
return None
if response.info().get("Content-Encoding") == "gzip":
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
result = json.loads(data)
if len(result) > 0:
return result[0]["zkb"]
else:
return None
def getFinalHitCorpAndUpdateAttackers(attackers, value):
for attacker in attackers:
if "character_id" in attacker:
characterid = attacker["character_id"]
character = getCharacterNameForId(characterid)
updateCharacter(characterid, character, 1, 0, value, 0.0)
for attacker in attackers:
if attacker["final_blow"]:
if "corporation_id" in attacker:
corporationid = attacker["corporation_id"]
return corporationid, getCorporationNameForId(corporationid)
else:
return -1, ""
def getIskLossForCorp(corporationid):
if corporationid in lossDict:
return lossDict[corporationid]["isklost"]
else:
return 0.0
def getLossesForCorp(corporationid):
if corporationid in lossDict:
return lossDict[corporationid]["losses"]
else:
return 0
def getAttackersOfCorp(attackers, corporationid):
result = set()
for attacker in attackers:
if "corporation_id" in attacker and attacker["corporation_id"] == corporationid and "character_id" in attacker:
result.add(attacker["character_id"])
return result
def addNumberToHourDict(datetimestring, dict, number):
hour = datetimestring[11:13]
dict[hour] = dict[hour] + number
def createHourDict():
result = {}
for i in range(24):
result[str(i).zfill(2)] = 0
return result
def updateCharacter(characterid, character, kills, losses, iskwon, isklost):
if characterid in characterDict:
characterDict[characterid]["kills"] = characterDict[characterid]["kills"] + kills
characterDict[characterid]["losses"] = characterDict[characterid]["losses"] + losses
characterDict[characterid]["iskwon"] = characterDict[characterid]["iskwon"] + iskwon
characterDict[characterid]["isklost"] = characterDict[characterid]["isklost"] + isklost
else:
characterDict[characterid] = {"character": character, "kills": kills, "losses": losses, "iskwon": iskwon, "isklost": isklost}
def updateDictionaries(killmailESI, killmailZKB):
if killmailZKB:
finalHitCorpId, finalHitCorp = getFinalHitCorpAndUpdateAttackers(killmailESI["attackers"], killmailZKB["totalValue"])
victimCorpId = killmailESI["victim"]["corporation_id"]
victimCorp = getCorporationNameForId(victimCorpId)
if "character_id" in killmailESI["victim"]:
characterid = killmailESI["victim"]["character_id"]
character = getCharacterNameForId(characterid)
updateCharacter(characterid, character, 0, 1, 0.0, killmailZKB["totalValue"])
if finalHitCorpId != -1:
attackersOfFinalHitCorp = getAttackersOfCorp(killmailESI["attackers"], finalHitCorpId)
if finalHitCorpId in masterDict:
masterDict[finalHitCorpId]["kills"] = masterDict[finalHitCorpId]["kills"] + 1
masterDict[finalHitCorpId]["iskwon"] = masterDict[finalHitCorpId]["iskwon"] + killmailZKB["totalValue"]
masterDict[finalHitCorpId]["active"] = masterDict[finalHitCorpId]["active"] | attackersOfFinalHitCorp
masterDict[finalHitCorpId]["sumonkills"] = masterDict[finalHitCorpId]["sumonkills"] + len(attackersOfFinalHitCorp)
else:
masterDict[finalHitCorpId] = {"corporation": finalHitCorp, "kills": 1, "iskwon": killmailZKB["totalValue"], "active": attackersOfFinalHitCorp,
"sumonkills": len(attackersOfFinalHitCorp), "killsinhour": createHourDict(), "sumonkillsinhour": createHourDict()}
addNumberToHourDict(killmailESI["killmail_time"], masterDict[finalHitCorpId]["killsinhour"], 1)
addNumberToHourDict(killmailESI["killmail_time"], masterDict[finalHitCorpId]["sumonkillsinhour"], len(attackersOfFinalHitCorp))
if victimCorpId not in masterDict:
masterDict[victimCorpId] = {"corporation": victimCorp, "kills": 0, "iskwon": 0.0, "active": set(),
"sumonkills": 0, "killsinhour": createHourDict(), "sumonkillsinhour": createHourDict()}
if victimCorpId in lossDict:
lossDict[victimCorpId]["losses"] = lossDict[victimCorpId]["losses"] + 1
lossDict[victimCorpId]["isklost"] = lossDict[victimCorpId]["isklost"] + killmailZKB["totalValue"]
else:
lossDict[victimCorpId] = {"losses": 1, "isklost": killmailZKB["totalValue"]}
else:
print "kill id " + str(killmailESI["killmail_id"]) + " seems not to exist on zKillboard.."
def queryAggregateAlreadyInDB(cur, date, corp):
cur.execute('SELECT * FROM "zwbAggregateCorp" WHERE "date" = ' + date + ' AND "corporation" = ' "'" + corp + "'")
if len(cur.fetchall()) > 0:
return True
else:
return False
def updateDB(cur, date):
cur.execute('DELETE FROM "zwbAggregateCorp" WHERE "date" = %i' % int(date))
cur.execute('DELETE FROM "zwbAggregateChar" WHERE "date" = %i' % int(date))
for key, value in masterDict.items():
cur.execute(
'''INSERT INTO "zwbAggregateCorp" ("date", "corporationid", "corporation", "kills", "losses", "iskwon", "isklost", "active", "numactive", "sumonkills",
"killsinhour00", "killsinhour01", "killsinhour02", "killsinhour03", "killsinhour04", "killsinhour05", "killsinhour06", "killsinhour07",
"killsinhour08", "killsinhour09", "killsinhour10", "killsinhour11", "killsinhour12", "killsinhour13", "killsinhour14", "killsinhour15",
"killsinhour16", "killsinhour17", "killsinhour18", "killsinhour19", "killsinhour20", "killsinhour21", "killsinhour22", "killsinhour23",
"sumonkillsinhour00", "sumonkillsinhour01", "sumonkillsinhour02", "sumonkillsinhour03", "sumonkillsinhour04", "sumonkillsinhour05",
"sumonkillsinhour06", "sumonkillsinhour07", "sumonkillsinhour08", "sumonkillsinhour09", "sumonkillsinhour10", "sumonkillsinhour11",
"sumonkillsinhour12", "sumonkillsinhour13", "sumonkillsinhour14", "sumonkillsinhour15", "sumonkillsinhour16", "sumonkillsinhour17",
"sumonkillsinhour18", "sumonkillsinhour19", "sumonkillsinhour20", "sumonkillsinhour21", "sumonkillsinhour22", "sumonkillsinhour23")
VALUES (%i, %i, %s, %i, %i, %f, %f, %s, %i, %i,
%i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i,
%i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i, %i)''' % (
int(date),
key,
"'" + value["corporation"].replace("'", "''") + "'",
value["kills"],
getLossesForCorp(key),
value["iskwon"],
getIskLossForCorp(key),
"'" + ",".join(map(str, value["active"])) + "'",
len(value["active"]),
value["sumonkills"],
value["killsinhour"]["00"],
value["killsinhour"]["01"],
value["killsinhour"]["02"],
value["killsinhour"]["03"],
value["killsinhour"]["04"],
value["killsinhour"]["05"],
value["killsinhour"]["06"],
value["killsinhour"]["07"],
value["killsinhour"]["08"],
value["killsinhour"]["09"],
value["killsinhour"]["10"],
value["killsinhour"]["11"],
value["killsinhour"]["12"],
value["killsinhour"]["13"],
value["killsinhour"]["14"],
value["killsinhour"]["15"],
value["killsinhour"]["16"],
value["killsinhour"]["17"],
value["killsinhour"]["18"],
value["killsinhour"]["19"],
value["killsinhour"]["20"],
value["killsinhour"]["21"],
value["killsinhour"]["22"],
value["killsinhour"]["23"],
value["sumonkillsinhour"]["00"],
value["sumonkillsinhour"]["01"],
value["sumonkillsinhour"]["02"],
value["sumonkillsinhour"]["03"],
value["sumonkillsinhour"]["04"],
value["sumonkillsinhour"]["05"],
value["sumonkillsinhour"]["06"],
value["sumonkillsinhour"]["07"],
value["sumonkillsinhour"]["08"],
value["sumonkillsinhour"]["09"],
value["sumonkillsinhour"]["10"],
value["sumonkillsinhour"]["11"],
value["sumonkillsinhour"]["12"],
value["sumonkillsinhour"]["13"],
value["sumonkillsinhour"]["14"],
value["sumonkillsinhour"]["15"],
value["sumonkillsinhour"]["16"],
value["sumonkillsinhour"]["17"],
value["sumonkillsinhour"]["18"],
value["sumonkillsinhour"]["19"],
value["sumonkillsinhour"]["20"],
value["sumonkillsinhour"]["21"],
value["sumonkillsinhour"]["22"],
value["sumonkillsinhour"]["23"]))
for key, value in characterDict.items():
cur.execute(
'INSERT INTO "zwbAggregateChar" ("date", "characterid", "character", "kills", "losses", "iskwon", "isklost") VALUES (%i, %i, %s, %i, %i, %f, %f)' %
(int(date), key, "'" + value["character"].replace("'", "''") + "'", value["kills"], value["losses"], value["iskwon"], value["isklost"]))
conn.commit()
DATES = ["20180101", "20180102", "20180103", "20180104", "20180105", "20180106", "20180107", "20180108", "20180109", "20180110", "20180111", "20180112", "20180113", "20180114", "20180115", "20180116", "20180117", "20180118", "20180119", "20180120", "20180121", "20180122", "20180123", "20180124", "20180125", "20180126", "20180127", "20180128", "20180129", "20180130", "20180131",
"20180201", "20180202", "20180203", "20180204", "20180205", "20180206", "20180207", "20180208", "20180209", "20180210", "20180211", "20180212", "20180213", "20180214", "20180215", "20180216", "20180217", "20180218", "20180219", "20180220", "20180221", "20180222", "20180223", "20180224", "20180225", "20180226", "20180227", "20180228",
"20180301", "20180302", "20180303", "20180304", "20180305", "20180306", "20180307", "20180308", "20180309", "20180310", "20180311", "20180312", "20180313", "20180314", "20180315", "20180316", "20180317", "20180318", "20180319", "20180320", "20180321", "20180322", "20180323", "20180324", "20180325", "20180326", "20180327", "20180328", "20180329", "20180330", "20180331",
"20180401", "20180402", "20180403", "20180404", "20180405", "20180406", "20180407", "20180408", "20180409", "20180410", "20180411", "20180412", "20180413", "20180414", "20180415", "20180416", "20180417", "20180418", "20180419", "20180420", "20180421", "20180422", "20180423", "20180424", "20180425", "20180426", "20180427", "20180428", "20180429", "20180430",
"20180501", "20180502", "20180503", "20180504", "20180505", "20180506", "20180507", "20180508", "20180509", "20180510", "20180511", "20180512", "20180513", "20180514", "20180515", "20180516", "20180517", "20180518", "20180519", "20180520", "20180521", "20180522", "20180523", "20180524", "20180525", "20180526", "20180527", "20180528", "20180529", "20180530", "20180531",
"20180601", "20180602", "20180603", "20180604", "20180605", "20180606", "20180607", "20180608", "20180609", "20180610", "20180611", "20180612", "20180613", "20180614", "20180615", "20180616", "20180617", "20180618", "20180619", "20180620", "20180621", "20180622", "20180623", "20180624", "20180625", "20180626", "20180627", "20180628", "20180629", "20180630",
"20180701", "20180702", "20180703", "20180704", "20180705", "20180706", "20180707", "20180708", "20180709", "20180710", "20180711", "20180712", "20180713", "20180714", "20180715", "20180716", "20180717", "20180718", "20180719", "20180720", "20180721", "20180722", "20180723", "20180724", "20180725", "20180726", "20180727", "20180728", "20180729", "20180730", "20180731",
"20180801", "20180802", "20180803", "20180804", "20180805", "20180806", "20180807", "20180808", "20180809", "20180810", "20180811", "20180812", "20180813", "20180814", "20180815", "20180816", "20180817", "20180818", "20180819", "20180820", "20180821", "20180822", "20180823", "20180824", "20180825", "20180826", "20180827", "20180828", "20180829", "20180830", "20180831",
"20180901", "20180902", "20180903", "20180904", "20180905", "20180906", "20180907", "20180908", "20180909", "20180910", "20180911", "20180912", "20180913", "20180914", "20180915", "20180916", "20180917", "20180918", "20180919", "20180920", "20180921", "20180922", "20180923", "20180924", "20180925", "20180926", "20180927", "20180928", "20180929", "20180930",
"20181001", "20181002", "20181003", "20181004", "20181005", "20181006", "20181007", "20181008", "20181009", "20181010", "20181011", "20181012", "20181013", "20181014", "20181015", "20181016", "20181017", "20181018", "20181019", "20181020", "20181021", "20181022", "20181023", "20181024", "20181025", "20181026", "20181027", "20181028", "20181029", "20181030", "20181031",
"20181101", "20181102", "20181103", "20181104", "20181105", "20181106", "20181107", "20181108", "20181109", "20181110", "20181111", "20181112", "20181113", "20181114", "20181115", "20181116", "20181117", "20181118", "20181119", "20181120", "20181121", "20181122", "20181123", "20181124", "20181125", "20181126", "20181127", "20181128", "20181129", "20181130",
"20181201", "20181202", "20181203", "20181204", "20181205", "20181206", "20181207", "20181208", "20181209", "20181210", "20181211", "20181212", "20181213", "20181214", "20181215", "20181216", "20181217", "20181218", "20181219", "20181220", "20181221", "20181222", "20181223", "20181224", "20181225", "20181226", "20181227", "20181228", "20181229", "20181230", "20181231"]
reJMail = re.compile("J[0-9]{6}")
try:
conn = psycopg2.connect("dbname='staticdump' user='postgres' host='localhost' password='bollox'")
except:
print "Unable to connect to the database"
exit(-1)
cur = conn.cursor()
dictSolarSystemIdName = getSolarSystemIdNameDict()
dictCharacterIdName = getCharacterIdNameDict()
dictCorporationIdName = getCorporationIdNameDict()
for date in DATES:
counter = 0
jMailCounter = 0
dictKillmailIdHash = getKillmailHashes(date)
masterDict = {}
characterDict = {}
lossDict = {}
mapIdKillmail = {}
print "processing " + date
chunks = partition(dictKillmailIdHash)
for chunk in chunks:
pool = ThreadPool(100)
results = pool.map(getESI, chunk.items())
pool.close()
pool.join()
for killmailESI in results:
            if killmailESI != [] and killmailESI["solar_system_id"] in dictSolarSystemIdName and (reJMail.match(dictSolarSystemIdName[killmailESI["solar_system_id"]]) or dictSolarSystemIdName[killmailESI["solar_system_id"]] == "J1226-0"):
updateDictionaries(killmailESI, getZKB(killmailESI["killmail_id"], killmailESI["solar_system_id"]))
jMailCounter += 1
elif not killmailESI: # 20160824 has the problematic first Keepstar kill that does not appear on CREST (ESI unchecked), this (and the above killmailESI != []) is a temporary fix..
print("[] error...")
counter += 1
print "total kills: %i" % counter
print "total WH kills: %i" % jMailCounter
updateDB(cur, date)
conn.close()
| agpl-3.0 | -6,303,379,556,367,057,000 | 49.058824 | 380 | 0.619859 | false | 3.251194 | false | false | false |
JulienLeonard/PVG | lib/geoquad.py | 1 | 6499 | from utils import *
from geoutils import *
from curvebuilder import *
from circle import *
class GeoQuadNode:
def __init__(self,geoquad,bbox):
self.mgeoquad = geoquad
self.mbbox = bbox
self.msubnodes = []
def contain(self,point):
return self.mgeoquad.containpoint(point)
def split(self):
subgeoquads = self.mgeoquad.xysplit()
subbboxes = self.mbbox.split4()
for (subgeoquad,subbox) in zip(subgeoquads,subbboxes):
self.msubnodes.append(GeoQuadNode(subgeoquad,subbox))
def leaf(self,point,minsize):
if not self.contain(point):
return None
if self.mgeoquad.length() > minsize:
self.split()
for subnode in self.msubnodes:
containsubresult = subnode.leaf(point,minsize)
if containsubresult != None:
return containsubresult
else:
return self.mbbox
class GeoQuadTree:
def __init__(self,geoquad):
self.mroot = GeoQuadNode(geoquad,BBox(0.0,0.0,1.0,1.0))
def leaf(self,point,minsize):
return self.mroot.leaf(point,minsize)
#
# Geometric contour defined by 2 x 2 curves
#
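# Added note (parametrization as inferred from the code below): a point
# (x, y) of the unit square maps into the quad, with mdown/mup running along
# the x direction, mleft/mright along the y direction, and xcurve(x)/ycurve(y)
# interpolating between the opposite faces.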
class GeoQuad:
def __init__(self,left,up,right,down):
self.mleft = left
self.mup = up
self.mright = right
self.mdown = down
self.interx = CR(self.mleft,self.mright)
self.intery = CR(self.mdown,self.mup)
self.mpolygon = self.mleft.concat(self.mup).concat(self.mright.reverse()).concat(self.mdown.reverse())
self.mgeoquadtree = None
def polygon(self):
return self.mpolygon
def polygons(self):
return [self.mleft,self.mup,self.mright.reverse(),self.mdown.reverse()]
def length(self):
return self.polygon().length()
def xcurve(self,x):
x = R(0.0,1.0).trim(x)
return self.interx.sample(x).maponpoints(self.mdown.sample(x),self.mup.sample(x))
def ycurve(self,y):
y = R(0.0,1.0).trim(y)
return self.intery.sample(y).maponpoints(self.mleft.sample(y),self.mright.sample(y))
def xpoint(self,p):
x = R(0.0,1.0).trim(p.x())
y = R(0.0,1.0).trim(p.y())
return self.xcurve(x).sample(y)
def ypoint(self,p):
x = R(0.0,1.0).trim(p.x())
y = R(0.0,1.0).trim(p.y())
return self.ycurve(y).sample(x)
def containpoint(self,point):
return self.polygon().containpoint(point)
@staticmethod
def square(center = Point(0.5,0.5),size = 1.0,npointsperface = 10):
faces = [Polygon(Polygon([p1,p2]).points(npointsperface)) for (p1,p2) in pairs(Polygon.square(center,size).points())]
return GeoQuad(faces[0],faces[1],faces[2].reverse(),faces[3].reverse())
@staticmethod
def rectangle(x1,y1,x2,y2,npointsperface):
faces = [Polygon(Polygon([p1,p2]).points(npointsperface)) for (p1,p2) in pairs(Polygon.rectangle(x1,y1,x2,y2).points())]
return GeoQuad(faces[0],faces[1],faces[2].reverse(),faces[3].reverse())
@staticmethod
def circle(center = Point(0.5,0.5),size = 1.0,npointsperface = 10):
polygon = Circle(center,size/2.0).polygon(npointsperface*4).close()
return GeoQuad(polygon.subline(0.0,0.25),polygon.subline(0.25,0.5),polygon.subline(0.5,0.75).reverse(),polygon.subline(0.75,1.0).reverse())
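    # Usage sketch (added illustration, not part of the original module):
    #   quad = GeoQuad.square(center=Point(0.5, 0.5), size=1.0)
    #   parts = quad.xysplit(x=0.5, y=0.5)  # four sub-quads
    #   inner = quad.reduce(0.8)            # quad shrunk towards its center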
#
# split the quad in 2 horizontally, returning 2 subquads
#
def ysplit(self,abscissa=0.5):
(newleft1,newleft2) = self.mleft.split(abscissa=abscissa)
(newright1,newright2) = self.mright.split(abscissa=abscissa)
newup1 = self.ycurve(abscissa)
return (GeoQuad(newleft1,newup1,newright1,self.mdown),GeoQuad(newleft2,self.mup,newright2,newup1))
def xsplit(self,abscissa=0.5,bylength=None):
if abscissa != None:
(newup1,newup2) = self.mup.split(abscissa=abscissa)
(newdown1,newdown2) = self.mdown.split(abscissa=abscissa)
newleft1 = self.xcurve(abscissa)
return (GeoQuad(self.mleft,newup1,newleft1,newdown1),GeoQuad(newleft1,newup2,self.mright,newdown2))
if bylength != None:
abs1 = bylength/self.mup.length()
abs2 = bylength/self.mdown.length()
(newup1,newup2) = self.mup.split(abscissa=abs1)
(newdown1,newdown2) = self.mdown.split(abscissa=abs2)
newleft1 = self.mappolygon(Polygon([Point(abs2,0.0),Point(abs1,1.0)]))
return (GeoQuad(self.mleft,newup1,newleft1,newdown1),GeoQuad(newleft1,newup2,self.mright,newdown2))
def xysplit(self,x=0.5,y=0.5):
result = []
for newx in self.xsplit(x):
ysplit = newx.ysplit(y)
result.extend([ysplit[0],ysplit[1]])
return result
def reduce(self,amount):
margin = (1.0 - amount) * 0.5
frame = [self.xcurve(margin),
self.ycurve(1.0-margin),
self.xcurve(1.0-margin),
self.ycurve(margin)]
frame = [curve.subline(margin,1.0-margin) for curve in frame]
return GeoQuad(frame[0],frame[1],frame[2],frame[3])
#
# mapping
#
def mappolygon(self,polygon):
return Polygon([self.ypoint(p) for p in polygon.points()])
def unmappolygon(self,polygon):
newpoints = [self.unmappoint(point) for point in polygon.points()]
return Polygon(newpoints)
def unmappoint(self,point):
        if self.mgeoquadtree is None:
puts("build mgeoquadtree")
self.mgeoquadtree = GeoQuadTree(self)
puts("mgeoquadtree built")
leaf = self.mgeoquadtree.leaf(point,self.mpolygon.length()/1000.0)
        if leaf is not None:
puts("point ", point.coords(),"found")
return leaf.center()
return None
#
#
#
def transverses2polygon(self,abs1,curve1,abs2,curve2):
up = self.mup.subline(abs1,abs2)
down = self.mdown.subline(abs1,abs2)
curve1new = curve1.maponpoints(Point(abs1,0.0),Point(abs1,1.0))
curve2new = curve2.maponpoints(Point(abs2,0.0),Point(abs2,1.0))
return Polygon.allconcat([self.mappolygon(curve1new),up,self.mappolygon(curve2new).reverse(),down.reverse()])
| gpl-2.0 | 3,057,372,195,883,663,000 | 34.717514 | 147 | 0.595476 | false | 3.074267 | false | false | false |
jasedit/scriptorium | setup.py | 2 | 1389 | """Setuptools file for a MultiMarkdown Python wrapper."""
import os
import re
from distutils.core import setup
from setuptools import find_packages
import pypandoc
with open(os.path.join('scriptorium', '_version.py'), 'r') as vfp:
vtext = vfp.read()
v_re = r"__version__ = \"(?P<ver>.*)\""
mo = re.search(v_re, vtext)
VER = mo.group("ver")
LONG_DESC = pypandoc.convert_file('README.md', 'rst')
setup(
name='scriptorium',
version=VER,
description='Multimarkdown and LaTeX framework for academic papers.',
long_description=LONG_DESC,
license='MIT',
author='Jason Ziglar',
author_email='[email protected]',
url="https://github.com/jasedit/scriptorium",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Text Processing :: Markup',
'Topic :: Text Processing :: Filters',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
],
packages=find_packages(),
entry_points={
'console_scripts': ['scriptorium = scriptorium:main'],
},
package_data={'scriptorium': ['data/gitignore']},
install_requires=[
'pyyaml',
'argcomplete',
'pymmd>=0.3'
]
)
| mit | -2,037,072,477,922,044,000 | 29.866667 | 73 | 0.614831 | false | 3.723861 | false | false | false |
amlyj/pythonStudy | 2.7/data_analysis/study_numpy/numpy_ndarray.py | 1 | 5118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17-7-22 00:41
# @Author : tom.lee
# @docs : http://old.sebug.net/paper/books/scipydoc/numpy_intro.html
# @File : study_numpy.py
# @Software: PyCharm
"""
numpy
NumPy is a scientific computing library for Python that provides matrix operations; it is usually used together with SciPy and matplotlib.
NumPy provides two basic kinds of objects:
ndarray (N-dimensional array object): a multi-dimensional array that stores elements of a single data type;
ufunc (universal function object): a function that can operate on arrays.
"""
import numpy as np
def split_line():
print '*' * 6 ** 2
def np_version():
    """
    Print the NumPy version.
    :return:
    """
    print np.version.version
def np_list():
    """
    numpy arrays:
    can only store a single data type,
    are created with "numpy.array()",
    and the type can be given explicitly via "dtype=numpy.<type>".
    :return:
    """
    # creation
    l = np.array([1, 2, 3], dtype=np.int8)
    a = np.array([1, 2, 3, 4])
    b = np.array((5, 6, 7, 8))
    c = np.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
    print 'l:', l
    print 'a:', a
    print 'b:', b
    print 'c:', c
    split_line()
    # element types
    print l.dtype, c.dtype
    split_line()
    # shape: a's shape has a single entry, so a is one-dimensional;
    # c's shape has two entries, so c is 2-D with axis 0 of length 3
    # and axis 1 of length 4
    print l.shape, c.shape
    split_line()
    # change the length of each axis in place: only the axis sizes
    # change, the element layout in memory does not
    c.shape = 4, 3
    print c
    split_line()
    # an axis given as -1 is computed from the total element count,
    # so the next statement reshapes c to (2, 6)
    c.shape = 2, -1
    print c
    split_line()
    # reshape returns a new array with a different shape; the shape of
    # the original array is unchanged.
    # note that a and d share the same underlying data memory
    d = a.reshape((2, 2))
    print 'a:', a
    print 'd:', d
    split_line()
def np_list_create():
    # create a 1-D array with arange over [start, end) with the given
    # step: includes the start, excludes the end;
    # number of elements: (end-start)/step
    np_lst = np.arange(0, 10, 1)
    print np_lst
    print 'size: %d' % np_lst.shape
    split_line()
    # arithmetic progression:
    # linspace(start, end, size) covers [start, end], including both the
    # start and end values, and creates size elements in total;
    # the endpoint keyword controls whether the end value is included
    print np.linspace(0, 1, 12)
    split_line()
    # geometric progression:
    # logspace(start exponent, end exponent, count, base defaults to 10)
    print np.logspace(0, 2, 20)
    split_line()
def np_list_by_byte():
    """
    frombuffer, fromstring, fromfile and similar functions create arrays
    from byte sequences.
    Always pass the dtype argument when using them.
    Python 2 strings are byte sequences, one byte per character, so
    building an 8-bit integer array from a string s yields exactly the
    ASCII codes of the characters in the string.
    :return:
    """
    s = 'abcdefg'
    print np.frombuffer(s, dtype=np.int8)
    split_line()
    print np.fromstring(s, dtype=np.int8)
    split_line()
    # building a 16-bit integer array from a string pairs up adjacent
    # bytes: bytes 98 and 97 ('b' and 'a') form the 16-bit integer
    # 98*256 + 97 = 25185, which shows that memory stores the data
    # little-endian (low byte first).
    # The string length must therefore be even.
    print np.fromstring('abcdefgh', dtype=np.int16)
    split_line()
def np_list_by_func():
    """
    Create arrays from a function.
    :return:
    """
    # fromfunction takes a function plus an iterable (tuple or list)
    # giving the size of each dimension:
    # (10,) means a 1-D array of 10 elements, so the function receives
    # one argument; (5, 6) means a 5-by-6 2-D array, so it receives two
    print np.fromfunction(lambda x: x + 1, (10,))
    print np.fromfunction(lambda x, y: (x + 1) * (y + 1), (5, 6))
    split_line()
def np_list_opt():
    """
    Basic numpy array operations work much like Python lists.
    :return:
    """
    l = np.arange(10, 1, -1)
    print l
    print 'min:', l.min()
    print 'max:', l.max()
    print 'element at index 0:', l[0]
    split_line()
    # fancy indexing: returns a copy that does not share memory,
    # unlike the basic indexing above, which shares memory
    print l[np.array([1, 5, 3])]  # index with an array of indices
    print l[[1, 5, 3]]  # index with a list of indices
    split_line()
    # boolean filtering
    print l[l > 3]  # directly select the elements greater than 3
    print l > 3  # elementwise comparison returns a boolean array
    split_line()
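# A minimal ufunc sketch (illustrative; ufuncs are mentioned in the module
# docstring but not demonstrated above). ufuncs like np.add apply
# elementwise to whole arrays:
#   np.add(np.arange(4), 10)  # -> array([10, 11, 12, 13])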
if __name__ == '__main__':
# np_version()
# np_list()
np_list_create()
# np_list_by_byte()
# np_list_by_func()
# np_list_opt()
print np.fromfunction(lambda x: x, (10,))
| mit | 8,926,785,202,037,982,000 | 20.380952 | 71 | 0.594933 | false | 1.678505 | false | false | false |
odahoda/noisicaa | noisicaa/builtin_nodes/metronome/node_description.py | 1 | 1593 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import node_db
MetronomeDescription = node_db.NodeDescription(
uri='builtin://metronome',
display_name='Metronome',
type=node_db.NodeDescription.PROCESSOR,
node_ui=node_db.NodeUIDescription(
type='builtin://metronome',
),
builtin_icon='node-type-builtin',
processor=node_db.ProcessorDescription(
type='builtin://metronome',
),
ports=[
node_db.PortDescription(
name='out:left',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
node_db.PortDescription(
name='out:right',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
]
)
| gpl-2.0 | -3,700,266,251,660,509,000 | 31.510204 | 73 | 0.68801 | false | 3.811005 | false | false | false |
ain7/www.ain7.org | ain7/adhesions/migrations/0001_initial.py | 1 | 3525 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-30 23:26
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_change_at', models.DateTimeField(blank=True, editable=False, verbose_name='Modifi\xe9 pour la derni\xe8re fois \xe0')),
('old_id', models.IntegerField(blank=True, null=True, unique=True, verbose_name=b'old id')),
('dues_amount', models.FloatField(verbose_name='Montant de cotisation')),
('newspaper_amount', models.FloatField(blank=True, null=True, verbose_name='Montant Canal N7')),
('tender_type', models.IntegerField(choices=[(1, 'Esp\xe8ce'), (2, 'Ch\xe8que'), (4, 'Carte bancaire'), (5, 'Virement'), (6, 'Autre')], verbose_name='Mode de paiement')),
('validated', models.BooleanField(default=False, verbose_name='Valid\xe9')),
('date', models.DateTimeField(blank=True, null=True, verbose_name="date d'adh\xe9sion")),
('start_year', models.IntegerField(verbose_name='d\xe9but (ann\xe9e)')),
('end_year', models.IntegerField(verbose_name='fin (ann\xe9e)')),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
('newspaper_subscription', models.BooleanField(default=False, verbose_name='Adh\xe9rer \xe0 Canal N7 - 15 euros/an')),
('user_authenticated', models.BooleanField(default=True)),
],
options={
'ordering': ['id'],
'verbose_name': 'Adh\xe9sions',
},
),
migrations.CreateModel(
name='SubscriptionConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.IntegerField(choices=[(0, 'Promotions ant\xe9rieures \xe0 2011'), (1, 'Promotions 2011 \xe0 2015'), (2, 'Retrait\xe9'), (3, 'Bienfaiteur'), (4, 'Sans emploi (sur justificatif)'), (5, '\xc9tudiant(e), trois ans'), (6, '\xc9tudiant(e), deux ans'), (7, '\xc9tudiant(e), un an'), (8, 'Couple'), (9, 'Soutien'), (10, '\xc9tudiant(e)')], verbose_name='Type')),
('dues_amount', models.IntegerField(verbose_name='Montant de cotisation')),
('newspaper_amount', models.IntegerField(blank=True, null=True, verbose_name='Montant Canal N7')),
('duration', models.IntegerField(default=1, verbose_name='Dur\xe9e')),
('year', models.IntegerField(verbose_name='Ann\xe9e')),
],
options={
'verbose_name': 'Configuration',
},
),
migrations.CreateModel(
name='SubscriptionKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.UUIDField(default=uuid.uuid4, editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('expire_at', models.DateTimeField(editable=False)),
],
),
]
| lgpl-2.1 | 2,508,260,957,025,356,300 | 54.952381 | 386 | 0.57844 | false | 3.691099 | false | false | false |
DavidSouther/nucleosynth | src/canvas/assets/stars.py | 1 | 1870 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import csv
import sys
import json
import graph
from math import sin, cos, sqrt, radians
reader = csv.reader(sys.stdin)
class Star:
"""Hold data about a star."""
def __init__(self):
self.name = "[unknown star]"
self.spherical = {
'ascension': 0,
'declination': 0,
'distance': 0
}
self.rectilinear = {
'x': 0,
'y': 0,
'z': 0
}
def __str__(self):
return "Star " + self.name + " at " + str(self.rectilinear) + "."
    def findPosition(self):
        # sin/cos below expect radians; the parsed angles are stored in
        # degree-like units, so convert (assumes both angles are degrees)
        a = radians(self.spherical['ascension'])
        d = radians(self.spherical['declination'])
        D = self.spherical['distance']
self.rectilinear = {
'x': D * cos(a) * cos(d),
'y': D * sin(a) * cos(d),
'z': D * sin(d)
}
def forJSON(self):
return {"position": self.rectilinear, "spectral": self.spectral}
    def distance(self, star):
        # note: returns the squared distance (no sqrt is applied)
        x, y, z = [
(self.rectilinear['x'] - star.rectilinear['x']),
(self.rectilinear['y'] - star.rectilinear['y']),
(self.rectilinear['z'] - star.rectilinear['z'])
]
return x*x + y*y + z*z
@staticmethod
def fromWiki(line):
star = Star()
star.name = line[2]
star.spectral = line[4]
star.spherical = {
'ascension': parseDMS(line[-4]),
'declination': parseHMS(line[-5]),
'distance': float(line[1])
}
star.findPosition()
return star
def parseDMS(dms):
d, ms = dms.split('d')
m, s = ms.split('m')
s = s[0:2]
return float(d) + (float(m)/60) + (float(s) / 3600)
def parseHMS(hms):
    h, ms = hms.split('h')
    m, s = ms.split('m')
    s = s[0:2]
    return float(h) + (float(m)/60) + (float(s) / 3600)
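# Examples (hypothetical inputs; real values come from the wiki CSV rows):
#   parseDMS('12d30m00s')  # -> 12.5
#   parseHMS('01h30m00s')  # -> 1.5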
def main():
stars = {}
for entry in reader:
star = Star.fromWiki(entry)
stars[star.name] = star
wormholes = graph.walk(stars)
for n, s in stars.iteritems():
stars[n] = s.forJSON()
print "Stars = " + json.dumps(stars)
print "Wormholes = " + json.dumps(wormholes)
if __name__ == "__main__":
main() | mit | -6,329,218,047,016,869,000 | 20.261364 | 67 | 0.591979 | false | 2.419146 | false | false | false |
kylebarlow/marchmadness | predict.py | 1 | 29819 | #!/usr/bin/env python3
# Requires Python 3
"""
March Madness prediction script
Copyright (C) 2013-2019 Kyle Barlow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Python standard library import statements
import argparse
import time
import os
import sys
import random
import copy
import multiprocessing
import queue
import pickle
import threading
import urllib.request
import itertools
import collections
# NumPy
import numpy as np
import pandas as pd
# Constants
use_multiprocessing = True
program_description = 'Python script to generate march madness brackets from ELO input (as in the format of, but not necessarily, the 538 data)'
default_output_file = 'output.txt'
source_url = 'https://projects.fivethirtyeight.com/march-madness-api/2021/fivethirtyeight_ncaa_forecasts.csv'
default_data_file = 'fivethirtyeight_ncaa_forecasts.csv' # Caches url results
region_pairings = ( ('east', 'west'), ('midwest', 'south') )
# How fast ELO changes
elo_k_factor = 2.5 # Tuned by rough guessing so that statistics over many simulations match 538's closely enough
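# Worked example (illustrative numbers): for ratings 90 vs 80, the favorite's
# win probability is 1 / (1 + 10 ** ((80 - 90) * 30.464 / 400)) ~= 0.85, so a
# win moves its rating by elo_k_factor * (1 - 0.85) ~= +0.37 and an upset
# loss by elo_k_factor * (0 - 0.85) ~= -2.13; see Team.probability_of_victory
# and Team.update_elo below.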
# Mapping for strings describing each round to an integer (for indexing)
round_dictionary = {
0 : 'FIRST FOUR',
1 : 'ROUND OF 64',
2 : 'ROUND OF 32',
3 : 'ROUND OF 16',
4 : 'ELITE 8',
5 : 'FINAL 4',
6 : 'FINALS',
}
seed_pairs_by_round = {
1 : {
1:16, 16:1,
8:9, 9:8,
5:12, 12:5,
4:13, 13:4,
6:11, 11:6,
3:14, 14:3,
7:10, 10:7,
2:15, 15:2,
},
2 : {
1:8, 8:1,
4:5, 5:4,
3:6, 6:3,
2:7, 7:2,
},
3 : {
1:4, 4:1,
2:3, 3:2,
},
4 : {
1:2, 2:1,
},
}
class MonteCarloBracketSimulator(object):
def __init__(self, starting_bt):
self.highest_bt = starting_bt.copy()
self.last_bt = starting_bt.copy()
self.highest_score = starting_bt.expected_score()
self.last_score = self.highest_score
self.temperature = 100.0
def set_last_bt(self, bt):
self.last_bt = bt.copy()
self.last_score = bt.expected_score()
def boltzmann(self, bt):
bt_score = bt.expected_score()
score_delta = self.last_score - bt_score
boltz_factor = ( -1 * score_delta / self.temperature )
probability = np.exp( min(40.0, max(-40.0, boltz_factor) ) )
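        # Metropolis criterion: a candidate that scores worse than the last
        # accepted bracket (score_delta > 0) survives with probability
        # exp(-score_delta / temperature); improvements always pass below.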
if probability < 1:
if random.random() > probability:
# print ( 'reject', probability, self.last_score, bt_score )
return False # reject
# else:
# print ( 'MC accept', probability, self.last_score, bt_score )
# else:
# print ( 'accept', probability, self.last_score, bt_score )
# Accept
self.last_bt = bt.copy()
self.last_score = bt_score
if self.highest_score == None or self.last_score > self.highest_score:
self.highest_score = self.last_score
self.highest_bt = bt.copy()
return True
def copy(self):
return pickle.loads( pickle.dumps(self) )
class Team(object):
def __init__(self, name, region, seed, elo, win_prob_by_round):
self.region = region.lower()
self.seed = seed
self.name = name
self.starting_elo = elo
self.elo = elo
self.win_prob_by_round = win_prob_by_round
# Keeps track of past ELO changes so we can undo them
self.elo_history = {}
@classmethod
def init_from_row(cls, row, separator_character = ','):
name = row['team_name']
region = row['team_region']
seed = row['team_seed']
win_prob_by_round = {}
for round_key in range(0, 7):
win_prob_by_round[round_key] = float( row[ 'rd%d_win' % (round_key + 1) ] )
if seed.endswith('a') or seed.endswith('b'):
seed = seed[:-1]
try:
seed = int(seed)
elo = float(row['team_rating'])
except ValueError:
print ('Error parsing this line:')
print (row)
raise
return cls(name, region, seed, elo, win_prob_by_round)
def __repr__(self):
return self.name
def __eq__(self, other):
# Only check equality based on names
return self.name == other.name
def __lt__(self, other):
return self.elo < other.elo
def update_elo(self, number_wins, win_prob, round_number):
elo_change = elo_k_factor * (number_wins - win_prob)
self.elo += elo_change
assert( round_number not in self.elo_history ) # We can only have played one match per round
self.elo_history[round_number] = elo_change
def undo_elo_update(self, starting_round_number):
'''
Undo changes to ELO in self for specific round, and all rounds greater than that round
'''
for round_number in range(starting_round_number, max( round_dictionary.keys() ) + 1 ):
if round_number in self.elo_history:
# Later round numbers may not be in history if team lost earlier, so we use this if to check
self.elo -= self.elo_history[round_number]
del self.elo_history[round_number]
def probability_of_victory(self, other, use_starting=False):
if use_starting:
prob = 1.0 / (1.0 + 10.0 ** ( (other.starting_elo - self.starting_elo) * 30.464 / 400.0) )
else:
prob = 1.0 / (1.0 + 10.0 ** ( (other.elo - self.elo) * 30.464 / 400.0) )
# print( 'prob_v', self, other, other.elo, self.elo, '%.2f' % prob )
return prob
def play_match(self, other, round_number, rigged = False, threshold_win_prob = None):
'''
Returns true if we beat other team, otherwise false
Will randomly pick winner based on ELO, unless is rigged (in which case self wins)
Updates ELOs
If threshold_win_prob is not None, then team must have at least that chance of winning to win
'''
win_prob = self.probability_of_victory(other)
number_wins = 0
if rigged:
number_wins += 1
elif threshold_win_prob != None and 1.0 - win_prob < threshold_win_prob:
number_wins += 1
elif random.random() < win_prob:
number_wins += 1
self.update_elo( number_wins, win_prob, round_number )
other.update_elo( 1 - number_wins, 1.0 - win_prob, round_number )
if number_wins == 1:
return True
else:
return False
class BracketTree(object):
def __init__(self, round_number, region_name = None, seeds = None):
self._children = []
self._parent = None
self._round_name = round_dictionary[round_number]
self._round_number = round_number
self._region_name = region_name
self._seeds = seeds
self._teams = []
self._winning_team_index = None
def copy(self):
# Return fast copy by pickling
return pickle.loads( pickle.dumps(self) )
def visualize(self, spacer_len = 0, print_score = True, view_by_round = False, top_level_call = True):
vis_lines = []
if print_score:
vis_lines.append( 'Expected score: %.2f' % self.expected_score() )
vis_lines.append( '{}{}'.format(spacer_len * '-', self._round_name) )
if self._winning_team_index == None:
for team in self._teams:
vis_lines.append( '{}{}'.format(spacer_len * ' ', team.name) )
else:
vis_lines.append( '{}{} ({}) def. {} ({})'.format(spacer_len * ' ', self._teams[self._winning_team_index].name, int(self._teams[self._winning_team_index].seed), self._teams[1-self._winning_team_index].name, int(self._teams[1-self._winning_team_index].seed)) )
for child in self._children:
if view_by_round:
vis_lines.extend( child.visualize( spacer_len = 0, print_score = False, view_by_round = True, top_level_call = False ) )
else:
vis_lines.extend( child.visualize( spacer_len = spacer_len + 2, print_score = False, view_by_round = False, top_level_call = False ) )
if top_level_call and view_by_round:
score_line = ''
if print_score:
score_line = vis_lines[0]
vis_lines = vis_lines[1:]
last_round_line = None
lines_by_round = collections.OrderedDict()
for i, vis_line in enumerate(vis_lines):
if i % 2 == 0:
last_round_line = vis_line
if last_round_line not in lines_by_round:
lines_by_round[last_round_line] = []
else:
lines_by_round[last_round_line].append( vis_line )
return_round_lines = []
if print_score:
return_round_lines.append(score_line)
for round_line in lines_by_round:
return_round_lines.append(round_line)
for team_line in lines_by_round[round_line]:
return_round_lines.append(team_line)
return_round_lines.append('')
return return_round_lines
return vis_lines
def add_team(self, team):
self._teams.append( team )
def add_child(self, child):
assert( child._round_number + 1 == self._round_number )
if self._region_name != None:
assert( child._region_name == self._region_name )
child.set_parent( self )
self._children.append(child)
def set_parent(self, parent):
self._parent = parent
def _init_add_children(self, regional_teams, seeds, cls):
# Helper function used by init_starting_bracket
assert( len(seeds) == len(regional_teams) )
assert( len(seeds) >= 2 and len(seeds) % 2 == 0 )
if len(seeds) > 2:
for winning_seed in seeds[:2]:
child = cls( self._round_number - 1, region_name = self._region_name )
child_seeds = [winning_seed]
current_round = self._round_number - 1
while current_round > 0:
new_child_seeds = [ seed_pairs_by_round[current_round][s] for s in child_seeds]
child_seeds.extend( new_child_seeds )
current_round -= 1
child_seeds.sort()
child._init_add_children(
{ k : regional_teams[k] for k in regional_teams if k in child_seeds },
child_seeds, cls,
)
self.add_child( child )
else:
for seed in seeds:
if len(regional_teams[seed]) > 1:
# First four seed, add one more child
child = cls( self._round_number - 1, region_name = self._region_name )
for team in regional_teams[seed]:
child.add_team(team)
self.add_child( child )
else:
# Not a first four seed
for team in regional_teams[seed]:
self.add_team( team )
@classmethod
def init_starting_bracket(cls):
'''
Uses round_dictionary to initialize a full bracket. Bracket is filled in according to results so far.
'''
teams = {}
min_seed = None
max_seed = None
if not os.path.isfile(default_data_file):
urllib.request.urlretrieve(source_url, default_data_file)
df = pd.read_csv(default_data_file)
df = df.loc[ df['gender'] == 'mens' ].copy().sort_values('forecast_date', ascending = False )
df = df.loc[ df['forecast_date'] == df.iloc[0]['forecast_date'] ].copy()
df = df.loc[ df['team_alive'] == 1 ].copy()
df = df.drop_duplicates( ['team_name'] )
# Read in team data
for index, row in df.iterrows():
team = Team.init_from_row(row)
if min_seed == None or team.seed < min_seed:
min_seed = team.seed
if max_seed == None or team.seed > max_seed:
max_seed = team.seed
if team.region not in teams:
teams[team.region] = {}
if team.seed not in teams[team.region]:
teams[team.region][team.seed] = [team]
else:
teams[team.region][team.seed].append( team )
# Initialize root node (finals) and semifinals
max_round = max(round_dictionary.keys())
finals = cls(max_round)
for region_names in region_pairings:
final_four = cls(max_round-1)
for region_name in region_names:
elite_eight = cls(max_round-2, region_name = region_name)
seeds = list( range(min_seed, max_seed + 1) )
elite_eight._init_add_children( teams[region_name], seeds, cls )
final_four.add_child( elite_eight )
finals.add_child( final_four )
return finals
def random_perturb(self, pop_size):
nodes = random.sample( self.all_nodes(), pop_size )
for node in nodes:
node.swap_winner()
# Run final verification after all swaps are complete
self.verify_bracket()
def single_random_perturb(self):
node = random.choice( self.all_nodes() )
node.swap_winner()
def all_nodes(self):
nodes = [ self ]
for child in self._children:
nodes.extend( child.all_nodes() )
return nodes
def all_teams(self):
all_teams = []
for node in self.all_nodes():
all_teams.extend( node._teams )
return all_teams
def swap_winner(self, threshold_win_prob = None):
assert( len(self._teams) == 2 )
current_winner = self._teams[ self._winning_team_index ]
current_loser = self._teams[ 1 - self._winning_team_index ]
loser_win_prob = current_loser.probability_of_victory(current_winner)
if threshold_win_prob != None and loser_win_prob < threshold_win_prob:
return
for team in self._teams:
team.undo_elo_update(self._round_number)
if self._parent != None:
self._parent.remove_team_upwards( self._teams[self._winning_team_index], self._teams[ 1 - self._winning_team_index] )
self._winning_team_index = 1 - self._winning_team_index
# Update ELOs according to swapped result
self._teams[self._winning_team_index].play_match( self._teams[ 1 - self._winning_team_index], self._round_number, rigged = True )
def remove_team_upwards(self, old_winner, new_winner):
'''
Removes a team that previously won in a child game
Resimulates new winner in new random match
'''
our_old_winner = self._teams[self._winning_team_index]
self._teams.remove( old_winner )
self._teams.append( new_winner )
assert( len(self._teams) == 2 )
# Undo ELO updates before new match
for team in self._teams:
team.undo_elo_update(self._round_number)
# Play match
if self._teams[0].play_match( self._teams[1], self._round_number ):
self._winning_team_index = 0
else:
self._winning_team_index = 1
# Recursive call upwards
if self._parent != None:
self._parent.remove_team_upwards( our_old_winner, self._teams[self._winning_team_index] )
def verify_bracket(self):
'''
Ensures that a bracket is valid and filled
Checks that if a team won a lower round, is present in the upper round
'''
assert( self._winning_team_index != None )
assert( len(self._teams) == 2 )
prev_round_winners = sorted( self._teams )
children_winners = sorted( [ child._teams[child._winning_team_index] for child in self._children ] )
if len( self._children ) == 2:
assert( prev_round_winners == children_winners )
elif len( self._children ) == 1:
assert( children_winners[0] in prev_round_winners )
for child in self._children:
child.verify_bracket()
def simulate_fill(self):
# Randomly fills in bracket based on ELO simulation
# Fills in blanks
assert( self._winning_team_index == None )
for child in self._children:
child.simulate_fill()
self._teams.append( child._teams[child._winning_team_index] )
assert( len( self._teams ) == 2 )
if self._teams[0].play_match( self._teams[1], self._round_number ):
self._winning_team_index = 0
else:
self._winning_team_index = 1
def all_team_names(self):
teams = set()
for child in self._children:
teams.update( child.all_team_names() )
teams.update( [team.name for team in self._teams] )
return teams
def winners_vector(self):
'''
Returns vector representing how far teams advanced
'''
winners_dict = self.winners_dict()
v = np.zeros( (len(winners_dict), len(round_dictionary)) )
team_names = sorted( winners_dict.keys() )
for i, team_name in enumerate(team_names):
if winners_dict[team_name] >= 0:
for j in range(0, winners_dict[team_name]+1):
v[i][j] += 1
return v
def team_names(self):
return sorted( self.winners_dict().keys() )
def winners_dict(self, furthest_round = None):
if furthest_round == None:
min_round = min(round_dictionary.keys())
furthest_round = {name : min_round - 1 for name in self.all_team_names()}
for team in self._teams:
if self._round_number - 1 > furthest_round[team.name]:
furthest_round[team.name] = self._round_number - 1
winning_team_name = self._teams[self._winning_team_index].name
if self._round_number > furthest_round[winning_team_name]:
furthest_round[winning_team_name] = self._round_number
for child in self._children:
child.winners_dict( furthest_round )
return furthest_round
def total_probability(self):
assert( len(self._teams) == 2 )
winning_team = self._teams[self._winning_team_index]
losing_team = self._teams[1-self._winning_team_index]
return_prob = winning_team.probability_of_victory(losing_team)
if len(self._children) == 2: # Skip first 4
for child in self._children:
return_prob = return_prob * child.total_probability()
            if return_prob > 1.0 or return_prob < 0.0:
                print( winning_team, losing_team, self._round_number,
                       winning_team.probability_of_victory(losing_team),
                       self._children[0].total_probability(),
                       self._children[1].total_probability(),
                       winning_team.elo, losing_team.elo )
                print( return_prob )
                raise Exception('total_probability outside [0, 1]')
return return_prob
# return 0
# for child in self._children:
# probability_of_victory *= child.total_probability()
# assert( self._winning_team_index != None )
# assert( len(self._teams) == 2 )
# winning_team = self._teams[self._winning_team_index]
# losing_team = self._teams[1-self._winning_team_index]
# probability_of_victory *= winning_team.probability_of_victory(losing_team)
# return probability_of_victory
def round_cbs_score(self):
# This dictionary is used to calculate the expected score of a bracket in leagues where
# additional points are awarded for correct picks in later rounds. Each key corresponds
# to the number of a round (see round_dictionary) above, and each value corresponds to
# the weight for each correct pick in that round. For example, a key/value pair of
# 3:2 would mean that a correct pick in the third round is worth twice as much as the baseline
# The seed of winner is also added to score (to give more points for picking low seeds)
default_cbs_scores = {
0:0,
1:1,
2:2,
3:3,
4:4,
5:6,
6:8
}
assert( self._winning_team_index != None )
assert( len(self._teams) == 2 )
winning_team = self._teams[self._winning_team_index]
return default_cbs_scores[self._round_number] + winning_team.seed
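    # Example (illustrative): a correct round-2 pick of a 12 seed scores
    # default_cbs_scores[2] + 12 = 14 points under round_cbs_score().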
def round_yahoo_score(self):
default_yahoo_scores = {
0:0,
1:1,
2:2,
3:4,
4:8,
5:16,
6:32
}
assert( self._winning_team_index != None )
assert( len(self._teams) == 2 )
winning_team = self._teams[self._winning_team_index]
losing_team = self._teams[1-self._winning_team_index]
return max( [0, winning_team.seed - losing_team.seed] ) + default_yahoo_scores[self._round_number]
def expected_score(self):
# Expected value of our winner beating all possible opponents, recursive
score = 0.0
winning_team = self._teams[self._winning_team_index]
losing_team = self._teams[1-self._winning_team_index]
if len(self._children) == 2:
# Only recurse if two children (to avoid first four games)
child_with_loser = None
if self._children[0]._teams[0].name == losing_team.name or self._children[0]._teams[1].name == losing_team.name:
child_with_loser = self._children[0]
if self._children[1]._teams[0].name == losing_team.name or self._children[1]._teams[1].name == losing_team.name:
assert( child_with_loser == None )
child_with_loser = self._children[1]
assert( child_with_loser != None )
for possible_opponent in child_with_loser.all_teams():
prob_opponent = possible_opponent.win_prob_by_round[self._round_number-1]
score += winning_team.probability_of_victory(possible_opponent, use_starting=True) * prob_opponent
for child in self._children:
score += child.expected_score()
else:
score += self.round_score() * winning_team.probability_of_victory(losing_team, use_starting=True)
return score
def round_score(self):
# Have to change score function manually below for now
return self.round_cbs_score()
def score(self):
score = self.round_score()
for child in self._children:
score += child.round_score()
return score
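# Minimal usage sketch (assumes the 538 forecast CSV can be downloaded):
#   bt = BracketTree.init_starting_bracket()
#   bt.simulate_fill()                      # one random ELO-driven bracket
#   print('\n'.join(bt.visualize()))        # winners, round by round
#   print('expected score: %.2f' % bt.expected_score())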
def simulate_winners_vector(bt_pickle):
bt_copy = pickle.loads(bt_pickle)
bt_copy.simulate_fill()
return bt_copy.winners_vector()
class CallbackVectorQueue(object):
def __init__(self, initial_v):
self.q = queue.Queue()
self.v = initial_v
self.trials = 0
self.thread = threading.Thread(target=self.thread_run)
self.thread.daemon = True # Daemonize thread
self.thread.start()
def thread_run(self):
while True:
self.v += self.q.get()
self.trials += 1
def callback(self, v):
self.q.put(v)
def close(self):
while not self.q.empty():
time.sleep(0.001)
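# CallbackVectorQueue aggregates result vectors from multiprocessing
# callbacks on a single daemon thread, so the shared accumulator `v` is
# only ever mutated from one thread; close() waits for the queue to drain.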
def run_stats( number_simulations = 10000 ):
bt = BracketTree.init_starting_bracket()
# Initial simulation to initialize vector
bt_pickle = pickle.dumps( bt )
initial_v = simulate_winners_vector(bt_pickle)
v_callback = CallbackVectorQueue(initial_v)
if use_multiprocessing:
pool = multiprocessing.Pool()
for sim_num in range(0, number_simulations):
if use_multiprocessing:
pool.apply_async( simulate_winners_vector, args = (bt_pickle,), callback = v_callback.callback )
else:
v_callback.callback( simulate_winners_vector(bt_pickle) )
if use_multiprocessing:
pool.close()
pool.join()
v_callback.close()
v = v_callback.v
v /= float( number_simulations )
print_list = []
# Run simulation to fill in team names
bt.simulate_fill()
for i, team_name in enumerate( bt.team_names() ):
champion_percentage = v[i][ len(round_dictionary) - 1 ]
l = list( reversed( v[i] ) )
l.append( team_name )
print_list.append( l )
print_list.sort( reverse = True )
for row in print_list:
line = ''
for x in row:
if isinstance(x, str):
line += x
else:
line += '%.2f ' % x
print ( line )
print ( 'Total trials: %d' % v_callback.trials )
def run_monte_carlo_helper(temp_steps, max_perturbations, mc, blank_bt):
# chance of fresh bracket start
if random.random() >= 0.95:
bt = blank_bt.copy()
bt.simulate_fill()
mc.set_last_bt( bt )
for temperature in temp_steps:
bt = mc.last_bt.copy()
# Perturb
bt.random_perturb( random.randint(1, max_perturbations) )
# bt.single_random_perturb()
# Score
mc.temperature = temperature
mc.boltzmann( bt )
return mc
def run_monte_carlo( num_trials = 10000, view_by_round = False ):
# Parameters for MC simulation
max_perturbations = 10
starting_temp = 20.0
ending_temp = 1.0
low_temp_final_steps = 500
# Output parameters
highest_mc_bt_cache = os.path.join('cache', 'highest_mc_bt.pickle') # Saves best bracket for reloading as starting point in later simulations
highest_vis_output = os.path.join('cache', 'highest_bracket.txt')
blank_bt = BracketTree.init_starting_bracket()
if os.path.isfile( highest_mc_bt_cache ):
with open(highest_mc_bt_cache, 'rb') as f:
bt = pickle.load(f)
else:
if not os.path.isdir( os.path.dirname( highest_mc_bt_cache ) ):
os.makedirs( os.path.dirname( highest_mc_bt_cache ) )
# Initial simulation
bt = blank_bt.copy()
bt.simulate_fill()
mc = MonteCarloBracketSimulator( bt )
temp_steps = list( np.arange(starting_temp, ending_temp, -0.005) )
temp_steps.extend( [ending_temp for x in range(low_temp_final_steps) ] )
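    # Annealing schedule: ramp from starting_temp down to ending_temp in
    # 0.005 decrements (~3800 Metropolis evaluations per pass), then hold
    # at ending_temp for low_temp_final_steps additional moves.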
def callback(thread_mc):
nonlocal mc
if thread_mc.highest_score > mc.highest_score:
mc = thread_mc
for trial in range(num_trials):
if use_multiprocessing:
pool = multiprocessing.Pool()
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
        for _ in range(cpu_count):
if use_multiprocessing:
pool.apply_async( run_monte_carlo_helper, args = (temp_steps, max_perturbations, mc.copy(), blank_bt), callback = callback )
else:
callback( run_monte_carlo_helper( temp_steps, max_perturbations, mc.copy(), blank_bt ) )
if use_multiprocessing:
pool.close()
pool.join()
print ( 'MC simulation complete (round {})'.format(trial) )
print ( 'Highest score: %.2f' % mc.highest_score )
print ( 'Last score: %.2f\n' % mc.last_score )
with open(highest_mc_bt_cache, 'wb') as f:
pickle.dump(mc.highest_bt, f)
with open(highest_vis_output, 'w') as f:
for line in mc.highest_bt.visualize():
f.write( line + '\n' )
if view_by_round:
print( '\n'.join( mc.highest_bt.visualize( view_by_round = True ) ) )
def run_quick_pick( score_thresh, view_by_round = False ):
while True:
bt = BracketTree.init_starting_bracket()
bt.simulate_fill()
if score_thresh == None or bt.expected_score() >= score_thresh:
break
print ( '\n'.join( bt.visualize( view_by_round = view_by_round ) ) )
def predictor():
# Setup argument parser
parser = argparse.ArgumentParser(description=program_description)
parser.add_argument(
'-s', '--stats',
type = int,
default = 0,
help = "Run many times to get statistics"
)
parser.add_argument(
'-m', '--monte_carlo',
type = int,
default = 0,
help = "How many outer loops of ramping monte carlo simulation"
)
parser.add_argument(
'-q', '--quick_pick',
default = False,
action = 'store_true',
help = 'Generate a "quick pick" style bracket'
)
parser.add_argument(
'--quick_thresh',
default = None,
type = float,
help = 'If running a quick pick, you can specify a minimum expected score threshold here'
)
parser.add_argument(
'--view_by_round',
default = False,
action = 'store_true',
help = 'Print output by round'
)
args = parser.parse_args()
if args.quick_pick:
run_quick_pick( args.quick_thresh, view_by_round = args.view_by_round )
if args.stats > 0:
run_stats( args.stats )
if args.monte_carlo > 0:
run_monte_carlo( args.monte_carlo, view_by_round = args.view_by_round )
if __name__ == "__main__":
predictor()
| gpl-3.0 | 5,411,448,162,556,504,000 | 35.542892 | 271 | 0.579765 | false | 3.631592 | false | false | false |
stephenfin/patchwork | patchwork/tests/utils.py | 1 | 9894 | # Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import codecs
from datetime import datetime as dt
from datetime import timedelta
from email.utils import make_msgid
import os
from django.contrib.auth.models import User
from patchwork.models import Bundle
from patchwork.models import Check
from patchwork.models import Comment
from patchwork.models import CoverLetter
from patchwork.models import Patch
from patchwork.models import Person
from patchwork.models import Project
from patchwork.models import Series
from patchwork.models import SeriesReference
from patchwork.models import State
from patchwork.tests import TEST_PATCH_DIR
SAMPLE_DIFF = """--- /dev/null 2011-01-01 00:00:00.000000000 +0800
+++ a 2011-01-01 00:00:00.000000000 +0800
@@ -0,0 +1 @@
+a
"""
SAMPLE_CONTENT = 'Hello, world.'
def read_patch(filename, encoding=None):
"""Read a diff from a file."""
file_path = os.path.join(TEST_PATCH_DIR, filename)
if encoding is not None:
f = codecs.open(file_path, encoding=encoding)
else:
f = open(file_path)
result = f.read()
f.close()
return result
error_strings = {
'email': 'Enter a valid email address.',
}
def create_project(**kwargs):
"""Create a 'Project' object."""
num = Project.objects.count()
values = {
'linkname': 'test-project-%d' % num,
'name': 'Test Project %d' % num,
'listid': 'test%d.example.com' % num,
'listemail': 'test%[email protected]' % num,
'subject_match': '',
}
values.update(kwargs)
return Project.objects.create(**values)
def create_person(**kwargs):
"""Create a 'Person' object."""
num = Person.objects.count()
values = {
'email': 'test_person_%[email protected]' % num,
'name': 'test_person_%d' % num,
'user': None,
}
values.update(kwargs)
return Person.objects.create(**values)
def create_user(link_person=True, **kwargs):
"""Create a 'User' object.
Args:
link_person (bool): If true, create a linked Person object.
"""
num = User.objects.count()
values = {
'username': 'test_user_%d' % num,
'email': 'test_user_%[email protected]' % num,
'first_name': 'Tester',
'last_name': 'Num%d' % num,
}
values.update(kwargs)
# this one must be done rather specifically
user = User.objects.create_user(values['username'], values['email'],
values['username'],
first_name=values['first_name'],
last_name=values['last_name'])
if link_person:
# unfortunately we don't split on these
values['name'] = ' '.join([values.pop('first_name'),
values.pop('last_name')])
values.pop('username')
create_person(user=user, **values)
return user
def create_maintainer(project=None, **kwargs):
"""Create a 'User' and set as maintainer for provided project."""
if not project:
project = create_project()
user = create_user(**kwargs)
profile = user.profile
profile.maintainer_projects.add(project)
profile.save()
return user
def create_state(**kwargs):
"""Create 'State' object."""
num = State.objects.count()
values = {
'name': 'state_%d' % num,
'ordering': num,
'action_required': True,
}
values.update(kwargs)
return State.objects.create(**values)
def create_bundle(**kwargs):
"""Create 'Bundle' object."""
num = Bundle.objects.count()
values = {
'owner': create_user() if 'owner' not in kwargs else None,
'project': create_project() if 'project' not in kwargs else None,
'name': 'test_bundle_%d' % num,
}
values.update(kwargs)
return Bundle.objects.create(**values)
def create_patch(**kwargs):
"""Create 'Patch' object."""
num = Patch.objects.count()
# NOTE(stephenfin): Even though we could simply pass 'series' into the
# constructor, we don't as that's not what we do in the parser and not what
# our signal handlers (for events) expect
if 'series' in kwargs:
series = kwargs.pop('series')
else:
series = create_series(project=kwargs.pop('project', create_project()))
if 'number' in kwargs:
number = kwargs.pop('number', None)
elif series:
number = series.patches.count() + 1
# NOTE(stephenfin): We overwrite the provided project, if there is one, to
# maintain some degree of sanity
if series:
kwargs['project'] = series.project
values = {
'submitter': create_person() if 'submitter' not in kwargs else None,
'delegate': None,
'project': create_project() if 'project' not in kwargs else None,
'msgid': make_msgid(),
'state': create_state() if 'state' not in kwargs else None,
'name': 'testpatch%d' % num,
'headers': '',
'content': 'Patch testpatch%d' % num,
'diff': SAMPLE_DIFF,
}
values.update(kwargs)
if 'patch_project' not in values:
values['patch_project'] = values['project']
patch = Patch.objects.create(**values)
if series:
number = number or series.patches.count() + 1
series.add_patch(patch, number)
return patch
def create_cover(**kwargs):
"""Create 'CoverLetter' object."""
num = CoverLetter.objects.count()
# NOTE(stephenfin): Despite first appearances, passing 'series' to the
# 'create' function doesn't actually cause the relationship to be created.
# This is probably a bug in Django. However, it's convenient to do so we
# emulate that here. For more info, see [1].
#
# [1] https://stackoverflow.com/q/43119575/
if 'series' in kwargs:
series = kwargs.pop('series')
else:
series = create_series(project=kwargs.pop('project', create_project()))
# NOTE(stephenfin): We overwrite the provided project, if there is one, to
# maintain some degree of sanity
if series:
kwargs['project'] = series.project
values = {
'submitter': create_person() if 'person' not in kwargs else None,
'project': create_project() if 'project' not in kwargs else None,
'msgid': make_msgid(),
'name': 'testpatch%d' % num,
'headers': '',
'content': '',
}
values.update(kwargs)
cover = CoverLetter.objects.create(**values)
if series:
series.add_cover_letter(cover)
return cover
def create_comment(**kwargs):
"""Create 'Comment' object."""
values = {
'submitter': create_person() if 'submitter' not in kwargs else None,
'submission': create_patch() if 'submission' not in kwargs else None,
'msgid': make_msgid(),
'content': SAMPLE_CONTENT,
}
values.update(kwargs)
return Comment.objects.create(**values)
def create_check(**kwargs):
"""Create 'Check' object."""
values = {
'patch': create_patch() if 'patch' not in kwargs else None,
'user': create_user() if 'user' not in kwargs else None,
'date': dt.utcnow(),
'state': Check.STATE_SUCCESS,
'target_url': 'http://example.com/',
'description': '',
'context': 'jenkins-ci',
}
values.update(**kwargs)
return Check.objects.create(**values)
def create_series(**kwargs):
"""Create 'Series' object."""
values = {
'project': create_project() if 'project' not in kwargs else None,
'date': dt.utcnow(),
'submitter': create_person() if 'submitter' not in kwargs else None,
'total': 1,
}
values.update(**kwargs)
return Series.objects.create(**values)
def create_series_reference(**kwargs):
"""Create 'SeriesReference' object."""
values = {
'series': create_series() if 'series' not in kwargs else None,
'msgid': make_msgid(),
}
values.update(**kwargs)
return SeriesReference.objects.create(**values)
def _create_submissions(create_func, count=1, **kwargs):
"""Create 'count' Submission-based objects.
Args:
count (int): Number of patches to create
kwargs (dict): Overrides for various patch fields
"""
values = {
'project': create_project() if 'project' not in kwargs else None,
'submitter': create_person() if 'submitter' not in kwargs else None,
}
values.update(kwargs)
date = dt.utcnow()
objects = []
for i in range(0, count):
obj = create_func(date=date + timedelta(minutes=i),
**values)
objects.append(obj)
return objects
def create_patches(count=1, **kwargs):
"""Create 'count' unique patches.
This differs from 'create_patch', in that it will ensure all
patches have at least the same project and submitter. In addition,
it is possible to set other fields to the same value, by passing
them as kwargs.
Args:
count (int): Number of patches to create
kwargs (dict): Overrides for various patch fields
"""
values = {
'state': create_state() if 'state' not in kwargs else None
}
values.update(kwargs)
return _create_submissions(create_patch, count, **values)
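# Typical test usage (illustrative):
#   patch = create_patch()        # project, series and state auto-created
#   patches = create_patches(3)   # three patches sharing project/submitter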
def create_covers(count=1, **kwargs):
"""Create 'count' unique cover letters.
This differs from 'create_cover', in that it will ensure all cover
letters have at least the same project and submitter. In addition,
it is possible to set other fields to the same value, by passing
them as kwargs.
Args:
count (int): Number of cover letters to create
kwargs (dict): Overrides for various cover letter fields
"""
return _create_submissions(create_cover, count, **kwargs)
| gpl-2.0 | 5,798,578,965,626,591,000 | 27.34957 | 79 | 0.616636 | false | 3.873923 | true | false | false |
khoanguyen0791/cs170 | CS170_homework/Asg 61.py | 1 | 1653 | from PIL import Image
from colorPixelListPIL import *
def negative_color(pic,n):
span = 256/n
width, height = pic.size
ni = Image.new("RGB", (width,height),(255,255,255))
for w in range(width):
for h in range(height):
pix = pic.getpixel((w,h))
(r,g,b) = pix
            # clamp to 255: a channel value of 0 would otherwise give n*256//n == 256
            newR = min(255, int(n-r/span)*256//n)
            newG = min(255, int(n-g/span)*256//n)
            newB = min(255, int(n-b/span)*256//n)
            ni.putpixel((w,h),(newR, newG, newB))
return ni
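# Example (illustrative): with n = 7 levels, span = 256/7, so r = 200 maps to
# min(255, int(7 - 200/span) * 256 // 7) = 36, i.e. a dark band of the
# posterized negative.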
def grayscale(pic):
width, height = pic.size
ni = pic.copy()
for w in range(width):
for h in range(height):
pix = pic.getpixel((w,h))
(r,g,b) = pix
avg = int((r+g+b)/3)
ni.putpixel((w,h),(avg,avg,avg))
return ni
def negative_range(pic):
width, height = pic.size
ni = pic.copy()
c1 = bisque4
c2 = SteelBlue2
c3 = CadetBlue
c4 = dark_sea_green
c5 = PaleTurquoise4
c6 = OliveDrab1
for w in range(width):
for h in range(height):
pix = pic.getpixel((w,h))
(r,g,b) = pix
if r <256/6*1:
pix = c1
elif r <256/6*2:
pix = c2
elif r <256/6*3:
pix = c3
elif r <256/6*4:
pix = c4
elif r <256/6*5:
pix = c5
elif r <256/6*6:
pix = c6
ni.putpixel((w,h),pix)
return ni
a = Image.open("original.jpg")
b = negative_color(a,7)
b.save("negated.jpg")
c = grayscale(a)
d = negative_range(c)
d.save("negated_1.jpg")
| apache-2.0 | 3,924,509,857,845,825,500 | 25.238095 | 55 | 0.477919 | false | 2.941281 | false | false | false |
turbulenz/turbulenz_local | turbulenz_local/decorators.py | 1 | 3134 | # Copyright (c) 2011,2013 Turbulenz Limited
from warnings import warn
from decorator import decorator
from simplejson import JSONEncoder, JSONDecoder
from pylons import request, response
from urlparse import urlparse
from turbulenz_local.lib.exceptions import PostOnlyException, GetOnlyException
# pylint: disable=C0103
_json_encoder = JSONEncoder(encoding='utf-8', separators=(',',':'))
_json_decoder = JSONDecoder(encoding='utf-8')
# pylint: enable=C0103
@decorator
def postonly(func, *args, **kwargs):
try:
_postonly()
return func(*args, **kwargs)
except PostOnlyException as e:
return e
def _postonly():
if request.method != 'POST':
headers = response.headers
headers['Content-Type'] = 'application/json; charset=utf-8'
headers['Cache-Control'] = 'no-store, no-cache, max-age=0'
headers['Allow'] = 'POST'
response.status_int = 405
raise PostOnlyException('{"ok":false,"msg":"Post Only!"}')
def _getonly():
if request.method != 'GET':
headers = response.headers
headers['Content-Type'] = 'application/json; charset=utf-8'
headers['Cache-Control'] = 'no-store, no-cache, max-age=0'
headers['Allow'] = 'GET'
response.status_int = 405
raise GetOnlyException('{"ok":false,"msg":"Get Only!"}')
@decorator
def jsonify(func, *args, **kwargs):
return _jsonify(func(*args, **kwargs))
def _jsonify(data):
# Sometimes we get back a string and we don't want to double-encode
# Checking for basestring instance catches both unicode and str.
if not isinstance(data, basestring):
if isinstance(data, (list, tuple)):
msg = "JSON responses with Array envelopes are susceptible to " \
"cross-site data leak attacks, see " \
"http://pylonshq.com/warnings/JSONArray"
warn(msg, Warning, 2)
data = _json_encoder.encode(data)
if 'callback' in request.params:
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
cbname = str(request.params['callback'])
data = '%s(%s);' % (cbname, data)
else:
response.headers['Content-Type'] = 'application/json; charset=utf-8'
return data
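# Example (hypothetical Pylons controller method):
#   @jsonify
#   def status(self):
#       return {'ok': True}
# GET /status            -> {"ok":true}
# GET /status?callback=f -> f({"ok":true});   (JSONP)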
@decorator
def secure_get(func, *args, **kwargs):
try:
_getonly()
return _secure(request.GET, func, *args, **kwargs)
except GetOnlyException as e:
return e.value
@decorator
def secure_post(func, *args, **kwargs):
try:
_postonly()
return _secure(request.POST, func, *args, **kwargs)
except PostOnlyException as e:
return e.value
def _secure(requestparams, func, *args, **kwargs):
if 'data' in requestparams:
data = _json_decoder.decode(requestparams['data'])
if data is None:
data = dict()
else:
data = dict()
data.update(requestparams)
args = args[:-1] + (data,)
func_result = func(*args, **kwargs)
# pylint: disable=E1101
func_result['requestUrl'] = urlparse(request.url).path
# pylint: enable=E1101
return _jsonify(func_result)
| mit | 1,956,189,377,863,622,400 | 28.847619 | 78 | 0.63178 | false | 3.682726 | false | false | false |
atakan/Complex_Coeff_RKN | testing/timing_runs/400body_CPUtime_plot.py | 1 | 3030 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Polygon
from matplotlib import rc
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.ticker import LogLocator
P = 8.0*np.arctan(1.0)*4.0*np.sqrt(2.0)
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
#rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('font', family='serif')
mpl.rcParams['ps.usedistiller'] = 'xpdf'
mpl.rcParams['font.size'] = 11
IQRs = np.loadtxt('IQRs_N400.dat')
LF_50_IQRs = IQRs[0:7,0:2]
TJ_50_IQRs = IQRs[7:14,0:2]
Cha_50_IQRs = IQRs[14:21,0:2]
RKNa14_50_IQRs = IQRs[21:28,0:2]
RKNb11_50_IQRs = IQRs[28:35,0:2]
RKNac1_50_IQRs = IQRs[35:42,0:2]
RKNbc1_50_IQRs = IQRs[42:49,0:2]
RKNbr1_50_IQRs = IQRs[49:56,0:2]
RKNar1_50_IQRs = IQRs[56:63,0:2]
RKNb6_50_IQRs = IQRs[77:84,0:2]
metcf_LF = 1
metcf_Cha = 12.01
metcf_TJ = 3.004
metcf_RKNb5 = 5.003
metcf_RKNb6 = 6.024
metcf_RKNb11 = 11.03
metcf_RKNa14 = 14.04
metcf_RKNar1b = 21.73
metcf_RKNar1 = 5.005
metcf_RKNbr1 = 4.997
metcf_RKNac1 = 30.36
metcf_RKNbc1 = 28.62
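# The metcf_* factors appear to scale step size into relative CPU cost per
# step (an assumption based on the axis label below), so 0.01/dt * metcf
# gives CPU time normalized to leapfrog at dt = 0.01.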
fig=plt.figure(figsize=(9,6))
#fig=plt.figure()
ax=fig.add_subplot(111)
#ax.loglog(RKNb11_50_IQRs[:,0], RKNb11_50_IQRs[:,1], label='RKNb11')
#ax.loglog(RKNbc1_50_IQRs[:,0], RKNbc1_50_IQRs[:,1], label='RKNbc1')
#ax.loglog(RKNar1_50_IQRs[:,0], RKNar1_50_IQRs[:,1], label='RKNar1')
ax.loglog(0.01/LF_50_IQRs[:,0] *metcf_LF , LF_50_IQRs[:,1] , 'v-', label='Leapfrog')
ax.loglog(0.01/TJ_50_IQRs[:,0] *metcf_TJ , TJ_50_IQRs[:,1] , '+-', label='Triple Jump')
ax.loglog(0.01/Cha_50_IQRs[:,0] *metcf_Cha , Cha_50_IQRs[:,1] , '^-', label='Chambers')
ax.loglog(0.01/RKNb6_50_IQRs[:,0] *metcf_RKNb6 , RKNb6_50_IQRs[:,1 ], '*-', label='RKNb6' )
ax.loglog(0.01/RKNa14_50_IQRs[:,0]*metcf_RKNa14, RKNa14_50_IQRs[:,1], 'o-', label='RKNa14')
ax.loglog(0.01/RKNbr1_50_IQRs[:,0]*metcf_RKNbr1, RKNbr1_50_IQRs[:,1], 'p-', label='RKNbr1')
ax.loglog(0.01/RKNac1_50_IQRs[:,0]*metcf_RKNac1, RKNac1_50_IQRs[:,1], 's-', label='RKNac1')
ax.set_xlim(2e-2, 1e2)
#ax.set_ylim(1e-16,2e-2)
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(16)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(16)
plt.legend(loc='lower left')
plt.xlabel('CPU time (normalized to LF, $\delta t = 0.01$)', fontsize=18)
plt.ylabel('Interquartile range for $r-r_\mathrm{GBS}$', fontsize=18)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=14)
majorLocator = LogLocator(100)
#majorFormatter = FormatStrFormatter('%d')
minorLocator = LogLocator(10)
ax.yaxis.set_major_locator(majorLocator)
#ax.xaxis.set_major_formatter(majorFormatter)
#for the minor ticks, use no labels; default NullFormatter
ax.yaxis.set_minor_locator(minorLocator)
plt.savefig('400body_CPUtime_plot.eps',
orientation='landscape',bbox_inches='tight')
plt.show()
| gpl-3.0 | -1,209,233,719,801,235,500 | 30.894737 | 96 | 0.674587 | false | 2.127809 | false | false | false |
robacklin/sigrok | sigrok-meter/mainwindow.py | 1 | 5394 | ##
## This file is part of the sigrok-meter project.
##
## Copyright (C) 2013 Uwe Hermann <[email protected]>
## Copyright (C) 2014 Jens Steinhauser <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import datamodel
import os.path
import qtcompat
import samplingthread
import textwrap
QtCore = qtcompat.QtCore
QtGui = qtcompat.QtGui
class EmptyMessageListView(QtGui.QListView):
    '''List view that shows a message if the model is empty.'''
def __init__(self, message, parent=None):
super(self.__class__, self).__init__(parent)
self._message = message
def paintEvent(self, event):
m = self.model()
if m and m.rowCount():
super(self.__class__, self).paintEvent(event)
return
painter = QtGui.QPainter(self.viewport())
painter.drawText(self.rect(), QtCore.Qt.AlignCenter, self._message)
class MainWindow(QtGui.QMainWindow):
'''The main window of the application.'''
def __init__(self, context, drivers):
super(self.__class__, self).__init__()
self.context = context
self.delegate = datamodel.MultimeterDelegate(self, self.font())
self.model = datamodel.MeasurementDataModel(self)
self.model.rowsInserted.connect(self.modelRowsInserted)
self.setup_ui()
self.thread = samplingthread.SamplingThread(self.context, drivers)
self.thread.measured.connect(self.model.update)
self.thread.error.connect(self.error)
self.thread.start()
def setup_ui(self):
self.setWindowTitle('sigrok-meter')
# Resizing the listView below will increase this again.
self.resize(350, 10)
p = os.path.abspath(os.path.dirname(__file__))
p = os.path.join(p, 'sigrok-logo-notext.png')
self.setWindowIcon(QtGui.QIcon(p))
actionQuit = QtGui.QAction(self)
actionQuit.setText('&Quit')
actionQuit.setIcon(QtGui.QIcon.fromTheme('application-exit'))
actionQuit.setShortcut('Ctrl+Q')
actionQuit.triggered.connect(self.close)
actionAbout = QtGui.QAction(self)
actionAbout.setText('&About')
actionAbout.setIcon(QtGui.QIcon.fromTheme('help-about'))
actionAbout.triggered.connect(self.show_about)
menubar = self.menuBar()
menuFile = menubar.addMenu('&File')
menuFile.addAction(actionQuit)
menuHelp = menubar.addMenu('&Help')
menuHelp.addAction(actionAbout)
self.listView = EmptyMessageListView('waiting for data...')
self.listView.setFrameShape(QtGui.QFrame.NoFrame)
self.listView.viewport().setBackgroundRole(QtGui.QPalette.Window)
self.listView.viewport().setAutoFillBackground(True)
self.listView.setMinimumWidth(260)
self.listView.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.listView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.listView.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.listView.setItemDelegate(self.delegate)
self.listView.setModel(self.model)
self.listView.setUniformItemSizes(True)
self.listView.setMinimumSize(self.delegate.sizeHint())
self.setCentralWidget(self.listView)
self.centralWidget().setContentsMargins(0, 0, 0, 0)
def closeEvent(self, event):
self.thread.stop()
event.accept()
@QtCore.Slot()
def show_about(self):
text = textwrap.dedent('''\
<div align="center">
<b>sigrok-meter 0.1.0</b><br/><br/>
Using libsigrok {} (lib version {}).<br/><br/>
<a href='http://www.sigrok.org'>
http://www.sigrok.org</a><br/>
<br/>
License: GNU GPL, version 3 or later<br/>
<br/>
This program comes with ABSOLUTELY NO WARRANTY;<br/>
for details visit
<a href='http://www.gnu.org/licenses/gpl.html'>
http://www.gnu.org/licenses/gpl.html</a>
</div>
'''.format(self.context.package_version, self.context.lib_version))
QtGui.QMessageBox.about(self, 'About sigrok-meter', text)
@QtCore.Slot(str)
def error(self, msg):
'''Error handler for the sampling thread.'''
QtGui.QMessageBox.critical(self, 'Error', msg)
self.close()
@QtCore.Slot(object, int, int)
def modelRowsInserted(self, parent, start, end):
'''Resize the list view to the size of the content.'''
rows = self.model.rowCount()
dh = self.delegate.sizeHint().height()
self.listView.setMinimumHeight(dh * rows)
| gpl-3.0 | 6,154,553,371,368,940,000 | 36.458333 | 83 | 0.649611 | false | 3.872218 | false | false | false |
Shadeslayer345/PyPong | Main/Game/Ball.py | 1 | 1418 | import pygame
from pygame.locals import *
from Colors import *
class Ball:
def __init__(self, surface, color, position):
self.layer = surface
self.color = color
self.init_X = position[0]
self.init_Y = position[1]
self.var_X = self.init_X
self.var_Y = self.init_Y
self.radius = position[2]
self.width = position[3]
self.change_X = 5
self.change_Y = 5
''' CREATING BALL '''
def create_Ball(self):
self.center = [self.var_X, self.var_Y]
pygame.draw.circle(self.layer,self.color,self.center,self.radius,self.width)
''' BALL MOVEMENT '''
def play_ball(self,direction=1):
self.var_X += self.change_X
self.var_Y += self.change_Y
def bounce_backX(self):
self.change_X *= -1
def bounce_backY(self):
self.change_Y *= -1
def score(self):
self.var_X = self.init_X
self.var_Y = self.init_Y
''' BALL POSITION (CENTER) '''
def get_Pos(self):
return {"x" : self.var_X, "y" : self.var_Y}
''' MANUAL CONTROL FUNCTIONS (DEBUGGING) '''
def moveUp(self):
self.var_Y -= self.change_Y
if self.var_Y <= 16:
self.var_Y += self.change_Y
def moveDown(self):
self.var_Y += self.change_Y
if (self.var_Y+self.radius) >= 486:
self.var_Y -= self.change_Y
def moveLeft(self):
self.var_X -= self.change_X
if self.var_X <= 16:
self.var_X += self.change_X
def moveRight(self):
self.var_X += self.change_X
if (self.var_X+self.radius) >= 566:
self.var_X -= self.change_X
| mit | -3,764,336,414,180,322,000 | 25.754717 | 78 | 0.646685 | false | 2.444828 | false | false | false |
ZeroCater/linty | interface/migrations/0002_auto_20160525_2315.py | 1 | 1906 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-26 06:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('interface', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Build',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ref', models.TextField()),
('sha', models.TextField()),
('status', models.TextField(choices=[(b'success', b'success'), (b'error', b'error'), (b'pending', b'pending'), (b'cancelled', b'cancelled')], default=b'pending')),
('result', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('finished_at', models.DateTimeField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner', models.TextField()),
('name', models.TextField()),
('webhook_url', models.URLField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repos', to=settings.AUTH_USER_MODEL)),
],
),
migrations.DeleteModel(
name='Result',
),
migrations.AddField(
model_name='build',
name='repo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='interface.Repo'),
),
]
| mit | 2,182,037,319,607,319,000 | 38.708333 | 179 | 0.566107 | false | 4.292793 | false | false | false |
nth2say/simple_django_blog | simple_django_blog/settings.py | 1 | 2961 | # _*_ coding=utf-8 _*_
"""
Django settings for simple_django_blog project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '35*mn&ihlm(0&&)v_8xfddc%c7&271yyiks_cgqf)45vi!#_1g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'accounts',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'simple_django_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'blog', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simple_django_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, 'blog', 'static'),
)
| mit | 5,607,113,428,644,445,000 | 25.4375 | 71 | 0.677812 | false | 3.41129 | false | false | false |
zcbenz/cefode-chromium | tools/telemetry/telemetry/core/browser_options_unittest.py | 11 | 2393 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
class BrowserOptionsTest(unittest.TestCase):
def testDefaults(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', action='store', default=3)
parser.parse_args(['--browser', 'any'])
self.assertEquals(options.x, 3) # pylint: disable=E1101
def testDefaultsPlusOverride(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', action='store', default=3)
parser.parse_args(['--browser', 'any', '-x', 10])
self.assertEquals(options.x, 10) # pylint: disable=E1101
def testDefaultsDontClobberPresetValue(self):
options = browser_options.BrowserOptions()
setattr(options, 'x', 7)
parser = options.CreateParser()
parser.add_option('-x', action='store', default=3)
parser.parse_args(['--browser', 'any'])
self.assertEquals(options.x, 7) # pylint: disable=E1101
def testCount0(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', action='count', dest='v')
parser.parse_args(['--browser', 'any'])
self.assertEquals(options.v, None) # pylint: disable=E1101
def testCount2(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', action='count', dest='v')
parser.parse_args(['--browser', 'any', '-xx'])
self.assertEquals(options.v, 2) # pylint: disable=E1101
def testOptparseMutabilityWhenSpecified(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', dest='verbosity', action='store_true')
options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
self.assertEquals(options_ret, options)
self.assertTrue(options.verbosity)
def testOptparseMutabilityWhenNotSpecified(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser()
parser.add_option('-x', dest='verbosity', action='store_true')
options_ret, _ = parser.parse_args(['--browser', 'any'])
self.assertEquals(options_ret, options)
self.assertFalse(options.verbosity)
| bsd-3-clause | 4,838,091,917,329,945,000 | 38.883333 | 72 | 0.697033 | false | 3.786392 | true | false | false |
etal/biofrills | biofrills/sequtils.py | 1 | 2940 | """Utilities for manipulating sequence sets.
"""
import logging
import os.path
from collections import Counter
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def clean_accession(rec_id):
"""Extract an accession key from one SeqRecord.
The id of the given record is processed to remove domain location info
added by HMMer. Most other records won't have a '/' character in the FASTA
header key.
"""
return rec_id.rsplit('/', 1)[0]
# Obsolete, but left here for backward compatibility for now
def clean_accessions(records):
"""Extract accession keys from an iterable of SeqRecords."""
return (clean_accession(rec.id) for rec in records)
def intersect_keys(keys, reffile, cache=False, clean_accs=False):
"""Extract SeqRecords from the index by matching keys.
keys - an iterable of sequence identifiers/accessions to select
reffile - name of a FASTA file to extract the specified sequences from
cache - save an index of the reference FASTA sequence offsets to disk?
clean_accs - strip HMMer extensions from sequence accessions?
"""
# Build/load the index of reference sequences
index = None
if cache:
refcache = reffile + '.sqlite'
if os.path.exists(refcache):
if os.stat(refcache).st_mtime < os.stat(reffile).st_mtime:
logging.warn("Outdated cache; rebuilding index")
else:
try:
index = (SeqIO.index_db(refcache,
key_function=clean_accession)
if clean_accs
else SeqIO.index_db(refcache))
except Exception:
logging.warn("Skipping corrupted cache; rebuilding index")
index = None
else:
refcache = ':memory:'
if index is None:
# Rebuild the index, for whatever reason
index = (SeqIO.index_db(refcache, [reffile], 'fasta',
key_function=clean_accession)
if clean_accs
else SeqIO.index_db(refcache, [reffile], 'fasta'))
# Extract records by key
if clean_accs:
keys = (clean_accession(k) for k in keys)
for key in keys:
try:
record = index[key]
except LookupError:
# Missing keys are rare, so it's faster not to check every time
logging.info("No match: %s", repr(key))
continue
yield record
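# Example (editor sketch): pull two records from a hypothetical reference
# FASTA, building the on-disk index next to it on the first call. The file
# name and accession keys below are placeholders, not shipped data.
#
#   hits = intersect_keys(["sp|P12345", "sp|Q67890"], "uniprot.fa",
#                         cache=True, clean_accs=True)
#   for rec in hits:
#       print rec.id, len(rec.seq)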
def aa_frequencies(seq, gap_chars='-.'):
"""Calculate the amino acid frequencies in a sequence set."""
aa_counts = Counter(seq)
# Don't count gaps
for gap_char in gap_chars:
if gap_char in aa_counts:
del aa_counts[gap_char]
# Reduce to frequencies
scale = 1.0 / sum(aa_counts.values())
return dict((aa, cnt * scale) for aa, cnt in aa_counts.iteritems())
| bsd-2-clause | -1,387,220,131,905,263,000 | 33.186047 | 78 | 0.609524 | false | 4.07767 | false | false | false |
tks0123456789/kaggle-Otto | exp_NN4_RI_max_epochs.py | 2 | 5778 | """
Experiment for NN4(RI)
Aim: To find the best max_epochs for NN4(*, 1024, 1024, 1024) + RI(k = 3, m = 200)
max_epochs: [22, 24, ... ,98, 140]
Averaging 20 models
Summary
epochs 88 , loss 0.421860471364
Time:3:40:30 on i7-4790k 32G MEM GTX660
I got a different result, epochs 112 loss 0.422868, before I reinstalled ubuntu 14.04 LTS.
So I chose max_epochs = 112.
"""
import numpy as np
import scipy as sp
import pandas as pd
from pylearn2.models import mlp
from pylearn2.models.mlp import RectifiedLinear, Softmax, MLP
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.train import Train
from theano.compat.python2x import OrderedDict
import theano.tensor as T
from theano import function
import pickle
import sklearn.preprocessing as pp
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import scale
from sklearn.metrics import log_loss
from sklearn.grid_search import ParameterGrid
from datetime import datetime
import os
from utility import *
from predict import predict
import pylab
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values
scaler = pp.StandardScaler()
X2 = scaler.fit_transform(X ** .6)
kf = cross_validation.StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
    break  # keep only the first of the five stratified folds
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
training = DenseDesignMatrix(X = X2[train_idx], y = y_train)
valid = DenseDesignMatrix(X = X2[valid_idx], y = y_valid)
# [l1, l2, l3, l4, output]
nIter = 20
# Params for RI
m = 200
k = 3
# Params for NN
epochs = 20
epochs_add = 2
n_add = 60
bs = 64
mm = .97
lr = .01
dim2 = 1024
ir1 = .01
ir2 = .05
ip = .8
ir_out = .05
mcn_out = 2.5
scores = []
t0 = datetime.now()
predAll = [np.zeros(y_valid.shape) for s in range(n_add)]
for i in range(nIter):
seed = i + 3819
R = RImatrix(X.shape[1], m, k, rm_dup_cols = True, seed = seed)
R = np.abs(R.todense().astype(np.float32))
dim1 = R.shape[1]
l1 = RectifiedLinear(layer_name='l1', irange = ir1, dim = dim1, mask_weights = R)
l2 = RectifiedLinear(layer_name='l2', irange = ir2, dim = dim2, max_col_norm = 1.)
l3 = RectifiedLinear(layer_name='l3', irange = ir2, dim = dim2, max_col_norm = 1.)
l4 = RectifiedLinear(layer_name='l4', irange = ir2, dim = dim2, max_col_norm = 1.)
output = Softmax(layer_name='y', n_classes = 9, irange = ir_out,
max_col_norm = mcn_out)
mdl = MLP([l1, l2, l3, l4, output], nvis = X2.shape[1])
trainer = sgd.SGD(learning_rate=lr,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(input_include_probs = {'l1':1.},
input_scales = {'l1':1.},
default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs),seed = seed)
decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor= .1)
experiment = Train(dataset = training, model=mdl, algorithm=trainer, extensions=[decay])
experiment.main_loop()
epochs_current = epochs
for s in range(n_add):
del mdl.monitor
trainer = sgd.SGD(learning_rate=lr * .1,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(input_include_probs = {'l1':1.},
input_scales = {'l1':1.},
default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs_add),seed = seed)
experiment = Train(dataset = training, model=mdl, algorithm=trainer)
experiment.main_loop()
epochs_current += epochs_add
pred_train = predict(mdl, X2[train_idx].astype(np.float32))
pred_valid = predict(mdl, X2[valid_idx].astype(np.float32))
predAll[s] += pred_valid
scores.append({'epochs':epochs_current, 'nModels':i + 1, 'seed':seed,
'train':log_loss(y_train, pred_train),
'valid':log_loss(y_valid, pred_valid),
'valid_avg':log_loss(y_valid, predAll[s] / (i + 1))})
print scores[-1], datetime.now() - t0
df = pd.DataFrame(scores)
if not os.path.exists(path_log):
print 'mkdir', path_log
os.mkdir(path_log)
df.to_csv(path_log + 'exp_NN4_RI_max_epochs.csv')
keys = ['epochs']
grouped = df.groupby(keys)
print 'epochs',grouped['valid_avg'].last().idxmin(),', loss',grouped['valid_avg'].last().min()
# epochs 88 , loss 0.421860471364
g = grouped[['train', 'valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print g.iloc[[0,1,32,33,34,58,59],:]
# train valid valid_avg
# epochs
# 22 0.319737 0.468458 0.436766
# 24 0.313538 0.468300 0.435694
# 86 0.193640 0.486078 0.422321
# 88 0.190694 0.487625 0.421860
# 90 0.187374 0.487897 0.421998
# 138 0.134388 0.512527 0.423662
# 140 0.132642 0.514666 0.425003
ax = g.plot()
ax.set_title('NN4(RI) m=200, k=3')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_NN4_RI_max_epochs.png')
| mit | -1,470,072,911,108,371,000 | 32.789474 | 94 | 0.619418 | false | 3.039453 | false | false | false |
openstack/python-openstackclient | openstackclient/compute/v2/service.py | 2 | 10505 | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service action implementations"""
import logging
from novaclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class DeleteService(command.Command):
_description = _("Delete compute service(s)")
def get_parser(self, prog_name):
parser = super(DeleteService, self).get_parser(prog_name)
parser.add_argument(
"service",
metavar="<service>",
nargs='+',
help=_("Compute service(s) to delete (ID only). If using "
"``--os-compute-api-version`` 2.53 or greater, the ID is "
"a UUID which can be retrieved by listing compute services "
"using the same 2.53+ microversion.")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
result = 0
for s in parsed_args.service:
try:
compute_client.services.delete(s)
except Exception as e:
result += 1
LOG.error(_("Failed to delete compute service with "
"ID '%(service)s': %(e)s"), {'service': s, 'e': e})
if result > 0:
total = len(parsed_args.service)
msg = (_("%(result)s of %(total)s compute services failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListService(command.Lister):
_description = _("List compute services. Using "
"``--os-compute-api-version`` 2.53 or greater will "
"return the ID as a UUID value which can be used to "
"uniquely identify the service in a multi-cell "
"deployment.")
def get_parser(self, prog_name):
parser = super(ListService, self).get_parser(prog_name)
parser.add_argument(
"--host",
metavar="<host>",
help=_("List services on specified host (name only)")
)
parser.add_argument(
"--service",
metavar="<service>",
help=_("List only specified service binaries (name only). For "
"example, ``nova-compute``, ``nova-conductor``, etc.")
)
parser.add_argument(
"--long",
action="store_true",
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
if parsed_args.long:
columns = (
"ID",
"Binary",
"Host",
"Zone",
"Status",
"State",
"Updated At",
"Disabled Reason"
)
else:
columns = (
"ID",
"Binary",
"Host",
"Zone",
"Status",
"State",
"Updated At"
)
data = compute_client.services.list(parsed_args.host,
parsed_args.service)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
class SetService(command.Command):
_description = _("Set compute service properties")
def get_parser(self, prog_name):
parser = super(SetService, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("Name of host")
)
parser.add_argument(
"service",
metavar="<service>",
help=_("Name of service (Binary name), for example "
"``nova-compute``")
)
enabled_group = parser.add_mutually_exclusive_group()
enabled_group.add_argument(
"--enable",
action="store_true",
help=_("Enable service")
)
enabled_group.add_argument(
"--disable",
action="store_true",
help=_("Disable service")
)
parser.add_argument(
"--disable-reason",
default=None,
metavar="<reason>",
help=_("Reason for disabling the service (in quotes). "
"Should be used with --disable option.")
)
up_down_group = parser.add_mutually_exclusive_group()
up_down_group.add_argument(
'--up',
action='store_true',
help=_('Force up service. Requires ``--os-compute-api-version`` '
'2.11 or greater.'),
)
up_down_group.add_argument(
'--down',
action='store_true',
help=_('Force down service. Requires ``--os-compute-api-version`` '
'2.11 or greater.'),
)
return parser
@staticmethod
def _find_service_by_host_and_binary(cs, host, binary):
"""Utility method to find a compute service by host and binary
:param host: the name of the compute service host
:param binary: the compute service binary, e.g. nova-compute
:returns: novaclient.v2.services.Service dict-like object
:raises: CommandError if no or multiple results were found
"""
services = cs.list(host=host, binary=binary)
# Did we find anything?
if not len(services):
msg = _('Compute service for host "%(host)s" and binary '
'"%(binary)s" not found.') % {
'host': host, 'binary': binary}
raise exceptions.CommandError(msg)
# Did we find more than one result? This should not happen but let's
# be safe.
if len(services) > 1:
# TODO(mriedem): If we have an --id option for 2.53+ then we can
# say to use that option to uniquely identify the service.
msg = _('Multiple compute services found for host "%(host)s" and '
'binary "%(binary)s". Unable to proceed.') % {
'host': host, 'binary': binary}
raise exceptions.CommandError(msg)
return services[0]
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
cs = compute_client.services
if (parsed_args.enable or not parsed_args.disable) and \
parsed_args.disable_reason:
msg = _("Cannot specify option --disable-reason without "
"--disable specified.")
raise exceptions.CommandError(msg)
# Starting with microversion 2.53, there is a single
# PUT /os-services/{service_id} API for updating nova-compute
# services. If 2.53+ is used we need to find the nova-compute
# service using the --host and --service (binary) values.
requires_service_id = (
compute_client.api_version >= api_versions.APIVersion('2.53'))
service_id = None
if requires_service_id:
# TODO(mriedem): Add an --id option so users can pass the service
# id (as a uuid) directly rather than make us look it up using
# host/binary.
service_id = SetService._find_service_by_host_and_binary(
cs, parsed_args.host, parsed_args.service).id
result = 0
enabled = None
try:
if parsed_args.enable:
enabled = True
if parsed_args.disable:
enabled = False
if enabled is not None:
if enabled:
args = (service_id,) if requires_service_id else (
parsed_args.host, parsed_args.service)
cs.enable(*args)
else:
if parsed_args.disable_reason:
args = (service_id, parsed_args.disable_reason) if \
requires_service_id else (
parsed_args.host,
parsed_args.service,
parsed_args.disable_reason)
cs.disable_log_reason(*args)
else:
args = (service_id,) if requires_service_id else (
parsed_args.host, parsed_args.service)
cs.disable(*args)
except Exception:
status = "enabled" if enabled else "disabled"
LOG.error("Failed to set service status to %s", status)
result += 1
force_down = None
if parsed_args.down:
force_down = True
if parsed_args.up:
force_down = False
if force_down is not None:
if compute_client.api_version < api_versions.APIVersion(
'2.11'):
msg = _('--os-compute-api-version 2.11 or later is '
'required')
raise exceptions.CommandError(msg)
try:
args = (service_id, force_down) if requires_service_id else (
parsed_args.host, parsed_args.service, force_down)
cs.force_down(*args)
except Exception:
state = "down" if force_down else "up"
LOG.error("Failed to set service state to %s", state)
result += 1
if result > 0:
msg = _("Compute service %(service)s of host %(host)s failed to "
"set.") % {"service": parsed_args.service,
"host": parsed_args.host}
raise exceptions.CommandError(msg)
| apache-2.0 | -4,229,876,431,149,876,000 | 36.78777 | 79 | 0.523179 | false | 4.587336 | false | false | false |
mathebox/django_hana_pyhdb | django_hana/models.py | 2 | 2900 | from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class HanaGeometryColumns(models.Model):
"""
Maps to the HANA ST_GEOMETRY_COLUMNS view.
"""
schema_name = models.CharField(max_length=256, null=False)
table_name = models.CharField(max_length=256, null=False)
column_name = models.CharField(max_length=256, null=False)
srs_id = models.IntegerField(null=False)
srs_name = models.CharField(max_length=256)
data_type_name = models.CharField(max_length=16)
class Meta:
app_label = 'gis'
db_table = 'ST_GEOMETRY_COLUMNS'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'column_name'
    def __str__(self):
        # the column on this model is named srs_id (self.srid does not exist)
        return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srs_id)
class HanaSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
Maps to the SAP HANA SYS.ST_SPATIAL_REFERENCE_SYSTEMS view.
"""
owner_name = models.CharField(max_length=256)
srs_id = models.IntegerField(null=False)
srs_name = models.CharField(max_length=256, null=False)
round_earth = models.CharField(max_length=7, null=False)
axis_order = models.CharField(max_length=12, null=False)
snap_to_grid = models.FloatField()
tolerance = models.FloatField()
semi_major_axis = models.FloatField()
semi_minor_axis = models.FloatField()
inv_flattening = models.FloatField()
min_x = models.FloatField()
max_x = models.FloatField()
min_y = models.FloatField()
max_y = models.FloatField()
min_z = models.FloatField()
max_z = models.FloatField()
organization = models.CharField(max_length=256)
organization_coordsys_id = models.IntegerField(null=False)
srs_type = models.CharField(max_length=11, null=False)
linear_unit_of_measure = models.CharField(max_length=256, null=False)
angular_unit_of_measure = models.CharField(max_length=256)
polygon_format = models.CharField(max_length=16, null=False)
storage_format = models.CharField(max_length=8, null=False)
definition = models.CharField(max_length=5000)
transform_definition = models.CharField(max_length=5000)
objects = models.GeoManager()
class Meta:
app_label = 'gis'
db_table = 'ST_SPATIAL_REFERENCE_SYSTEMS'
managed = False
@property
def wkt(self):
return self.definition
@classmethod
def wkt_col(cls):
return 'definition'
| bsd-3-clause | 3,511,851,707,603,256,000 | 33.117647 | 84 | 0.672414 | false | 3.629537 | false | false | false |
neogi/machine-learning | regression/simple_linear_regression/simple_linear_regression.py | 1 | 1816 | """
Simple Linear Regression: y = w0 + w1*x
Objective: Estimate w0 and w1, given x and y
where x = input feature
y = output
w0 = intercept
w1 = slope
"""
# Imports
import numpy as np
# Functions
def simple_linear_regression(input_feature, output):
"""
Purpose: Compute intercept and slope
Input : input_feature (x), output (y)
Output : Estimate of intercept (w0) and slope (w1)
"""
mean_input_feature = np.mean(input_feature)
mean_output = np.mean(output)
slope = np.sum(input_feature * output - input_feature * mean_output)/np.sum(input_feature * input_feature - input_feature * mean_input_feature)
intercept = mean_output - slope * mean_input_feature
return(intercept, slope)
def get_regression_predictions(input_feature, intercept, slope):
"""
Purpose: Compute predictions
Input : input_feature (x), intercept (w0), slope (w1)
Output : Predicted output based on estimated intercept, slope and input feature
"""
predicted_output = intercept + slope * input_feature
return(predicted_output)
def get_residual_sum_of_squares(input_feature, output, intercept, slope):
"""
Purpose: Compute Residual Sum of Squares (RSS)
Input : input_feature (x), output (y),
intercept (w0), slope (w1)
Output : Residual sum of squares = sum((actual output (y) - predicted output)^2)
"""
RSS = np.sum((output - (intercept + slope * input_feature)) ** 2)
return(RSS)
def inverse_regression_predictions(output, intercept, slope):
"""
    Purpose: Estimate the input feature (x) that yields a given output
Input : output (y), intercept (w0), slope (w1)
Output : Estimate of input based on intercept, slope and given output
"""
estimated_input = (output - intercept)/slope
return(estimated_input)
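# --- Worked example (editor addition) ----------------------------------------
# Recovers w0 = 1, w1 = 2 exactly from noise-free toy data; the numbers are
# illustrative only, not from any real dataset.
if __name__ == "__main__":
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    y = 1.0 + 2.0 * x
    w0, w1 = simple_linear_regression(x, y)
    print("intercept = %.3f, slope = %.3f" % (w0, w1))
    print("RSS = %.6f" % get_residual_sum_of_squares(x, y, w0, w1))
    print("x for y=7: %.3f" % inverse_regression_predictions(7.0, w0, w1))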
| gpl-3.0 | -4,981,055,031,569,154,000 | 33.923077 | 147 | 0.66685 | false | 3.639279 | false | false | false |
NeurodataWithoutBorders/api-python | examples/create_scripts/link_test-e.py | 1 | 1405 | #!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as ut
"""
Test extension defining a link
"""
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
########################################################################
# create a new NWB file
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = ut.create_identifier("test link extension.")
settings["mode"] = "w"
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
settings["description"] = ("Test making a link in the /analysis group "
"that is defined by an extension.")
# specify the extension (Could be more than one. Only one used now).
settings['extensions'] = ["extensions/e-link_test.py"]
# create the NWB file object. this manages the file
print("Creating " + settings["file_name"])
f = nwb_file.open(**settings)
########################################################################
# This example, stores spike times for multiple sweeps
# create the group for the spike times
# The group ("aibs_spike_times") is defined in the extension
ast = f.make_group("aibs_spike_times")
# some sample data
times = [1.1, 1.2, 1.3, 1.4, 1.5]
#
pto = ast.set_dataset("pixel_time_offsets", times)
# now make the link
ptl = ast.set_dataset("pto_link", pto, attrs={"hello": "Natasha"})
ptl.set_attr("Mary", "bendrich")
# all done; close the file
f.close()
| bsd-3-clause | -8,733,948,675,525,176,000 | 27.1 | 72 | 0.614947 | false | 3.337292 | false | false | false |
DMCTruong/Discord-Bot | Bot.py | 1 | 2291 | ##########################################################################################
# Program Name : Discord Bot
# Author : DMCTruong
# Last Updated : August 31, 2017
# License : MIT
# Description : A general purpose bot written for Discord
##########################################################################################
# To do List:
# - Add a calculator
# - Add a translator: https://pypi.python.org/pypi/googletrans
# - Add a better tutorial on how to install and get the keys for the configurations
# - Add better documentation of the code
# - Add better documentation of installations
# - Redo the restart command
# - Return the user's avatar?
# - Update pyrebase commands
import discord
from discord.ext import commands
import logging
import asyncio
import configurations
from modules import database
from modules import help
from modules import miscellaneous
from modules import music
from modules import owner_only
from modules import timer
# ------------- Logging --------------
if configurations.DISCORD_LOG in ["y", "Y", "yes", "Yes", "YES"]:
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# if configurations.COMMAND_LOG in ["y", "Y", "yes", "Yes", "YES"]:
# if configurations.ERROR_LOG in ["y", "Y", "yes", "Yes", "YES"]:
# -------- Initialize the Bot --------
bot = commands.Bot(configurations.PREFIX)
bot.add_cog(database.Database(bot))
bot.add_cog(help.Help(bot))
bot.add_cog(miscellaneous.Miscellaneous(bot))
bot.add_cog(music.Music(bot))
bot.add_cog(owner_only.Owner_Only(bot))
bot.add_cog(timer.Time(bot))
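# --- Shape of a cog module (editor sketch) -----------------------------------
# The modules registered above are not shown here; a minimal, hypothetical cog
# compatible with this add_cog() style (pre-1.0 discord.py) looks roughly like:
#
#   class Ping:
#       def __init__(self, bot):
#           self.bot = bot
#
#       @commands.command()
#       async def ping(self):
#           await self.bot.say("pong")
#
# Registered with: bot.add_cog(Ping(bot))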
print("\nPlease wait while the Bot logs in ...")
@bot.event
async def on_ready():
print("\nLogged in as:\n")
print("Username : " + bot.user.name)
print("ID Number: " + bot.user.id)
print('\nType ">>help" in the Discord chat for the list of commands.')
print("=============================================")
await bot.change_presence(game=discord.Game(name='>>help for commands!'))
bot.run(configurations.BOT_TOKEN) | mit | 1,855,225,235,320,659,700 | 31.742857 | 94 | 0.613706 | false | 3.755738 | true | false | false |
cwoebker/relo | relo/local/index.py | 1 | 9731 | #!/usr/bin/env python
# encoding: utf-8
import os, time, re
from metaphone import dm as double_metaphone
from relo.core.config import conf
from relo.core.log import logger
from relo.local import util
from relo.core.interfaces import Backend
from relo.yapsy.PluginManager import PluginManager
import hashlib
from progressbar import ProgressBar, RotatingMarker, Bar, Percentage, ETA, FormatLabel
from relo.core.backend import *
##### Inverted Index Variables #####
# Words which should not be indexed
STOP_WORDS = ("the", "of", "to", "and", "a", "in", "is", "it", "you", "that")
# Do not index any words shorter than this
MIN_WORD_LENGTH = 3
# Consider these characters to be punctuation (they will be replaced with spaces prior to word extraction)
PUNCTUATION_CHARS = ".,;:!?@£$%^&*()-–<>[]{}\\|/`~'\""
# A redis key to store a list of metaphones present in this project
REDIS_KEY_METAPHONES = "id:%(project_id)s:metaphones"
# A redis key to store a list of item IDs which have the given metaphone within the given project
REDIS_KEY_METAPHONE = "id:%(project_id)s:mp:%(metaphone)s"
# A redis key to store a list of documents present in this project
REDIS_KEY_DOCUMENTS = "id:%(project_id)s:docs"
# A redis key to store meta information which are associated with the document within the given project
REDIS_KEY_DOCUMENT = "id:%(project_id)s:doc:%(document)s"
# A redis key to store a list of projects stored in the database
REDIS_KEY_PROJECTS = "projects"
class CustomIndex(object):
def __init__(self):
pass
def setUpBackend(self):
self.backendManager = PluginManager(plugin_info_ext='relo')
self.backendManager.setPluginPlaces(["relo/core/backend"])
self.backendManager.locatePlugins()
self.backendManager.loadPlugins("<class 'relo.core.interfaces.Backend'>", ['redis'])
for plugin in self.backendManager.getAllPlugins():
self.backendManager.activatePluginByName(plugin.name)
for plugin in self.backendManager.getAllPlugins():
if plugin.name == conf.readConfig('core.index'):
self.db = plugin.plugin_object
self.db.init()
def setUpProject(self, type):
self.db.addProject(REDIS_KEY_PROJECTS, self.directory, type)
def listProject(self):
for root, subFolders, files in os.walk(self.directory):
for file in files:
if file.startswith('.'):
continue
itempath = os.path.join(root, file)
if os.path.islink(itempath):
#print "link found" + itempath
continue
self.db.addSet(REDIS_KEY_DOCUMENTS % {"project_id": self.directory}, itempath)
def run(self):
pass
def __end__(self):
pass
class MetaIndex(CustomIndex):
"""
Main indexing class
"""
def __init__(self, directory, hidden=False):
self.directory = os.path.abspath(directory)
logger.head("Relo Index | meta | " + directory)
self.setUpBackend()
def run(self):
sTime = time.time()
logger.log("Preparing Index...")
        total = util.countFiles(self.directory)  # renamed so the builtin max() is not shadowed
        logger.info("Indexing %d files..." % total)
        pTime = time.time()
        widgets = [FormatLabel(self.directory), ' ', Percentage(), ' ', Bar('/'), ' ', RotatingMarker(), ' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=total).start()
for root, subFolders, files in os.walk(self.directory):
for file in files:
if file.startswith('.'):
continue
itempath = os.path.join(root, file)
if os.path.islink(itempath):
#print "link found" + itempath
continue
size = os.path.getsize(itempath)
md5 = hashlib.md5()
with open(itempath, 'rb') as f:
for chunk in iter(lambda: f.read(8192), ''):
md5.update(chunk)
hash = md5.digest()
modified = time.ctime(os.path.getmtime(itempath))
type = util.getFileType(itempath)
key = REDIS_KEY_DOCUMENT % {"project_id": self.directory, "document": itempath}
self.db.addMeta(key, modified, hash, size, type)
pbar.update(pbar.currval + 1)
#print "ADD:", itempath, modified, hash, size, type
pbar.finish()
eTime = time.time()
iTime = eTime - pTime
setupTime = pTime - sTime
tTime = eTime - sTime
logger.debug("(Setup : %0.2fs) - (Index : %0.2fs) - (Total : %0.2fs)" % (setupTime, iTime, tTime))
def __end__(self):
self.db.end()
class InvertedIndex(CustomIndex):
def __init__(self, directory, hidden=False):
self.directory = os.path.abspath(directory)
logger.head("| Relo Index | content | " + directory)
self.setUpBackend()
self.punctuation_regex = re.compile(r"[%s]" % re.escape(PUNCTUATION_CHARS))
super(InvertedIndex, self).__init__()
def setUpDocType(self, extList):
self.extList = extList
self.docTypeManager = PluginManager(plugin_info_ext='relo')
self.docTypeManager.setPluginPlaces(["relo/core/doctype"])
self.numPlugins = self.docTypeManager.locatePlugins()
self.docTypeManager.loadPlugins("<class 'relo.core.interfaces.DocType'>", extList=extList)
pluginList = []
for plugin in self.docTypeManager.getAllPlugins():
self.docTypeManager.activatePluginByName(plugin.name)
pluginList.append(plugin.plugin_object.meta())
def get_words_from_text(self, text):
"""Extract a list of words to index from the given text"""
if not text:
return []
text = self.punctuation_regex.sub(" ", text)
words = text.split()
words = [word for word in text.split() if len(word) >= MIN_WORD_LENGTH and word.lower() not in STOP_WORDS]
return words
def get_metaphones(self, words):
"""Get the metaphones for a given list of words"""
metaphones = set()
for word in words:
metaphone = double_metaphone(unicode(word, errors='ignore'))
metaphones.add(metaphone[0].strip())
if(metaphone[1]):
metaphones.add(metaphone[1].strip())
return metaphones
def index_item(self, item, content):
"""Indexes a certain content"""
words = self.get_words_from_text(content)
metaphones = self.get_metaphones(words)
for metaphone in metaphones:
self._link_item_and_metaphone(item, metaphone)
def _link_item_and_metaphone(self, item, metaphone):
# Add the item to the metaphone key
redis_key = REDIS_KEY_METAPHONE % {"project_id": self.directory, "metaphone": metaphone}
self.db.addSet(redis_key, item)
# Make sure we record that this project contains this metaphone
redis_key = REDIS_KEY_METAPHONES % {"project_id": self.directory}
self.db.addSet(redis_key, metaphone)
def remove_project(self):
"""Remove the existing index for the project"""
# Remove all the existing index data
redis_key = REDIS_KEY_METAPHONES % {"project_id": self.directory}
project_metaphones = self.db.smembers(redis_key)
if project_metaphones is None:
project_metaphones = []
self.db.delete(redis_key)
for project_metaphone in project_metaphones:
self.db.redis.delete(REDIS_KEY_METAPHONE % {"project_id": self.directory, "metaphone": project_metaphone})
return True
def load(self, itempath):
for plugin in self.docTypeManager.getAllPlugins():
if plugin.name == util.getFileType(itempath).upper():
return plugin.plugin_object.load(itempath)
plugin = self.docTypeManager.getPluginByName("DEFAULT")
return plugin.plugin_object.load(itempath)
def run(self):
sTime = time.time()
logger.log("Preparing Index...")
count = util.countFiles(self.directory)
size, list = util.recursiveListFiles(self.directory, False)
extList = ['default']
for item in list:
type = util.getFileType(item)
#print repr(item) + '----' + repr(type)
if type not in extList:
extList.append(type)
del list
self.setUpDocType(extList)
del extList
logger.info("Indexing %d files..." % count)
pTime = time.time()
widgets = [FormatLabel(self.directory), ' ', Percentage(), ' ', Bar('/'), ' ', RotatingMarker(), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=count).start()
for root, subFolders, files in os.walk(self.directory):
for file in files:
if file.startswith('.'):
continue
itempath = os.path.join(root, file)
if os.path.islink(itempath):
#print "link found" + itempath
continue
content = self.load(itempath)
#logger.debug(itempath + ' loaded')
self.index_item(itempath, content)
#logger.debug(itempath + ' searched')
pbar.update(pbar.currval + 1)
pbar.finish()
eTime = time.time()
iTime = eTime - pTime
setupTime = pTime - sTime
tTime = eTime - sTime
logger.debug("(Setup : %0.2fs) - (Index : %0.2fs) - (Total : %0.2fs)" % (setupTime, iTime, tTime))
def __end__(self):
self.db.end()
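# --- Usage sketch (editor addition) ------------------------------------------
# How these indexers are meant to be driven; the directory path and project
# type strings are placeholders, not values mandated by the package.
#
#   idx = MetaIndex('/tmp/docs')
#   idx.setUpProject('meta')
#   idx.listProject()
#   idx.run()
#
#   inv = InvertedIndex('/tmp/docs')
#   inv.setUpProject('content')
#   inv.listProject()
#   inv.run()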
| bsd-3-clause | -936,636,815,801,567,000 | 38.225806 | 118 | 0.598684 | false | 3.683453 | false | false | false |
ansobolev/shs | shs/gui/PlotFrame.py | 1 | 13356 | # -*- coding: utf-8 -*-
import math
import os
import wx
from wx.lib.mixins.listctrl import getListCtrlSelection
try:
    from wx.lib.pubsub import pub
except ImportError:
    # very old wxPython only ships the Publisher singleton; alias it so the
    # pub.subscribe()/pub.unsubscribe() calls below work either way
    from wx.lib.pubsub.pub import Publisher
    pub = Publisher()
import matplotlib.cm as cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.backends.backend_wx import \
_load_bitmap
from FitDialog import FitDialog
import fit
import mbox
class MyCustomToolbar(NavigationToolbar):
EXPORT_DATA = wx.NewId()
def __init__(self, plotCanvas):
# create the default toolbar
NavigationToolbar.__init__(self, plotCanvas)
# find where icons are located
path = os.path.dirname(__file__)
icon_file = os.path.join(path, 'data-export-icon.png')
self.AddSimpleTool(self.EXPORT_DATA, _load_bitmap(icon_file),
'Export data', 'Export current data to file')
wx.EVT_TOOL(self, self.EXPORT_DATA, self._on_export_data)
def _on_export_data(self, evt):
if not hasattr(self, 'dirname'):
self.dirname = os.path.expanduser('~')
dlg = wx.DirDialog(self, "Choose a directory to export data to", self.dirname, wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
self.dirname = dlg.GetPath()
else:
dlg.Destroy()
return 1
dlg.Destroy()
# write each axis data in separate file
for axis in self.canvas.figure.get_axes():
# axis title - the name of the file
title = axis.get_title()
l = [t.get_text() for t in self.canvas.figure.legends[0].get_texts()]
if os.sep in l[0]:
l = [t.split(os.sep) for t in l]
l = ['.'.join(t[1:3]) for t in l]
# getting data
x_max = 0
y = []
for line in axis.get_lines():
x_c = len(line.get_xdata())
if x_c > x_max:
x_max = x_c
x = line.get_xdata()
y.append(line.get_ydata())
# printing data to file
f = open(os.path.join(self.dirname, title.replace('/','_') + '.dat'), 'w')
head = [' X '] + l
hl = [len(t) for t in l]
hf = '{0[0]:7} '
for i in range(1, len(l) + 1):
hf += ' {0[%i]:%i} ' % (i, hl[i-1])
f.write(hf.format(head) + '\n')
y_max = [len(yi) for yi in y]
for xi in range(x_max):
is_y = [yi > xi for yi in y_max]
data = [x[xi]]
df = '{0[0]:^7.3f} '
                for yi, is_yi in enumerate(is_y):
                    if is_yi:
                        data.append(y[yi][xi])
                        # hl is indexed per series, so use hl[yi]; the original
                        # hl[yi-1] picked the previous column's width
                        df += ' {0[%i]:^%i.5f} ' % (len(data) - 1, hl[yi])
                    else:
                        df += ' ' * (hl[yi] + 2)
f.write(df.format(data) + '\n')
f.close()
mbox.DataExported(self.dirname)
class PlotFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: PlotFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.CreateMplFigure()
self.PlotsCtrl = wx.ListCtrl(self.panel, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
self.ByCalcsChkBox = wx.CheckBox(self.panel, -1, 'Group by calcs')
self.ReplotBtn = wx.Button(self.panel, -1, "Replot!")
self.ShowInfoBtn = wx.Button(self.panel, -1, "Show info")
self.FitBtn = wx.Button(self.panel, -1, "Begin fit")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.ReplotBtnPress, self.ReplotBtn)
self.Bind(wx.EVT_BUTTON, self.InfoBtnPress, self.ShowInfoBtn)
self.Bind(wx.EVT_BUTTON, self.FitBtnPress, self.FitBtn)
self.Bind(wx.EVT_CHECKBOX, self.ByCalcsCheck, self.ByCalcsChkBox)
self.Bind(wx.EVT_CLOSE, self.OnClose, self)
self.Center()
# end wxGlade
self.PlotsCtrl.InsertColumn(0,'Data', width = 100)
def CreateMplFigure(self):
self.panel = wx.Panel(self)
self.dpi = 100
self.fig = Figure((8.0, 6.4), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.axes = self.fig.add_subplot(111)
self.toolbar = MyCustomToolbar(self.canvas)
def __set_properties(self):
self.fitting = False
self.fit_points = []
self.SetTitle(self.title)
def __do_layout(self):
PCSizer = wx.BoxSizer(wx.VERTICAL)
PCSizer.Add(self.ByCalcsChkBox, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
PCSizer.Add(self.ReplotBtn, 0, wx.ALL | wx.EXPAND, 5)
PCSizer.Add(self.PlotsCtrl, 1, wx.ALL |wx.EXPAND, 5)
PCSizer.Add(self.ShowInfoBtn, 0, wx.ALL |wx.EXPAND, 5)
PCSizer.Add(self.FitBtn, 0, wx.ALL |wx.EXPAND, 5)
PCSizer.Add((30, 30), 1)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
vbox.Add(self.toolbar, 0, wx.EXPAND)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(PCSizer, 0, wx.ALL | wx.EXPAND, 5)
hbox.Add(vbox, 1, wx.ALL | wx.EXPAND, 5)
self.panel.SetSizer(hbox)
hbox.Fit(self)
# Methods to be implemented in subclasses
def ReplotBtnPress(self, evt):
self.replot()
def InfoBtnPress(self, event):
pass
def FitBtnPress(self, event):
pass
def ByCalcsCheck(self, event):
self.initplot()
self.replot()
def OnClose(self, event):
pass
class PlotFuncFrame(PlotFrame):
title = 'Plot'
def __init__(self, *args, **kwds):
PlotFrame.__init__(self, *args, **kwds)
pub.subscribe(self.plot, 'data.plot')
def plot(self, message):
self.data = message
self.initplot()
self.replot()
def initplot(self):
self.PlotsCtrl.DeleteAllItems()
# all data are the same for different calcs
assert len(set([d.y_titles for d in self.data])) == 1
# graphs - different graphs
# leg - different lines on a graph
if self.ByCalcsChkBox.IsChecked():
graphs = [d.title for d in self.data]
self.leg = self.data[0].y_titles
else:
graphs = self.data[0].y_titles
self.leg = [d.title for d in self.data]
for i, s in enumerate(graphs):
self.PlotsCtrl.InsertStringItem(i, s)
# adjusting column width
self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        wh = self.PlotsCtrl.GetColumnWidth(0)
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        wc = self.PlotsCtrl.GetColumnWidth(0)
if wh > wc:
self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
self.PlotsCtrl.Select(0, 1)
def replot(self, cfd = True):
        sind = getListCtrlSelection(self.PlotsCtrl)
ng = len(sind)
ncols = round(ng**0.5)
if ncols == 0.:
ncols = 1.
nrows = math.ceil(ng / ncols)
self.fig.clear()
# clear fitting data as well
if cfd:
self.fit_points = []
self.FitBtn.SetLabel("Begin fit")
self.fitting = False
for i, igraph in enumerate(sind):
title = self.PlotsCtrl.GetItemText(igraph)
axes = self.fig.add_subplot(nrows,ncols,i+1)
axes.set_title(title)
if self.ByCalcsChkBox.IsChecked():
if not hasattr(self.data[igraph],'var_x'):
x = self.data[igraph].x
else:
x = range(len(self.data[igraph].x))
axes.get_xaxis().set_ticks(x)
axes.get_xaxis().set_ticklabels(self.data[igraph].x, rotation=60, size='x-small')
for y in self.data[igraph].y:
axes.plot(x, y)
else:
for d in self.data:
if not hasattr(d,'var_x'):
x = d.x
else:
x = range(len(d.x))
axes.get_xaxis().set_ticks(x)
axes.get_xaxis().set_ticklabels(d.x, rotation=60, size='x-small')
axes.plot(x, d.y[igraph])
# get legend
lines = self.fig.axes[0].get_lines()
self.fig.legend(lines, self.leg, 1)
self.fig.tight_layout()
self.canvas.draw()
def InfoBtnPress(self, evt):
if self.info is None:
mbox.NoInfo()
return 1
mbox.ShowPlotInfo(self.calcs, self.info)
def FitBtnPress(self, evt):
sind = getListCtrlSelection(self.PlotsCtrl)
if len(sind) > 1:
print 'There should be one axis!'
return
sind = sind[0]
if not self.fitting:
# begin fit; show dialog
dlg = FitDialog(self, sets = self.leg)
if not dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
return
# get data from dialog
opts, iset = dlg.GetFitOptions()
dlg.Destroy()
# some quirks to begin fitting
self.FitBtn.SetLabel("Finish fit")
self.fitting = True
self.canvas.Bind(wx.EVT_LEFT_DCLICK, self.OnCanvasClick)
# get fit set according to the state of GBC checkbox
if self.ByCalcsChkBox.IsChecked():
fit_set = (self.data[sind][self.x], self.data[sind][self.PlotsCtrl.GetItemText(iset)])
else:
fit_set = (self.data[iset][self.x], self.data[iset][self.PlotsCtrl.GetItemText(sind)])
self.fit = fit.Fit(opts, fit_set)
else:
# try to end fit
if not self.fit.is_enough(len(self.fit_points)):
return
self.canvas.Unbind(wx.EVT_LEFT_DCLICK)
# fitting itself
p, x, y = self.fit.fit(self.fit_points)
self.replot()
ax = self.fig.gca()
ax.plot(x, y, '--x')
self.canvas.draw()
self.AddFitInfo(self.fit.FitInfo())
def OnCanvasClick(self, evt):
if self.fit.is_enough(len(self.fit_points)):
self.canvas.Unbind(wx.EVT_LEFT_DCLICK)
return
ax = self.fig.gca()
p = ax.transData.inverted().transform(evt.GetPositionTuple())
ax.axvline(x = p[0], c = 'r')
self.fit_points.append(p[0])
print 'Selected x = %f' % (p[0])
self.canvas.draw()
def AddFitInfo(self, info):
'Adds fitting info to self.info'
print info
def OnClose(self, evt):
pub.unsubscribe(self.plot, 'data.plot')
self.Destroy()
# end of class PlotFrame
class PlotCorrFrame(PlotFrame):
title = 'Correlations'
def __init__(self, *args, **kwds):
PlotFrame.__init__(self, *args, **kwds)
pub.subscribe(self.plot, 'corr.plot')
def plot(self, message):
self.calcs = message[0]
self.data = message[1]
# a number of tuples (x, y1, ... yn)
# self.names = self.data[0][1].dtype.names
self.names = message[2]
self.initplot()
self.replot()
def initplot(self):
self.PlotsCtrl.DeleteAllItems()
if self.ByCalcsChkBox.IsChecked():
data = self.calcs
self.leg = self.names
else:
data = self.names
self.leg = self.calcs
for i, s in enumerate(data):
self.PlotsCtrl.InsertStringItem(i, s)
# adjusting column width
self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
        wh = self.PlotsCtrl.GetColumnWidth(0)
        self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        wc = self.PlotsCtrl.GetColumnWidth(0)
if wh > wc:
self.PlotsCtrl.SetColumnWidth(0, wx.LIST_AUTOSIZE_USEHEADER)
self.PlotsCtrl.Select(0, 1)
def replot(self):
sind = getListCtrlSelection(self.PlotsCtrl)
ng = len(sind)
ncols = round(ng**0.5)
if ncols == 0.:
ncols = 1.
nrows = math.ceil(ng / ncols)
self.fig.clear()
for i, igraph in enumerate(sind):
color = iter(cm.get_cmap('prism')([x/24. for x in range(24)]))
title = self.PlotsCtrl.GetItemText(igraph)
axes = self.fig.add_subplot(nrows,ncols,i+1)
axes.set_title(title)
sdata = []
if self.ByCalcsChkBox.IsChecked():
for ins in range(len(self.names)):
sdata.append(axes.scatter(self.data[igraph][ins][0], self.data[igraph][ins][1], c = next(color)))
else:
for ds in self.data:
sdata.append(axes.scatter(ds[igraph][0], ds[igraph][1], c = next(color)))
# get legend
self.fig.legend(sdata, self.leg, scatterpoints = 1)
self.canvas.draw()
def OnClose(self, evt):
        pub.unsubscribe(self.plot, 'corr.plot')
self.Destroy()
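# Editor note (assumption): these frames are driven over pubsub; with the
# kwargs protocol the matching publish calls would look roughly like
#
#   pub.sendMessage('data.plot', message=data_list)
#   pub.sendMessage('corr.plot', message=(calcs, data, names))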
| mit | 882,997,367,532,550,500 | 34.616 | 117 | 0.539757 | false | 3.436069 | false | false | false |
exhuma/wickedjukebox | wickedjukebox/database.py | 1 | 11563 | from wickedjukebox import Base
from sqlalchemy.types import (
Integer,
Unicode,
Date,
String,
Boolean,
Float,
DateTime,
)
from sqlalchemy.orm import (
relationship,
)
from sqlalchemy import (
UniqueConstraint,
Column,
ForeignKey,
Index,
func,
Table,
PrimaryKeyConstraint
)
ALBUM_TYPE_ALBUM="album"
channel_album_data = Table("channel_album_data", Base.metadata,
Column('channel_id', Integer, ForeignKey("channel.id"), nullable=False),
Column('album_id', Integer, ForeignKey("album.id"), nullable=False),
Column('played', Integer, nullable=False, default=0),
PrimaryKeyConstraint('channel_id', 'album_id')
)
channel_song_data = Table("channel_song_data", Base.metadata,
Column('channel_id', Integer, ForeignKey('channel.id'), nullable=False),
Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
Column('played', Integer, nullable=False, default=0),
Column('voted', Integer, nullable=False, default=0),
Column('skipped', Integer, nullable=False, default=0),
Column('lastPlayed', DateTime, default=None),
Column('cost', Integer, default=5),
PrimaryKeyConstraint('channel_id', 'song_id')
)
song_has_genre = Table("song_has_genre", Base.metadata,
Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
Column('genre_id', Integer, ForeignKey('genre.id'), nullable=False),
PrimaryKeyConstraint('song_id', 'genre_id')
)
song_has_tag = Table("song_has_tag", Base.metadata,
Column('song_id', Integer, nullable=False),
Column('tag', String(32), ForeignKey('tag.label'), nullable=False),
PrimaryKeyConstraint('song_id', 'tag')
)
user_album_stats = Table("user_album_stats", Base.metadata,
Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
Column('album_id', Integer, ForeignKey('album.id'), nullable=False),
Column('when', DateTime, nullable=False),
PrimaryKeyConstraint('user_id', 'album_id')
)
user_song_standing = Table("user_song_standing", Base.metadata,
Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
Column('standing', String(12), nullable=False),
PrimaryKeyConstraint('user_id', 'song_id')
)
user_song_stats = Table("user_song_stats", Base.metadata,
Column('user_id', Integer, ForeignKey('users.id'), nullable=False),
Column('song_id', Integer, ForeignKey('song.id'), nullable=False),
Column('when', DateTime, nullable=False),
PrimaryKeyConstraint('user_id', 'song_id', 'when')
)
class Album(Base):
__tablename__ = "album"
__table_args__ = (
UniqueConstraint('path'),
Index('album_name_idx', 'name'),
Index('album_type_idx', 'type'),
)
id = Column(Integer, nullable=False, primary_key=True)
artist_id = Column(Integer, ForeignKey('artist.id'), nullable=False)
name = Column(Unicode(128), default=None)
release_date = Column(Date, default=None)
added = Column(DateTime, default=None)
downloaded = Column(Integer, nullable=False, default=0)
type = Column(String(32), nullable=False, default=ALBUM_TYPE_ALBUM)
path = Column(Unicode(255), nullable=False)
class Artist(Base):
__tablename__ = "artist"
__table_args__ = (
UniqueConstraint('name'),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(128), default=None)
added = Column(DateTime, nullable=False)
class Channel(Base):
__tablename__ = "channel"
__table_args__ = (
UniqueConstraint('name'),
)
id = Column(Integer, nullable=False, primary_key=True)
name = Column(Unicode(32), nullable=False)
public = Column(Boolean, default=True)
backend = Column(Unicode(64), nullable=False)
backend_params = Column(Unicode, nullable=False, default=u'')
ping = Column(DateTime, default=None)
active = Column(Boolean, default=False)
status = Column(Integer, default=None)
class DynamicPlaylist(Base):
__tablename__ = "dynamicPlaylist"
id = Column(Integer, nullable=False, primary_key=True)
channel_id = Column(Integer, default=None)
group_id = Column(Integer, nullable=False)
probability = Column(Float, nullable=False) # COMMENT 'Probability at which a song is picked from the playlisy (0.0-1.0)',
label = Column(Unicode(64), default=None)
query = Column(Unicode)
class Event(Base):
__tablename__ = "events"
__table_args__ = (
Index('events_date_idx', 'startdate', 'enddate'),
)
id = Column(Integer, nullable=False, primary_key=True)
title = Column(Unicode, nullable=False)
startdate = Column(DateTime, nullable=False)
enddate = Column(DateTime, nullable=False)
lat = Column(Float, default=None)
lon = Column(Float, default=None)
class Genre(Base):
__tablename__ = "genre"
__table_args__ = (
UniqueConstraint('name'),
)
id = Column(Integer, nullable=False, primary_key=True)
name = Column(Unicode(128), default=None)
added = Column(DateTime, nullable=False)
class Group(Base):
__tablename__ = "groups"
id = Column(Integer, nullable=False, primary_key=True)
title = Column(Unicode(32), nullable=False)
admin = Column(Boolean, nullable=False, default=False)
nocredits = Column(Integer, nullable=False, default=0)
queue_skip = Column(Integer, nullable=False, default=0)
queue_remove = Column(Integer, nullable=False, default=0)
queue_add = Column(Integer, nullable=False, default=0)
class Lastfm_queue(Base):
__tablename__ = "lastfm_queue"
queue_id = Column(Integer, nullable=False, primary_key=True)
song_id = Column(Integer, ForeignKey('song.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
time_played = Column(DateTime, nullable=False)
time_started = Column(DateTime, nullable=False)
class Log(Base):
__tablename__ = "log"
priority = Column(Unicode(32), nullable=False)
message = Column(Unicode, nullable=False)
date = Column(DateTime, nullable=False, default=func.now(), primary_key=True)
class Queue(Base):
__tablename__ = "queue"
__table_args__ = (
Index('queue_song_idx', 'song_id'),
Index('queue_user_idx', 'user_id'),
Index('queue_channel_idx', 'channel_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
song_id = Column(Integer, ForeignKey('song.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
user_id = Column(Integer, ForeignKey('users.id', onupdate="CASCADE", ondelete="CASCADE"), default=None)
channel_id = Column(Integer, ForeignKey('channel.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
position = Column(Integer, default=0)
added = Column(DateTime, nullable=False)
class RenderPreset(Base):
__tablename__ = "render_presets"
__table_args__ = (
Index('render_presets_idx1', 'category', 'preset'),
)
id = Column(Integer, primary_key=True, nullable=False)
category = Column(Unicode(64), nullable=False)
preset = Column(Unicode(64), nullable=False)
hmax = Column(Integer, nullable=False)
wmax = Column(Integer, nullable=False)
placeholder = Column(Unicode(64), default=None)
noproportion = Column(Boolean, nullable=False, default=False)
force_mime = Column(String(16), nullable=False)
class Setting(Base):
__tablename__ = "setting"
__table_args__ = (
PrimaryKeyConstraint('var', 'channel_id', 'user_id'),
)
var = Column(Unicode(32), nullable=False)
value = Column(Unicode)
channel_id = Column(Integer, ForeignKey( 'channel.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False, default=0)
user_id = Column(Integer, ForeignKey( 'users.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False, default=0)
class SettingText(Base):
__tablename__ = "setting_text"
var = Column(Unicode(32), ForeignKey('setting.var'), nullable=False, primary_key=True)
text_en = Column(Unicode, nullable=False)
class Shoutbox(Base):
__tablename__ = "shoutbox"
__table_args__ = (
Index('shoutbox_added_idx', 'added'),
Index('shoutbox_user_idx', 'user_id'),
)
id = Column(Integer, nullable=False, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
message = Column(Unicode(255), nullable=False)
added = Column(DateTime, nullable=False)
class Song(Base):
__tablename__ = "song"
__table_args__ = (
UniqueConstraint('localpath'),
)
id = Column(Integer, nullable=False, primary_key=True)
artist_id = Column(Integer, ForeignKey('artist.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
album_id = Column(Integer, ForeignKey('album.id', onupdate="CASCADE", ondelete="CASCADE"), default=None)
track_no = Column(Integer, default=None)
title = Column(Unicode(128), default=None)
duration = Column(Float, default=None)
year = Column(Integer, default=None)
localpath = Column(Unicode(255), nullable=False)
downloaded = Column(Integer, default=0)
lastScanned = Column(DateTime, default=None)
bitrate = Column(Integer, default=None)
filesize = Column(Integer, default=None)
checksum = Column(String(14), default=None)
lyrics = Column(Unicode)
broken = Column(Boolean, default=0)
dirty = Column(Boolean, default=0)
added = Column(DateTime, nullable=False)
    # relationship() strings must name the mapped classes (Genre/Tag), not the
    # table names; lowercase 'genre'/'tag' would fail to resolve
    genres = relationship('Genre', secondary=song_has_genre)
    tags = relationship('Tag', secondary=song_has_tag)
class State(Base):
__tablename__ = "state"
channel_id = Column(Integer, nullable=False, primary_key=True)
state = Column(String(64), primary_key=True, nullable=False)
value = Column(String(255), default=None)
class Tag(Base):
__tablename__ = "tag"
label = Column(Unicode(32), primary_key=True, nullable=False)
inserted = Column(DateTime, nullable=False, default=func.now())
modified = Column(DateTime, nullable=False, default='0000-00-00 00:00:00')
class Users(Base):
__tablename__ = "users"
__table_args__ = (
UniqueConstraint('username'),
UniqueConstraint('cookie'),
)
id = Column(Integer, nullable=False, primary_key=True)
username = Column(Unicode(32), nullable=False)
cookie = Column(Unicode(32), nullable=False)
password = Column(Unicode(32), nullable=False)
fullname = Column(Unicode(64), nullable=False)
email = Column(Unicode(128), nullable=False)
credits = Column(Integer, nullable=False)
group_id = Column(Integer, ForeignKey('groups.id', onupdate="CASCADE", ondelete="CASCADE"), nullable=False)
downloads = Column(Integer, nullable=False, default=0)
votes = Column(Integer, nullable=False, default=0)
skips = Column(Integer, nullable=False, default=0)
selects = Column(Integer, nullable=False, default=0)
added = Column(DateTime, nullable=False)
proof_of_life = Column(DateTime, nullable=False)
proof_of_listening = Column(DateTime, default=None)
ip = Column(Unicode(32), nullable=False)
picture = Column(Unicode(255), nullable=False)
lifetime = Column(Integer, nullable=False)
channel_id = Column(Integer, nullable=False, default=1)
| bsd-3-clause | 2,195,029,730,774,627,300 | 39.57193 | 126 | 0.659258 | false | 3.690712 | false | false | false |
Tche333/Hello_World | plugin.video.belgium/addon.py | 1 | 2399 | # -*- coding: iso-8859-1 -*-
import urllib, os, sys
import channel
#if channel.in_xbmc:
#icon = xbmc.translatePath(os.path.join(__home__, 'resources/rtl-tvi.png'))
channels = {'rtltvi': {'name': 'RTL-TVI', 'icon': 'rtl-tvi.png', 'module': 'rtl'},
'clubrtl': {'name': 'Club RTL', 'icon': 'club-rtl.png', 'module': 'rtl'},
'plugrtl': {'name': 'Plug RTL', 'icon': 'plug-rtl.png', 'module': 'rtl'},
'rtbf': {'name': 'RTBF', 'icon': 'rtbf-all.png'},
'tvcom': {'name': 'TV Com', 'icon': 'tvcom.jpg'},
'vtm': {'name': 'VTM', 'icon': 'vtm.jpg'},
'een': {'name': 'EEn', 'icon': 'een.png'},
}
def show_channels():
for channel_id, ch in channels.iteritems():
if channel.in_xbmc:
icon = xbmc.translatePath(os.path.join(channel.home, 'resources/' + ch['icon']))
channel.addDir(ch['name'], icon, channel_id=channel_id, action='show_categories')
else:
print ch['name'], channel_id, 'show_categories'
def get_params():
    param = {}
    if len(sys.argv) < 3:
        return {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        params = paramstring
        # strip a trailing '/' before parsing the query string
        if params[len(params) - 1] == '/':
            params = params[0:len(params) - 1]
        cleanedparams = params.replace('?', '')
        print cleanedparams
        pairsofparams = cleanedparams.split('&')
        print pairsofparams
        for i in range(len(pairsofparams)):
            splitparams = pairsofparams[i].split('=')
            if len(splitparams) == 2:
                try:
                    param[splitparams[0]] = urllib.unquote_plus(splitparams[1])
                except:
                    pass
    return param
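# Illustrative example (hypothetical values) of what get_params() decodes:
#   sys.argv[2] == "?name=RTBF&channel=rtbf&mode=show_categories"
#   -> {'name': 'RTBF', 'channel': 'rtbf', 'mode': 'show_categories'}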
print "==============================="
print " Video Belgium"
print "==============================="
print
params = get_params()
channel_id = params.get('channel_id')
print 'channel_id:', channel_id
if params.get('action', False) is False:
show_channels()
elif channel_id:
context = channels[channel_id]
context.update(params)
channel_module_name = context.get('module', channel_id)
__import__(channel_module_name)
sys.modules[channel_module_name].Channel(context)
if channel.in_xbmc:
channel.xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | -933,369,316,216,869,900 | 33.271429 | 93 | 0.540642 | false | 3.417379 | false | false | false |
simon-weber/gmusicapi | setup.py | 2 | 2676 | #!/usr/bin/env python
import re
from setuptools import setup, find_packages
import sys
import warnings
if sys.version_info[:3] < (3, 5, 0):
warnings.warn("gmusicapi does not officially support versions below "
"Python 3.5.0", RuntimeWarning)
# This hack is from http://stackoverflow.com/a/7071358/1231454;
# the version is kept in a separate file and gets parsed - this
# way, setup.py doesn't have to import the package.
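# For illustration, gmusicapi/_version.py is expected to hold one line of the
# form below (the version value here is hypothetical):
#   __version__ = '12.0.0'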
VERSIONFILE = 'gmusicapi/_version.py'
version_line = open(VERSIONFILE).read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(version_re, version_line, re.M)
if match:
version = match.group(1)
else:
raise RuntimeError("Could not find version in '%s'" % VERSIONFILE)
setup(
name='gmusicapi',
version=version,
author='Simon Weber',
author_email='[email protected]',
url='http://pypi.python.org/pypi/gmusicapi/',
packages=find_packages(),
scripts=[],
license=open('LICENSE').read(),
description='An unofficial api for Google Play Music.',
long_description=(open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read()),
install_requires=[
'validictory >= 0.8.0, != 0.9.2', # error messages
'decorator >= 3.3.1', # > 3.0 likely work, but not on pypi
'mutagen >= 1.34', # EasyID3 TPE2 mapping to albumartist
('requests >= 1.1.0, != 1.2.0,' # session.close, memory view TypeError
'!= 2.2.1, != 2.8.0, != 2.8.1,'
'!= 2.12.0, != 2.12.1, != 2.12.2,' # idna regression broke streaming urls
'!= 2.18.2'), # SSLError became ConnectionError
'python-dateutil >= 1.3, != 2.0', # 2.0 is python3-only
'proboscis >= 1.2.5.1', # runs_after
'protobuf >= 3.0.0',
'oauth2client >= 1.1', # TokenRevokeError
'appdirs >= 1.1.0', # user_log_dir
'gpsoauth >= 0.2.0', # mac -> android_id, validation, pycryptodome
'MechanicalSoup >= 0.4.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Software Development :: Libraries :: Python Modules',
],
include_package_data=True,
zip_safe=False,
)
| bsd-3-clause | 2,195,354,397,772,003,800 | 38.940299 | 95 | 0.556054 | false | 3.544371 | false | false | false |
NUDelta/gaze | util.py | 2 | 3259 | # Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the Quickstart."""
__author__ = '[email protected] (Alain Vongsouvanh)'
from urlparse import urlparse
import httplib2
from apiclient.discovery import build
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import AccessTokenRefreshError
import sessions
from model import Credentials
# Load the secret that is used for client side sessions
# Create one of these for yourself with, for example:
# python -c "import os; print os.urandom(64)" > session.secret
SESSION_SECRET = open('session.secret').read()
def get_full_url(request_handler, path):
"""Return the full url from the provided request handler and path."""
pr = urlparse(request_handler.request.url)
return '%s://%s%s' % (pr.scheme, pr.netloc, path)
def load_session_credentials(request_handler):
"""Load credentials from the current session."""
session = sessions.LilCookies(request_handler, SESSION_SECRET)
userid = session.get_secure_cookie(name='userid')
if userid:
return userid, StorageByKeyName(Credentials, userid, 'credentials').get()
else:
return None, None
def store_userid(request_handler, userid):
"""Store current user's ID in session."""
session = sessions.LilCookies(request_handler, SESSION_SECRET)
session.set_secure_cookie(name='userid', value=userid)
def create_service(service, version, creds=None):
"""Create a Google API service.
Load an API service from a discovery document and authorize it with the
provided credentials.
Args:
service: Service name (e.g 'mirror', 'oauth2').
version: Service version (e.g 'v1').
creds: Credentials used to authorize service.
Returns:
Authorized Google API service.
"""
# Instantiate an Http instance
http = httplib2.Http()
if creds:
# Authorize the Http instance with the passed credentials
creds.authorize(http)
return build(service, version, http=http)
def auth_required(handler_method):
"""A decorator to require that the user has authorized the Glassware."""
def check_auth(self, *args):
self.userid, self.credentials = load_session_credentials(self)
self.mirror_service = create_service('mirror', 'v1', self.credentials)
# TODO: Also check that credentials are still valid.
if self.credentials:
try:
self.credentials.refresh(httplib2.Http())
return handler_method(self, *args)
except AccessTokenRefreshError:
# Access has been revoked.
store_userid(self, '')
credentials_entity = Credentials.get_by_key_name(self.userid)
if credentials_entity:
credentials_entity.delete()
self.redirect('/auth')
return check_auth
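# Illustrative use of the decorator above on a webapp2-style handler (the
# handler class is hypothetical and not part of this module):
#   class MainHandler(webapp2.RequestHandler):
#       @auth_required
#       def get(self):
#           # self.userid, self.credentials and self.mirror_service have
#           # been populated by check_auth at this point.
#           ...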
| bsd-3-clause | -4,627,289,110,335,351,000 | 31.59 | 77 | 0.727217 | false | 3.959903 | false | false | false |
huleiak47/python-hlutils | hlutils/tools/plot.py | 1 | 2114 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
r'''
Plot a function.
'''
# Changes:
# ** version 1.0 2014-05-07 Hulei **
# 1. first version
import os
import sys
import argparse
import matplotlib.pyplot as plt
from math import *
def parse_cmd():
parser = argparse.ArgumentParser()
parser.add_argument("--start", '-s', default = 0.0, type = float, help = "start of the range")
parser.add_argument("--end", '-e', default = 10.0, type = float, help = "end of the range")
parser.add_argument("--quality", "-q", default = 200, type = int, help = "quality of the range")
parser.add_argument("--xlabel", "-x", default = "x", help = "set label of x")
parser.add_argument("--ylabel", "-y", default = "y", help = "set label of y")
parser.add_argument("--title", "-t", default = None, help = "set title")
parser.add_argument("--style", "-Y", default = "-", help = "set style, can be .,o,^")
parser.add_argument("expression", nargs = "+", help = "a python expression, like: 1 * x**2 + 2 * x + 3")
return parser.parse_args(sys.argv[1:])
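# Illustrative invocations (expressions and ranges are hypothetical):
#   python plot.py -s -5 -e 5 "x**2" "sin(x)"
#   python plot.py --quality 500 --style o "exp(-x) * cos(x)"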
def plot_expression(expression, xlist, style):
xx = []
ylist = []
exp = eval("lambda x: %s" % expression)
for x in xlist:
try:
ylist.append(exp(x))
xx.append(x)
except Exception:
pass
plt.plot(xx, ylist, style, label = expression)
def main():
ns = parse_cmd()
xlist = []
step = (ns.end - ns.start) / ns.quality
val = ns.start
for i in range(ns.quality):
xlist.append(val)
val += step
index = 0
for expression in ns.expression:
color = ("r", "b", "g", "c", "y", "k")[index % 6]
plot_expression(expression, xlist, color + ns.style)
index += 1
plt.legend(bbox_to_anchor=(0.01, 0.99), loc=2, borderaxespad=0.)
plt.grid(True)
plt.xlabel(ns.xlabel)
plt.ylabel(ns.ylabel)
plt.show()
if __name__ == "__main__":
try:
main()
except Exception as e:
import traceback
traceback.print_exc()
print(type(e).__name__, ":", str(e))
sys.exit(1)
| apache-2.0 | -5,390,034,267,684,475,000 | 28.361111 | 108 | 0.566225 | false | 3.262346 | false | false | false |
rafaelvieiras/PseudoTV_Live | plugin.video.pseudotv.live/plugin.py | 1 | 13265 | # Copyright (C) 2020 Lunatixz
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV Live is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV Live is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from resources.lib.globals import *
from resources.lib.builder import Builder
class Plugin:
def __init__(self, sysARG=sys.argv):
log('__init__, sysARG = ' + str(sysARG))
self.sysARG = sysARG
self.CONTENT_TYPE = 'episodes'
self.CACHE_ENABLED = False
self.myBuilder = Builder()
self.myBuilder.plugin = self
self.myPlayer = MY_PLAYER
self.playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
self.maxDays = getSettingInt('Max_Days')
self.seekTol = getSettingInt('Seek_Tolerance')
self.usePlaylist = bool(getSettingInt('Playback_Method'))
def buildMenu(self, name=None):
log('buildMenu, name = %s'%name)
MAIN_MENU = [(LANGUAGE(30008), '', '')]#,#todo
# (LANGUAGE(30009), '', '')]
UTIL_MENU = [#(LANGUAGE(30010), '', '', LANGUAGE(30008)),
(LANGUAGE(30011), '', '', LANGUAGE(30008)),
(LANGUAGE(30096), '', '', LANGUAGE(30096)),
(LANGUAGE(30012)%(getPluginMeta(PVR_CLIENT).get('name',''),ADDON_NAME,), '', '', LANGUAGE(30008)),
(LANGUAGE(30065)%(getPluginMeta(PVR_CLIENT).get('name','')), '', '', LANGUAGE(30008)),
(LANGUAGE(30081), '', '', LANGUAGE(30008)),
(LANGUAGE(30013), '', '', LANGUAGE(30008))]
CHAN_MENU = [(LANGUAGE(30014), '', '', LANGUAGE(30009)),
(LANGUAGE(30015), '', '', LANGUAGE(30009))]
if name is None: items = MAIN_MENU
elif name == LANGUAGE(30008): items = UTIL_MENU
elif name == LANGUAGE(30009): items = CHAN_MENU
else: return
[self.addDir(*item) for item in items]
def deleteFiles(self, channels=True):
log('utilities, deleteFiles')
msg = 30096 if channels else 30011
if yesnoDialog('%s ?'%(LANGUAGE(msg))):
if channels:
self.myBuilder.channels.delete()
else:
[func() for func in [self.myBuilder.m3u.delete,self.myBuilder.xmltv.delete]]
return
def utilities(self, name):
log('utilities, name = %s'%name)
if name == LANGUAGE(30010): self.myBuilder.buildService(reloadPVR=True)
elif name == LANGUAGE(30011): self.deleteFiles()
elif name == LANGUAGE(30096): self.deleteFiles(channels=True)
elif name == LANGUAGE(30012)%(getPluginMeta(PVR_CLIENT).get('name',''),ADDON_NAME,): configurePVR()
elif name == LANGUAGE(30065)%(getPluginMeta(PVR_CLIENT).get('name','')): brutePVR()
elif name == LANGUAGE(30013): REAL_SETTINGS.openSettings()
elif name == LANGUAGE(30081): textviewer(getProperty('USER_LOG'),usemono=True)
else: return
xbmc.executebuiltin('Action(Back,10025)')
def channels(self, name):
log('channels, name = %s'%name)
if name == LANGUAGE(30014): self.buildChannels()
elif name == LANGUAGE(30015): return #todo prompt user, self.myBuilder.playlist.clearChannelList()
else: return
xbmc.executebuiltin('Action(back)')
def buildChannels(self):
log('buildChannels')
channelList = self.myBuilder.createChannelItems()
items = [(item['name'], item['number'], item['path'], '', item['logo']) for item in channelList]
for item in items: self.addDir(*item)
def contextPlay(self, writer, isPlaylist=False):
stpos = 0
writer = loadJSON(writer.replace(' / "',' , "').replace(" / ",", "))# current item
if not writer:
return notificationDialog(LANGUAGE(30001))
log('contextPlay, writer = %s, isPlaylist = %s'%(dumpJSON(writer),isPlaylist))
self.playlist.clear()
xbmc.sleep(100)
if not isPlaylist:
liz = buildItemListItem(writer)
listitems = [liz]
else:
channelData = writer.get('data',{})
if not channelData:
return notificationDialog(LANGUAGE(30001))
pvritem = self.myBuilder.jsonRPC.getPVRposition(channelData.get('name',''), channelData.get('id',''), isPlaylist=isPlaylist)
nowitem = pvritem.get('broadcastnow',{})
nextitems = pvritem.get('broadcastnext',[])[slice(0, PAGE_LIMIT)] # list of upcoming items, truncate for speed.
nextitems.insert(0,nowitem)
for pos, nextitem in enumerate(nextitems):
if loadJSON(nextitem.get('writer',{})).get('file','') == writer.get('file',''):
stpos = pos
break
log('contextPlay, writer stpos = %s'%(stpos))
listitems = ([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])
[self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
if isPlaylistRandom(): self.playlist.unshuffle()
return self.myPlayer.play(self.playlist, startpos=stpos)
def playRadio(self, name, id):
log('playRadio, id = %s'%(id))
pvritem = self.myBuilder.jsonRPC.getPVRposition(name, id, radio=True)
nowitem = pvritem.get('broadcastnow',{}) # current item
writer = loadJSON(nowitem.get('writer',{}))
if not writer:
notificationDialog(LANGUAGE(30001))
return xbmcplugin.setResolvedUrl(int(self.sysARG[1]), False, xbmcgui.ListItem())
json_response = self.myBuilder.jsonRPC.requestList(id, writer.get('data',{}).get('path',''), 'music', page=250)
if json_response:
setCurrentChannelItem(pvritem)
self.playlist.clear()
xbmc.sleep(100)
listitems = [buildItemListItem(item, mType='music') for item in json_response]
[self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
if isPlaylistRandom(): self.playlist.unshuffle()
log('playRadio, Playlist size = %s'%(self.playlist.size()))
return self.myPlayer.play(self.playlist)
def playChannel(self, name, id, radio=False, isPlaylist=False, failed=False):
log('playChannel, id = %s, isPlaylist = %s'%(id,isPlaylist))
found = False
liz = xbmcgui.ListItem()
listitems = [liz] #empty listitem required to pass failed playback.
pvritem = self.myBuilder.jsonRPC.getPVRposition(name, id, isPlaylist=isPlaylist)
nowitem = pvritem.get('broadcastnow',{}) # current item
nextitems = pvritem.get('broadcastnext',[])[slice(0, PAGE_LIMIT)] # list of upcoming items, truncate for speed.
ruleslist = []#check pre-play channel rules.
if nowitem:
found = True
setCurrentChannelItem(pvritem)
progress = nowitem['progress']
runtime = nowitem['runtime']
liz = buildItemListItem(loadJSON(nowitem.get('writer',{})))
if (progress > self.seekTol):
# near end, avoid loopback; override last listitem and queue next show.
if (progress > ((runtime * 60) - 45)): #45sec endtime offset
log('playChannel, progress = %s near end, queue nextitem'%(progress))
liz = buildItemListItem(loadJSON(nextitems[0].get('writer',{})))
else:
log('playChannel, progress = %s within seek tolerance setting seek.'%(progress))
liz.setProperty('totaltime' , str((runtime * 60)))
liz.setProperty('resumetime' , str(progress))
liz.setProperty('startoffset', str(progress))
# remove bct pre-roll from stack://
url = liz.getPath()
info = liz.getVideoInfoTag()
writer = loadJSON(info.getWritingCredits())
file = writer.get('originalfile','')
if url.startswith('stack://') and not url.startswith('stack://%s'%(file)):
log('playChannel, playing stack with url = %s'%(url))
paths = url.split(' , ')
for path in paths:
if file not in path:
paths.remove(path)
elif file in path:
break
liz.setPath('stack://%s'%(' , '.join(paths)))
listitems = [liz]
if isPlaylist:
self.playlist.clear()
xbmc.sleep(100)
listitems.extend([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])
[self.playlist.add(lz.getPath(),lz,idx) for idx,lz in enumerate(listitems)]
if isPlaylistRandom(): self.playlist.unshuffle()
log('playChannel, Playlist size = %s'%(self.playlist.size()))
return self.myPlayer.play(self.playlist)
# else:
# listitems.extend([buildItemListItem(loadJSON(nextitem.get('writer',''))) for nextitem in nextitems])
# paths = [lz.getPath() for lz in listitems]
# liz.setPath('stack://%s'%(' , '.join(paths)))
# listitems = [liz]
#todo found == False set fallback to nextitem? with playlist and failed == True?
xbmcplugin.setResolvedUrl(int(self.sysARG[1]), found, listitems[0])
def addLink(self, name, channel, path, mode='',icon=ICON, liz=None, total=0):
if liz is None:
liz=xbmcgui.ListItem(name)
liz.setInfo(type="Video", infoLabels={"mediatype":"video","label":name,"title":name})
liz.setArt({'thumb':icon,'logo':icon,'icon':icon})
log('addLink, name = %s'%(name))
u=self.sysARG[0]+"?url="+urllib.parse.quote(path)+"&channel="+str(channel)+"&name="+urllib.parse.quote(name)+"&mode="+str(mode)
xbmcplugin.addDirectoryItem(handle=int(self.sysARG[1]),url=u,listitem=liz,totalItems=total)
def addDir(self, name, channel, path, mode='',icon=ICON, liz=None):
log('addDir, name = %s'%(name))
if liz is None:
liz=xbmcgui.ListItem(name)
liz.setInfo(type="Video", infoLabels={"mediatype":"video","label":name,"title":name})
liz.setArt({'thumb':icon,'logo':icon,'icon':icon})
liz.setProperty('IsPlayable', 'false')
u=self.sysARG[0]+"?url="+urllib.parse.quote(path)+"&channel="+str(channel)+"&name="+urllib.parse.quote(name)+"&mode="+str(mode)
xbmcplugin.addDirectoryItem(handle=int(self.sysARG[1]),url=u,listitem=liz,isFolder=True)
def getParams(self):
return dict(urllib.parse.parse_qsl(self.sysARG[2][1:]))
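    # Illustrative plugin URL that getParams() decodes (values hypothetical):
    #   plugin://plugin.video.pseudotv.live/?url=...&channel=1&name=Channel%201&mode=play
    #   -> {'url': '...', 'channel': '1', 'name': 'Channel 1', 'mode': 'play'}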
def run(self):
params=self.getParams()
name = (urllib.parse.unquote(params.get("name",'')) or None)
channel = (params.get("channel",'') or None)
url = (params.get("url",'') or None)
id = (params.get("id",'') or None)
radio = (params.get("radio",'') or 'False')
mode = (params.get("mode",'') or None)
log("Name: %s" %(name))
log("Channel: %s"%(channel))
log("URL: %s" %(url))
log("ID: %s" %(id))
log("Radio: %s" %(radio))
log("Mode: %s" %(mode))
if channel is None:
if mode is None: self.buildMenu(name)
elif mode == 'play':
if radio == 'True':
self.playRadio(name, id)
else:
self.playChannel(name, id, isPlaylist=self.usePlaylist)
elif mode == 'Utilities': self.utilities(name)
elif mode == 'Channels': self.channels(name)
xbmcplugin.setContent(int(self.sysARG[1]) , self.CONTENT_TYPE)
xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_NONE)
xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(int(self.sysARG[1]) , xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.endOfDirectory(int(self.sysARG[1]), cacheToDisc=self.CACHE_ENABLED)
if __name__ == '__main__': Plugin(sys.argv).run() | gpl-3.0 | -4,419,914,003,886,265,300 | 47.24 | 140 | 0.568262 | false | 3.846042 | false | false | false |
zzxuanyuan/osgparse | osgparse/slidingwindow.py | 1 | 6204 | # This file does regression test using Random Forest Regression.
# The input file should be insert_total_jobs.csv that contains total jobs info.
import matplotlib.pyplot as plt
import math
import sys
import pandas as pd
import numpy as np
class SlidingWindow:
def __init__(self, job_instances_file, attribute = "DesktopEndDateMinute", train_window_size = 20, test_window_size = 1):
self.df = pd.read_csv(job_instances_file, header=0)
self.attribute = attribute
if attribute != "DesktopEndDateMinute":
print "ERROR: attribute is not DesktopEndDateMinute"
return -1
self.value_counts = self.df[attribute].value_counts()
self.index_list = sorted(self.value_counts.index)
self.train_window_size = train_window_size
self.test_window_size = test_window_size
self.cur_train_attr_start = None
self.cur_train_attr_end = None
self.cur_test_attr_start = None
self.cur_test_attr_end = None
self.cur_train_index_start = None
self.cur_train_index_end = None
self.cur_test_index_start = None
self.cur_test_index_end = None
self.cur_train_line_start = None
self.cur_train_line_end = None
self.cur_test_line_start = None
self.cur_test_line_end = None
self.next_window_index = None
self.next_window_line_start = None
self.df_train = None
self.df_test = None
# self.machine_learning_engine = osgparse.mlengine.MachineLearningEngine()
# self.regression_engine = osg
def slide_depreciated(self):
if self.cur_train_attr_start == None and self.cur_train_attr_end == None and self.cur_test_attr_start == None and self.cur_test_attr_end == None:
self.cur_train_index_start = 0
self.cur_train_attr_start = self.index_list[0]
self.cur_train_line_start = 0
self.next_window_line_start = 0
else:
# find right attrribute positions to avoid reaching the end of data
self.cur_train_index_start += self.test_window_size
self.cur_train_attr_start = self.index_list[self.cur_train_index_start]
self.cur_train_line_start = self.next_window_line_start
self.cur_train_index_end = self.cur_train_index_start + self.train_window_size - 1
self.cur_test_index_start = self.cur_train_index_end + 1
self.cur_test_index_end = self.cur_test_index_start + self.test_window_size - 1
if self.cur_test_index_end >= len(self.index_list):
print "Reach the end of DataFrame!"
return "EOF"
self.cur_train_attr_end = self.index_list[self.cur_train_index_end]
self.cur_test_attr_start = self.index_list[self.cur_test_index_start]
self.cur_test_attr_end = self.index_list[self.cur_test_index_end]
accumulate_line = 0
self.next_window_index = self.cur_train_index_start + self.test_window_size
for idx in range(self.cur_train_index_start, self.cur_train_index_end + 1):
if idx == self.next_window_index:
self.next_window_line_start += accumulate_line
accumulate_line += self.value_counts[self.index_list[idx]]
self.cur_train_line_end = self.cur_train_line_start + accumulate_line - 1
self.cur_test_line_start = self.cur_train_line_end + 1
accumulate_line = 0
for idx in range(self.cur_test_index_start, self.cur_test_index_end + 1):
accumulate_line += self.value_counts[self.index_list[idx]]
self.cur_test_line_end = self.cur_test_line_start + accumulate_line - 1
self.df_train = self.df[self.cur_train_line_start:self.cur_train_line_end+1]
self.df_test = self.df[self.cur_test_line_start:self.cur_test_line_end+1]
return (self.df_train, self.df_test)
def slide(self):
if self.cur_train_attr_start == None and self.cur_train_attr_end == None and self.cur_test_attr_start == None and self.cur_test_attr_end == None:
self.cur_train_index_start = 0
self.cur_train_attr_start = self.index_list[0]
self.cur_train_line_start = 0
self.next_window_line_start = 0
else:
# find right attrribute positions to avoid reaching the end of data
self.cur_train_index_start += self.test_window_size
self.cur_train_attr_start = self.index_list[self.cur_train_index_start]
self.cur_train_line_start = self.next_window_line_start
self.cur_train_index_end = self.cur_train_index_start + self.train_window_size - 1
self.cur_test_index_start = self.cur_train_index_end + 1
self.cur_test_index_end = self.cur_test_index_start + self.test_window_size - 1
if self.cur_test_index_end >= len(self.index_list):
print "Reach the end of DataFrame!"
return "EOF"
self.cur_train_attr_end = self.index_list[self.cur_train_index_end]
self.cur_test_attr_start = self.index_list[self.cur_test_index_start]
self.cur_test_attr_end = self.index_list[self.cur_test_index_end]
accumulate_line = 0
self.next_window_index = self.cur_train_index_start + self.test_window_size
for idx in range(self.cur_train_index_start, self.cur_train_index_end + 1):
if idx == self.next_window_index:
self.next_window_line_start += accumulate_line
accumulate_line += self.value_counts[self.index_list[idx]]
self.cur_train_line_end = self.cur_train_line_start + accumulate_line - 1
self.cur_test_line_start = self.cur_train_line_end + 1
self.df_train = self.df[self.cur_train_line_start:self.cur_train_line_end+1]
# print self.df[self.cur_test_line_start:]
if self.attribute != "DesktopEndDateMinute":
print "ERROR: attribute is no damn DesktopEndDateMinute!"
return -1
# self.df_test = self.df[self.cur_test_line_start:][(self.df["DesktopStartDateMinute"] <= self.cur_test_attr_start) & (self.df["DesktopEndDateMinute"] > self.cur_test_attr_start)]
self.df_test = self.df.loc[self.cur_test_line_start:].query('DesktopStartDateMinute <= @self.cur_test_attr_start and DesktopEndDateMinute > @self.cur_test_attr_start')
# print "cur_train_attr_start = ", self.cur_train_attr_start, "cur_train_attr_end = ", self.cur_train_attr_end
# print "cur_test_attr_start = ", self.cur_test_attr_start, "cur_test_attr_end = ", self.cur_test_attr_end
# print "df_train = ", self.df_train
# print "df_test = ", self.df_test
# print "cur_test_attr_start = ", self.cur_test_attr_start
# print self.df_test[['DesktopStartDateMinute','DesktopEndDateMinute']]
return (self.df_train, self.df_test, self.cur_test_attr_start)
def get_values(self, attribute):
return self.df[attribute].value_counts().index
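# Illustrative driver loop (file name and window sizes are the defaults
# described above; values hypothetical):
#   sw = SlidingWindow("insert_total_jobs.csv", train_window_size=20, test_window_size=1)
#   while True:
#       result = sw.slide()
#       if result == "EOF":
#           break
#       df_train, df_test, test_attr_start = result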
| bsd-3-clause | -3,957,543,696,479,553,000 | 49.032258 | 180 | 0.721954 | false | 2.782063 | true | false | false |
UltrosBot/Ultros3K | tests/storage/data/test_toml.py | 1 | 3529 | # coding=utf-8
import os
import secrets
import shutil
import tempfile
from nose.tools import assert_equal, assert_true
from unittest import TestCase
from ultros.core.storage.data.toml import TOMLData
from ultros.core.storage.manager import StorageManager
__author__ = "Gareth Coles"
class TestTOML(TestCase):
def setUp(self):
self.directory = os.path.join(tempfile.gettempdir(), secrets.token_urlsafe(10))
if not os.path.exists(self.directory):
os.mkdir(self.directory)
self.config_dir = os.path.join(self.directory, "config")
self.data_dir = os.path.join(self.directory, "data")
if not os.path.exists(self.config_dir):
os.mkdir(self.config_dir)
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
current_dir = os.path.dirname(__file__)
tests_dir = os.path.join(current_dir, "../../")
shutil.copy(os.path.join(tests_dir, "files/test.toml"), os.path.join(self.data_dir, "test.toml"))
self.manager = StorageManager(
ultros=None,
config_location=self.config_dir,
data_location=self.data_dir
)
def tearDown(self):
self.manager.shutdown()
del self.manager
if os.path.exists(self.directory):
shutil.rmtree(self.directory)
def test_dict_functionality(self):
"""
TOML data testing: Dict functionality
"""
def _data_object() -> TOMLData:
return self.manager.get_data(
"test.toml", None
)
data_obj = _data_object()
assert_equal(
len(data_obj),
6
)
assert_equal(
data_obj.copy(),
data_obj.data
)
assert_equal(
data_obj.get("test"),
"test"
)
assert_equal(
list(data_obj.items()),
[
("test", "test"),
("herp", "derp"),
("int", 1),
("float", 1.1),
("boolean", True),
("other_boolean", False)
]
)
assert_equal(
list(data_obj.keys()),
["test", "herp", "int", "float", "boolean", "other_boolean"]
)
assert_equal(
list(data_obj.values()),
["test", "derp", 1, 1.1, True, False]
)
assert_true(
"test" in data_obj
)
assert_equal(
data_obj["test"],
"test"
)
assert_equal(
list(data_obj),
["test", "herp", "int", "float", "boolean", "other_boolean"]
)
assert_equal(
len(data_obj),
6
)
def test_read(self):
"""
TOML data testing: Reading
"""
def _data_object() -> TOMLData:
return self.manager.get_data(
"test.toml", None
)
data_obj = _data_object()
assert_equal(
data_obj["test"],
"test"
)
assert_equal(
data_obj["herp"],
"derp"
)
assert_equal(
data_obj["int"],
1
)
assert_equal(
data_obj["float"],
1.1
)
assert_equal(
data_obj["boolean"],
True
)
assert_equal(
data_obj["other_boolean"],
False
)
| artistic-2.0 | -5,916,081,568,290,232,000 | 21.194969 | 105 | 0.474072 | false | 3.943017 | true | false | false |
DonJayamanne/pythonVSCode | pythonFiles/vscode_datascience_helpers/dataframes/vscodeGetDataFrameInfo.py | 1 | 1893 | # Query Jupyter server for the info about a dataframe
import json as _VSCODE_json
import pandas as _VSCODE_pd
import pandas.io.json as _VSCODE_pd_json
import builtins as _VSCODE_builtins
import vscodeDataFrameHelpers as _VSCODE_dataFrameHelpers
# Function to do our work. It will return the object
def _VSCODE_getDataFrameInfo(df):
df = _VSCODE_dataFrameHelpers._VSCODE_convertToDataFrame(df)
rowCount = _VSCODE_dataFrameHelpers._VSCODE_getRowCount(df)
# If any rows, use pandas json to convert a single row to json. Extract
# the column names and types from the json so we match what we'll fetch when
# we ask for all of the rows
if rowCount:
try:
row = df.iloc[0:1]
json_row = _VSCODE_pd_json.to_json(None, row, date_format="iso")
columnNames = list(_VSCODE_json.loads(json_row))
except:
columnNames = list(df)
else:
columnNames = list(df)
# Compute the index column. It may have been renamed
indexColumn = df.index.name if df.index.name else "index"
columnTypes = _VSCODE_builtins.list(df.dtypes)
# Make sure the index column exists
if indexColumn not in columnNames:
columnNames.insert(0, indexColumn)
columnTypes.insert(0, "int64")
# Then loop and generate our output json
columns = []
for n in _VSCODE_builtins.range(0, _VSCODE_builtins.len(columnNames)):
column_type = columnTypes[n]
column_name = str(columnNames[n])
colobj = {}
colobj["key"] = column_name
colobj["name"] = column_name
colobj["type"] = str(column_type)
columns.append(colobj)
# Save this in our target
target = {}
target["columns"] = columns
target["indexColumn"] = indexColumn
target["rowCount"] = rowCount
# return our json object as a string
return _VSCODE_json.dumps(target)
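# Illustrative shape of the JSON string returned above (a hypothetical frame
# with one float column and the default integer index):
#   {"columns": [{"key": "index", "name": "index", "type": "int64"},
#                {"key": "a", "name": "a", "type": "float64"}],
#    "indexColumn": "index", "rowCount": 3}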
| mit | 2,155,433,398,067,761,400 | 34.716981 | 80 | 0.664554 | false | 3.647399 | false | false | false |
BigPeet/pr0tagger | src/collect_data_from_pr0gramm.py | 1 | 5373 | import sys
import time
import datetime
import argparse
import pr0gramm
import data_collection
import logging_setup
import logging
LOG = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description="Collect data from pr0gramm.com")
parser.add_argument(
"--no_sfw", help="disable SFW content", action="store_true")
parser.add_argument(
"--nsfw", help="enable NSFW content", action="store_true")
parser.add_argument(
"--nsfl", help="enable NSFL content", action="store_true")
parser.add_argument(
"--no_images", help="disable images", action="store_true")
parser.add_argument(
"--allow_videos", help="enable video content", action="store_true")
parser.add_argument(
"--last_id", "-id", help="the last promoted id use as anchor point", type=int, default=None)
parser.add_argument("--age_threshold", "-age",
help="a submission must be the given amount of hours old to be downloaded (Default: 5)",
type=int, default=5)
parser.add_argument("--min_num_of_tags", "-min",
help="a submission must have the given amount of tags to be downloaded (Default: 5)",
type=int, default=5)
parser.add_argument(
"--search_backwards", help="search for submission older than last_id, instead of newer", action="store_true")
parser.add_argument("--media_directory", "-o",
help="the download directory for media content (images, videos)", type=str, default="/tmp")
parser.add_argument("--annotation_file", "-ann",
help="the annotation file that should be created/edited for the downloaded media content",
type=str, default="/tmp/annotation.txt")
parser.add_argument("--json_directory", "-jd",
help="the download directory for the retrieved json content", type=str, default="/tmp")
parser.add_argument("--data_source", "-ds",
help="the type of source that should be used when downloading media (0=IMAGE, 1=THUMBNAIL, 2=FULL_SIZE), Default: 0",
type=int, choices=range(3), default=0)
parser.add_argument("--no_download", "-nd",
help="disables downloading the media content for submissions", action="store_true")
parser.add_argument(
"--save_json", "-sj", help="enables saving the retrieved json content locally", action="store_true")
parser.add_argument("--use_local_storage", "-l",
help="enables using previously locally stored json contents instead of retrieving remotely",
action="store_true")
parser.add_argument(
"--waiting_time", "-t", help="set the waiting time for lookups in hours (Default: 5)", type=int, default=5)
parser.add_argument("logging_json_config", "-lc",
help="the logging json dictionary used to initialize the logging framework (Default: ../etc/logging.json)",
type=str,
default="../etc/logging.json")
parser.add_argument("logging_file", "-lf",
help="specify a log file, per default the log file is chosen based on the logging_json_config",
type=str,
default=None)
logging_setup.setup_logging(args.logging_json_config, log_file=args.logging_file)
args = parser.parse_args()
run_collection_process(args)
def run_collection_process(args):
collector = initialize_collector(args)
waiting_time_in_seconds = args.waiting_time * 60 * 60
while(True):
LOG.info("Start collecting from ID: {}.".format(collector.getLastId()))
collector.collectDataBatch()
LOG.info("Collected {0} item(s). Last ID: {1}".format(
collector.getSizeOfLastBatch(),
collector.getLastId()))
LOG.info("Going to sleep for {0} hours until {1}.".format(
args.waiting_time,
datetime.datetime.now() + datetime.timedelta(hours=args.waiting_time)))
if collector.getSizeOfLastBatch() <= 0:
# TODO: give some status updates while waiting
time.sleep(waiting_time_in_seconds)
def initialize_collector(args):
api = initialize_api(args)
collector = data_collection.DataCollector(api)
collector.setLastId(args.last_id)
collector.setAgeThreshold(hours=args.age_threshold)
collector.setMinimumNumberOfTags(args.min_num_of_tags)
if args.search_backwards:
collector.useBackwardsSearch()
collector.setMediaDirectory(args.media_directory)
collector.setAnnotationFile(args.annotation_file)
collector.setJsonDir(args.json_directory)
collector.setDataSource(args.data_source)
collector.setDownloadMedia(not args.no_download)
collector.setSaveJSON(args.save_json)
collector.setUseLocalStorage(args.use_local_storage)
return collector
def initialize_api(args):
api = pr0gramm.API()
if args.no_sfw:
api.disableSFW()
if args.nsfw:
api.enableNSFW()
if args.nsfl:
api.enableNSFL()
if args.no_images:
api.disableImages()
if args.allow_videos:
api.enableVideos()
return api
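# Illustrative invocation (paths and values are hypothetical):
#   python collect_data_from_pr0gramm.py --nsfw --waiting_time 2 \
#       --media_directory /tmp/pr0 --annotation_file /tmp/pr0/annotation.txt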
if __name__ == "__main__":
main()
| mit | 6,436,225,341,543,186,000 | 39.398496 | 141 | 0.628513 | false | 4.101527 | false | false | false |
apple/swift-lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-objc/ObjCDataFormatterTestCase.py | 5 | 1335 | # encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ObjCDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def appkit_tester_impl(self, commands):
self.build()
self.appkit_common_data_formatters_command()
commands()
def appkit_common_data_formatters_command(self):
"""Test formatters for AppKit classes."""
self.target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
self, '// Set break point at this line.',
lldb.SBFileSpec('main.m', False))
# The stop reason of the thread should be breakpoint.
self.expect(
"thread list",
STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped', 'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type synth clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
| apache-2.0 | 8,963,193,377,505,250,000 | 30.046512 | 77 | 0.669663 | false | 3.836207 | true | false | false |
aj-may/django-bootstrap-typeahead | django_bootstrap_typeahead/fields.py | 1 | 1247 | from django.forms import ModelChoiceField, ModelMultipleChoiceField
from .widgets import TypeaheadInput, MultipleTypeaheadInput
class TypeaheadField(ModelChoiceField):
"""A Typeahead Text field"""
def __init__(self, queryset, builder=False, required=True, label=None,
initial=None, help_text='', limit_choices_to=None,
*args, **kwargs):
super(TypeaheadField, self).__init__(
queryset, required=required,
widget=TypeaheadInput(queryset=queryset, builder=builder),
label=label, initial=initial, help_text=help_text,
limit_choices_to=limit_choices_to, empty_label='', *args, **kwargs
)
class MultipleTypeaheadField(ModelMultipleChoiceField):
"""A Typeahead Multiple choice field for Tags"""
def __init__(self, queryset, builder=False, required=True, label=None,
initial=None, help_text='', limit_choices_to=None,
*args, **kwargs):
super(MultipleTypeaheadField, self).__init__(
queryset, required=required,
widget=MultipleTypeaheadInput(queryset=queryset, builder=builder),
label=label, initial=initial, help_text=help_text, *args, **kwargs
)
| mit | 4,217,662,503,702,507,000 | 39.225806 | 78 | 0.648757 | false | 4.314879 | false | false | false |
kenb123/Basic-Expression-Lexicon-Variation-Algorithms-BELVA | src/gui/belva_qt4_interface.py | 1 | 19809 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# pyOwaspBELVA - Contextual custom dictionary builder with character and word variations for pen-testers
# Copyright (C) 2016 OWASP Foundation / Kenneth F. Belva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# This project is named after my amazing father:
# Albert Joseph BELVA
#
# And, it is dedicated to him and his memory.
#
# This dedication and project is to raise awareness for
# Lewy Body Disease / Dementia which my father lived with
# since his mid-60s until his passing at 72.
#
# More information on Lewy Body Dementia may be found here:
# https://en.wikipedia.org/wiki/Dementia_with_Lewy_bodies
#
# Please add this dedication to every file in the project.
# Thank you much. -Ken
#--------------------------------------------------------------------------------------------------
import os, time, datetime, sys, sqlite3
from PyQt4 import QtGui
# converted Qt4 UI from Qt Converter & cmd; pyuic4 design.ui -o design.py
import src.gui.design
from src.db.belvaDbInitalize import belvaInitDB
from src.db.belvaDbInitalize import belvaRemoveDB
from src.db.belvaSqlDBroutines import count_text_words
from src.db.belvaSqlDBroutines import count_burp_words
from src.db.belvaSqlDBroutines import count_zap_words
from src.db.belvaSqlDBroutines import get_all_burp_words
from src.db.belvaSqlDBroutines import create_consolidated_list
from src.db.belvaSqlDBroutines import count_consolidated_list
from src.db.belvaSqlDBroutines import get_all_consolidated_words
from src.pluginSystem.pluginControlSystem import get_policy_mutate_names
from src.pluginSystem.pluginControlSystem import get_policy_select_names
from src.pluginSystem.pluginControlSystem import get_substitution_names
from src.pluginSystem.pluginControlSystem import return_substitution_dict
from src.threadQueue.aptQueueControlSystem import send_words_to_queue
from src.belvaCommonRoutines import iterative_function
from src.belvaCommonRoutines import get_positions
from src.dataImport.belvaDataImport import belvaDataImport
#--------------------------------------------------------------------------------------------------
class BELVA_AppUI(QtGui.QMainWindow, src.gui.design.Ui_MainWindow):
#--------------------------------------------------------------------------------------------------
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
#UPDATE WINDOW BAR GUI VERSION NUMBER
self.textBrowser_help_text.append("Follow / Contact me on Twitter: @infosecmaverick")
self.textBrowser_help_text.append("Help on the OWASP Project Page: http://bit.ly/1okrO1T")
# self.textBrowser_help_text.append(" https://www.owasp.org/index.php/OWASP_Basic_Expression_%26_Lexicon_Variation_Algorithms_%28BELVA%29_Project")
self.textBrowser_help_text.append("Topics will include:")
self.textBrowser_help_text.append(" How to import burp xml files for org specific content")
self.textBrowser_help_text.append(" How to import ZAP raw files for org specific content")
self.textBrowser_help_text.append(" How to create user id combinations")
self.textBrowser_help_text.append(" How to write a plugin")
self.textBrowser_help_text.moveCursor(QtGui.QTextCursor.Start)
self.progressBar.setValue(0)
#set the default directory to the localized importExternalSources Folder
# current_directory = os.getcwd()
current_directory = os.path.dirname(os.path.abspath(__file__))
current_directory = current_directory.replace("/src/gui", "")
current_directory = current_directory.replace("\src\gui", "")
self.lineEdit_input_src_dir.setText(current_directory + "/importExternalSources/")
#set the default directory to the localized outputFile Folder
self.lineEdit_output_src_dir.setText(current_directory+ "/outputFile/output.txt")
#load boxes....
policy_names = []
subsuitition_names = []
policy_mutate_names = get_policy_mutate_names()
policy_select_names = get_policy_select_names()
subsuitition_names = get_substitution_names()
for policy_name in policy_mutate_names:
self.listWidget_policies_mutate.addItem(policy_mutate_names[policy_name])
for policy_name in policy_select_names:
self.listWidget_policies_select.addItem(policy_select_names[policy_name])
for subsuitition_name in subsuitition_names:
self.listWidget_substitutions.addItem(subsuitition_names[subsuitition_name])
self.pushButton_input_src_dir.clicked.connect(self.input_src_dir) # When the button is pressed
self.pushButton_output_src_dir.clicked.connect(self.output_src_dir) # When the button is pressed
self.pushButton_run_belva.clicked.connect(self.run_belva) # When the button is pressed
def form_checks(self):
# default value should be false
passed_checks = False
# we can put error checking here
passed_checks = True
return passed_checks
#=================================================
# assuming we pass the checks, we write an API layer into UI design
#=================================================
def run_belva(self):
if self.form_checks():
# self.textBrowser_results_window.clear() # In case there are any existing elements in the list
self.progressBar.setValue(0)
self.textBrowser_status_msgs.clear()
self.textBrowser_status_msgs_brief.clear()
input_directory = self.lineEdit_input_src_dir.text()
output_file = self.lineEdit_output_src_dir.text()
global_gui_status_msgs = self.textBrowser_status_msgs
global_gui_status_msgs_brief = self.textBrowser_status_msgs_brief
global_gui_progressBar = self.progressBar
start_time = time.time()
# your code
# global_gui_status_msgs, global_gui_status_msgs_brief, global_gui_progressBar,
#------------------------------------
# This should really be passed in via parameters but need to
# research signals and slots for QT4... until then....
#------------------------------------
policy_mutate_names = []
policy_select_names = []
subsuitition_names = []
policy_mutate_names = get_policy_mutate_names()
policy_select_names = get_policy_select_names()
subsuitition_names = get_substitution_names()
#------------------------------------
#------------------------------------
# Create database to normalize data and have unique words
#------------------------------------
MD5_string = belvaInitDB()
#idea - have form to auto generate substitution and policy plugins...
policy_mutate_descriptions_selected = []
for policy_description_selected in self.listWidget_policies_mutate.selectedItems():
policy_mutate_descriptions_selected.append(policy_description_selected.text())
policy_select_descriptions_selected = []
for policy_description_selected in self.listWidget_policies_select.selectedItems():
policy_select_descriptions_selected.append(policy_description_selected.text())
substitution_descriptions_selected = []
for substitution_description_selected in self.listWidget_substitutions.selectedItems():
substitution_descriptions_selected.append(substitution_description_selected.text())
#------------------------------------
# Translate Descriptions back into plugin names
#------------------------------------
policy_mutate_plugin_names = []
for policy_description in policy_mutate_descriptions_selected:
for policy_name in policy_mutate_names:
if policy_mutate_names[policy_name] == policy_description:
policy_mutate_plugin_names.append(policy_name)
policy_select_plugin_names = []
for policy_description in policy_select_descriptions_selected:
for policy_name in policy_select_names:
if policy_select_names[policy_name] == policy_description:
policy_select_plugin_names.append(policy_name)
substitution_plugin_names = []
for substitution_description in substitution_descriptions_selected:
for substitution_name in subsuitition_names:
if subsuitition_names[substitution_name] == substitution_description:
substitution_plugin_names.append(substitution_name)
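            # Illustrative (hypothetical) mapping: if a mutate plug-in registered
            # itself as
            #   policy_mutate_names == {'caseMutate': 'Mutate upper/lower case'}
            # then selecting 'Mutate upper/lower case' in the list widget yields
            #   policy_mutate_plugin_names == ['caseMutate']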
#------------------------------------
# Get files to import / separate large from small
#------------------------------------
small_filename_dict = {}
large_filename_dict = {}
for root, directories, filenames in os.walk(input_directory):
for filename in filenames:
full_path_w_file = os.path.join(root,filename)
filename, file_extension = os.path.splitext(full_path_w_file)
# small_filename_dict[full_path_w_file] = file_extension
# filename = os.path.basename(full_path_w_file)
# 10 MB
if ((os.path.getsize(full_path_w_file) >= 10485760) and (file_extension == '.txt')):
large_filename_dict[full_path_w_file] = file_extension
else:
small_filename_dict[full_path_w_file] = file_extension
#------------------------------------
# Get words to filter
#------------------------------------
remove_common_words = []
# common_words_dir = os.getcwd() + "/filterDictionaries/"
common_words_dir = os.path.dirname(os.path.abspath(__file__))
common_words_dir = common_words_dir.replace("/src/gui", "")
common_words_dir = common_words_dir.replace("\src\gui", "")
common_words_dir = common_words_dir + "/filterDictionaries/"
for root, directories, filenames in os.walk(common_words_dir):
for filename in filenames:
full_path_w_file = os.path.join(root,filename)
f = open(full_path_w_file,'r')
for line in f:
if str(line).strip():
remove_common_words.append(str(line).strip().lower())
f.close()
f = None
#-------------------
#------------------------------------
# Import Data from Wordlists, ZAP and burp
#------------------------------------
self.textBrowser_status_msgs.append("Starting: reading through files...")
self.textBrowser_status_msgs.append("Starting: removing common words...")
self.textBrowser_status_msgs.append("Starting: creating temp word dictionary...")
all_consolidated_words = belvaDataImport(global_gui_status_msgs, global_gui_status_msgs_brief, global_gui_progressBar, small_filename_dict, MD5_string, remove_common_words)
total_word_count = len(all_consolidated_words)
self.textBrowser_status_msgs_brief.clear()
self.textBrowser_status_msgs.append("Total Number of Unique Consolidated Words for small files: " + str(total_word_count))
# no words found!
# gui.belva_qt4_global_gui_vars.global_gui_window = self.textBrowser_results_window
# gui.belva_qt4_routines_delete.run_app(nmap_text, masscan_network_text, masscan_ports_text)
#------------------------------------
# Set progress bar for end user info
#------------------------------------
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(int(total_word_count))
self.progressBar.setValue(0)
count = 0
positions_ds = {}
subsitution_dictionary = {}
# all_consolidated_words = get_all_consolidated_words(MD5_string)
self.textBrowser_status_msgs.append("Mutating finalized temp word dictionary for small files...")
for substitution_plugin_name in substitution_plugin_names:
#------------------------------------
# retrieve dictionary from substitution selected
#------------------------------------
subsitution_dictionary = return_substitution_dict(substitution_plugin_name)
self.textBrowser_status_msgs.append("Using substitution plug-in: " + substitution_plugin_name)
#------------------------------------
# cycle through finalized list of words
#------------------------------------
if int(total_word_count) > 20:
break_up_queue = round(int(total_word_count) / 20)
else:
break_up_queue = int(total_word_count)
if all_consolidated_words:
for word in all_consolidated_words:
# the illusion of progress
count = self.progressBar.value() + 1
self.progressBar.setValue(count)
self.textBrowser_status_msgs_brief.setText("Now processing word " + str(count) + " of " + str(total_word_count) + " : " + str(word).strip())
send_words_to_queue(all_consolidated_words, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)
#------------------------------------
# process large files
#------------------------------------
self.textBrowser_status_msgs_brief.clear()
if large_filename_dict:
self.textBrowser_status_msgs.append("Now processing large files...")
for full_path in large_filename_dict:
total_word_count = -1
with open(full_path, 'r', errors='replace') as f:
for total_word_count, l in enumerate(f):
pass
if total_word_count == -1:
total_word_count = 0
elif total_word_count >= 0:
total_word_count += 1
break_up_queue = round(total_word_count / 20)
filename = os.path.basename(full_path)
self.textBrowser_status_msgs.append("Now processing large file: " + str(filename) + " with a word count of: " + str(total_word_count))
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(int(total_word_count))
self.progressBar.setValue(0)
count = 0
f = open(full_path,'r', errors='replace')
words_array = []
for line in f:
count = self.progressBar.value() + 1
self.progressBar.setValue(count)
self.textBrowser_status_msgs_brief.setText("Processing through word " + str(count) + " of " + str(total_word_count) + " : " + str(line).strip())
if str(line).strip():
if not(str(line).strip() in remove_common_words):
words_array.append(str(line).strip())
if len(words_array) == break_up_queue:
for substitution_plugin_name in substitution_plugin_names:
subsitution_dictionary = return_substitution_dict(substitution_plugin_name)
send_words_to_queue(words_array, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)
words_array = []
if (len(words_array) <= break_up_queue) and (len(words_array) > 0) :
for substitution_plugin_name in substitution_plugin_names:
subsitution_dictionary = return_substitution_dict(substitution_plugin_name)
send_words_to_queue(words_array, subsitution_dictionary, policy_mutate_plugin_names, policy_select_plugin_names, output_file)
f.close()
f = None
# total word count for output file...
total_word_count = -1
with open(output_file, 'r', errors='replace') as f:
for total_word_count, l in enumerate(f):
pass
if total_word_count == -1:
total_word_count = 0
elif total_word_count >= 0:
total_word_count += 1
elapsed_time = time.time() - start_time
self.textBrowser_status_msgs_brief.clear()
self.textBrowser_status_msgs.append("Finished Mutating temp word dictionary")
#------------------------------------
# Clean up temporary files
#------------------------------------
self.textBrowser_status_msgs.append("Cleaning up temporary data....")
belvaRemoveDB(MD5_string)
self.textBrowser_status_msgs.append("Please Find the final custom dictionary here:")
self.textBrowser_status_msgs.append(output_file)
self.textBrowser_status_msgs.append("Total number of words in output file: " + str(total_word_count))
self.textBrowser_status_msgs.append("Elapsed run time: " + str(datetime.timedelta(seconds=int(elapsed_time))))
self.textBrowser_status_msgs.append("FINISHED!!!")
def input_src_dir(self):
directory = QtGui.QFileDialog.getExistingDirectory(self,"Pick a folder")
self.lineEdit_input_src_dir.clear()
self.lineEdit_input_src_dir.setText(directory)
def output_src_dir(self):
output_file = QtGui.QFileDialog.getOpenFileName(self,"Pick Output File")
self.lineEdit_output_src_dir.clear()
self.lineEdit_output_src_dir.setText(output_file)
def launch_gui():
app = QtGui.QApplication(sys.argv)
form = BELVA_AppUI()
form.show()
app.exec_()
| gpl-3.0 | -2,192,608,054,345,816,600 | 42.345733 | 184 | 0.558635 | false | 4.43452 | false | false | false |
minimalparts/PeARS | app/utilities/mkPositionalIndex.py | 2 | 3855 | #Make/update positional index for a given file
#The index has the form
#word,word_freq:doc1,freq_in_doc1[pos1, pos2,...posn];doc2,freq_in_doc2[pos1, pos2,...posn], etc
#to,993427:23,2[3, 6];35,1[34];
import sys
import os
import re
path_to_PeARS = os.path.dirname(__file__)
index={} #This is the positional index, of the form word:WordEntry
individual_index={} #This is the positional index for the individual file
word_positions={} #This is a temporary dictionary with positions for each word in the document being processed
#WordEntry contains all the information pertaining to a word
#(freq, docs in which it appears)
class WordEntry:
def __init__(self, freq):
self.freq=freq
self.docs=[]
#Load existing index file
def load_index(path_to_index):
index_file=open(path_to_index)
for line in index_file:
try:
line=line.rstrip('\n')
pair=line.split(':')
word_freq=pair[0].split(',')
word=word_freq[0]
freq=int(word_freq[1])
index[word]=WordEntry(freq)
docs=pair[1].rstrip(';') #Remove last ; before splitting
docs=docs.split(';')
for d in docs:
name=d.split(',')[0]
m=re.search(',([0-9]+)',d)
dfreq=0
if m:
dfreq_str=m.group(1)
dfreq=int(dfreq_str)
#print name,dfreq
m=re.search(',[0-9]+\[(.*)\]',d)
positions=[]
if m:
positions_strings=m.group(1).split(", ")
#print positions_strings
positions=[]
for p in positions_strings:
intp=int(p)
positions.append(intp)
index[word].docs.append([name,dfreq,positions])
except:
#print "ERROR processing",line
continue
##########################################
#Read new document
##########################################
def readWordPositions(input_file):
c=0 #Initialise wordcount for this document
f=open(input_file,'r')
for line in f:
line=line.rstrip('\n')
words=line.split()
for w in words:
m=re.search('(.*_.).*',w)
if m:
w=m.group(1)
c+=1
if w not in word_positions:
word_positions[w]=[c]
else:
word_positions[w].append(c)
def mkWordEntries(docname):
for k,v in word_positions.items():
#General index
if k not in index:
entry=WordEntry(len(v))
entry.docs.append([docname,len(v),v])
index[k]=entry
else:
entry=index[k]
entry.freq+=len(v)
entry.docs.append([docname,len(v),v])
index[k]=entry
#Individual index
if k not in individual_index:
entry=WordEntry(len(v))
entry.docs.append([docname,len(v),v])
individual_index[k]=entry
else:
entry=individual_index[k]
entry.freq+=len(v)
entry.docs.append([docname,len(v),v])
individual_index[k]=entry
def writeIndex(path_to_index):
out=open(path_to_index,'w')
for k,v in index.items():
line=k+","+str(v.freq)+":"
for d in v.docs:
line=line+d[0]+","+str(d[1])+str(d[2])+";"
out.write(line+'\n')
def writeIndividualIndex(path_to_ind_index):
out=open(path_to_ind_index,'w')
for k,v in individual_index.items():
line=k+","+str(v.freq)+":"
for d in v.docs:
line=line+d[0]+","+str(d[1])+str(d[2])+";"
out.write(line+'\n')
def runScript(a1,a2):
input_file=a1
pear = a2 #The pear which will host these pages (local folder or Raspberry Pi)
index.clear()
individual_index.clear()
word_positions.clear()
if os.path.exists(input_file):
path_to_index=os.path.join(path_to_PeARS, pear+"/index.txt")
if os.path.exists(path_to_index):
load_index(path_to_index)
m=re.search(".*\/(.*)\.txt",input_file)
docname=m.group(1)
path_to_ind_index=os.path.join(path_to_PeARS, pear+"/indexes/"+docname+".txt")
readWordPositions(input_file)
mkWordEntries(docname)
writeIndex(path_to_index)
writeIndividualIndex(path_to_ind_index)
else:
print "ERROR: file",input_file,"does not exist. Bye."
# when executing as script
if __name__ == '__main__':
runScript(sys.argv[1],sys.argv[2]) #Input file and pear
| mit | -4,771,580,195,341,388,000 | 25.22449 | 110 | 0.642542 | false | 2.722458 | false | false | false |
pydanny/cached-property | setup.py | 1 | 1838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import codecs
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
__version__ = "1.5.2"
def read(fname):
return codecs.open(
os.path.join(os.path.dirname(__file__), fname), "r", "utf-8"
).read()
readme = read("README.md")
history = read("HISTORY.md")
if sys.argv[-1] == "publish":
try:
import wheel
import twine
except: # Yes, this is not how we usually do try/except
raise ImportError('Run "pip install wheel twine"')
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
os.system("git tag -a %s -m 'version %s'" % (__version__, __version__))
os.system("git push --tags")
sys.exit()
setup(
name="cached-property",
version=__version__,
description="A decorator for caching properties in classes.",
long_description=readme + "\n\n" + history,
    long_description_content_type="text/markdown",
author="Daniel Greenfeld",
author_email="[email protected]",
url="https://github.com/pydanny/cached-property",
py_modules=["cached_property"],
include_package_data=True,
license="BSD",
zip_safe=False,
keywords="cached-property",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| bsd-3-clause | -2,087,425,194,871,310,800 | 27.71875 | 75 | 0.610446 | false | 3.75102 | false | false | false |
tensorflow/examples | lite/examples/model_personalization/converter/tfltransfer/tflite_transfer_converter_test.py | 1 | 7290 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tflite_transfer_converter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import unittest
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
# pylint: disable=g-bad-import-order
from tfltransfer import bases
from tfltransfer import heads
from tfltransfer import optimizers
from tfltransfer import tflite_transfer_converter
# pylint: enable=g-bad-import-order
DEFAULT_INPUT_SIZE = 64
DEFAULT_BATCH_SIZE = 128
LEARNING_RATE = 0.001
class TestTfliteTransferConverter(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestTfliteTransferConverter, cls).setUpClass()
cls._default_base_model_dir = tempfile.mkdtemp('tflite-transfer-test-base')
model = tf.keras.Sequential([
layers.Dense(
units=DEFAULT_INPUT_SIZE, input_shape=(DEFAULT_INPUT_SIZE,))
])
model.build()
model.save(cls._default_base_model_dir, save_format='tf')
def setUp(self):
super(TestTfliteTransferConverter, self).setUp()
self._default_base_model = bases.SavedModelBase(
TestTfliteTransferConverter._default_base_model_dir)
def test_mobilenet_v2_saved_model_and_keras_model(self):
input_size = DEFAULT_INPUT_SIZE
output_size = 5
head_model = tf.keras.Sequential([
layers.Dense(
units=32,
input_shape=(input_size,),
activation='relu',
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)),
layers.Dense(
units=output_size,
kernel_regularizer=l2(0.01),
bias_regularizer=l2(0.01)),
])
head_model.compile(loss='categorical_crossentropy', optimizer='sgd')
converter = tflite_transfer_converter.TFLiteTransferConverter(
output_size, self._default_base_model, heads.KerasModelHead(head_model),
optimizers.SGD(LEARNING_RATE), DEFAULT_BATCH_SIZE)
models = converter._convert()
parameter_shapes = [(input_size, 32), (32,), (32, output_size),
(output_size,)]
self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
self.assertSignatureEqual(models['bottleneck'], [(1, input_size)],
[(1, input_size)])
self.assertSignatureEqual(models['inference'],
[(1, input_size)] + parameter_shapes,
[(1, output_size)])
self.assertSignatureEqual(models['optimizer'],
parameter_shapes + parameter_shapes,
parameter_shapes)
def test_mobilenet_v2_saved_model_and_softmax_classifier_model(self):
input_size = DEFAULT_INPUT_SIZE
output_size = 5
batch_size = DEFAULT_BATCH_SIZE
converter = tflite_transfer_converter.TFLiteTransferConverter(
output_size, self._default_base_model,
heads.SoftmaxClassifierHead(batch_size, (input_size,), output_size),
optimizers.SGD(LEARNING_RATE), batch_size)
models = converter._convert()
parameter_shapes = [(input_size, output_size), (output_size,)]
self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
self.assertSignatureEqual(models['bottleneck'], [(1, input_size)],
[(1, input_size)])
self.assertSignatureEqual(models['train_head'],
[(batch_size, input_size),
(batch_size, output_size)] + parameter_shapes,
[()] + parameter_shapes)
self.assertSignatureEqual(models['inference'],
[(1, input_size)] + parameter_shapes,
[(1, output_size)])
self.assertSignatureEqual(models['optimizer'],
parameter_shapes + parameter_shapes,
parameter_shapes)
def test_mobilenet_v2_base_and_softmax_classifier_model(self):
input_size = 224
output_size = 5
batch_size = DEFAULT_BATCH_SIZE
base = bases.MobileNetV2Base(image_size=input_size)
head = heads.SoftmaxClassifierHead(batch_size, base.bottleneck_shape(),
output_size)
optimizer = optimizers.SGD(LEARNING_RATE)
converter = tflite_transfer_converter.TFLiteTransferConverter(
output_size, base, head, optimizer, batch_size)
models = converter._convert()
parameter_shapes = [(7 * 7 * 1280, output_size), (output_size,)]
self.assertSignatureEqual(models['initialize'], [()], parameter_shapes)
self.assertSignatureEqual(models['bottleneck'],
[(1, input_size, input_size, 3)],
[(1, 7, 7, 1280)])
self.assertSignatureEqual(models['train_head'],
[(batch_size, 7, 7, 1280),
(batch_size, output_size)] + parameter_shapes,
[()] + parameter_shapes)
self.assertSignatureEqual(models['inference'],
[(1, 7, 7, 1280)] + parameter_shapes,
[(1, output_size)])
self.assertSignatureEqual(models['optimizer'],
parameter_shapes + parameter_shapes,
parameter_shapes)
def test_mobilenet_v2_base_and_softmax_classifier_model_adam(self):
input_size = 224
output_size = 5
batch_size = DEFAULT_BATCH_SIZE
base = bases.MobileNetV2Base(image_size=input_size)
head = heads.SoftmaxClassifierHead(batch_size, base.bottleneck_shape(),
output_size)
optimizer = optimizers.Adam()
converter = tflite_transfer_converter.TFLiteTransferConverter(
output_size, base, head, optimizer, batch_size)
models = converter._convert()
param_shapes = [(7 * 7 * 1280, output_size), (output_size,)]
self.assertSignatureEqual(
models['optimizer'],
param_shapes + param_shapes + param_shapes + param_shapes + [()],
param_shapes + param_shapes + param_shapes + [()])
def assertSignatureEqual(self, model, expected_inputs, expected_outputs):
interpreter = tf.lite.Interpreter(model_content=model)
inputs = [
input_['shape'].tolist() for input_ in interpreter.get_input_details()
]
outputs = [
output['shape'].tolist() for output in interpreter.get_output_details()
]
self.assertEqual(inputs, [list(dims) for dims in expected_inputs])
self.assertEqual(outputs, [list(dims) for dims in expected_outputs])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,388,310,416,511,532,000 | 39.054945 | 80 | 0.627846 | false | 4.086323 | true | false | false |
ndparker/tdi | tdi/markup/soup/parser.py | 1 | 27632 | # -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2006 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=====================
Markup Parser Logic
=====================
Soup Parser
~~~~~~~~~~~
This module provides a very lenient HTML/XML lexer. The `SoupLexer` class is
initialized with a listener object, which receives all low level events
(like starttag, endtag, text etc). Listeners must implement the
`ListenerInterface`.
On top of the lexer there's the `SoupParser` class, which actually implements the
`ListenerInterface` itself (the parser listens to the lexer). The parser adds
HTML semantics to the lexed data and passes the events to a building listener
(`BuildingListenerInterface`). In addition to the events sent by the lexer the
`SoupParser` class generates endtag events (with empty data arguments) for
implicitly closed elements. Furthermore it knows about CDATA elements like
``<script>`` or ``<style>`` and modifies the lexer state accordingly.
The actual semantics are provided by a DTD query class (implementing
`DTDInterface`.)
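
A minimal sketch of wiring lexer and listener together (illustrative
only; ``my_listener`` is assumed to be an object implementing
`ListenerInterface`)::

    lexer = SoupLexer(my_listener)
    lexer.feed('<p>Hello</p>')   # emits starttag, text and endtag events
    lexer.finalize()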
"""
if __doc__:
# pylint: disable = redefined-builtin
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import re as _re
from ..._exceptions import LexerEOFError, LexerFinalizedError
from ... import interfaces as _interfaces
from . import dtd as _dtd
class SoupLexer(object):
"""
(X)HTML Tagsoup Lexer
The lexer works hard to preserve the original data. In order to achieve
this goal, it does not validate the input and recognizes its input in a
quite lenient way.
:Groups:
- `Lexer states` :
`TEXT`,
`CDATA`,
`MARKUP`,
`STARTTAG`,
`ENDTAG`,
`COMMENT`,
`MSECTION`,
`DECL`,
`PI`,
`EMPTY`,
`FINAL`
- `Regex Matchers` :
`_START_MATCH`,
`_ATT_ITER`,
`_COMMENT_SEARCH`,
`_MSECTION_MATCH`,
`_MSECTIONINVALID_MATCH`,
`_MEND_SEARCH`,
`_MSEND_SEARCH`,
`_DECL_MATCH`
:CVariables:
`TEXT` : ``int``
Lexer state ``TEXT`` (between tags)
`CDATA` : ``int``
Lexer state ``CDATA`` (between (P)CDATA tags)
`MARKUP` : ``int``
Lexer state ``MARKUP`` (``<``)
`STARTTAG` : ``int``
Lexer state ``STARTTAG`` (``<[letter]``)
`ENDTAG` : ``int``
Lexer state ``ENDTAG`` (``</``)
`COMMENT` : ``int``
Lexer state ``COMMENT`` (``<!--``)
`MSECTION` : ``int``
Lexer state ``MSECTION`` (``<![``)
`DECL` : ``int``
Lexer state ``DECL`` (``<!``)
`PI` : ``int``
Lexer state ``PI`` (``<?``)
`EMPTY` : ``int``
Lexer state ``EMPTY`` (``<>``)
`FINAL` : ``int``
Lexer state ``FINAL``
`_LEXERS` : ``tuple``
The state lexer method names (``('method', ...)``)
`_STATES` : ``tuple``
The state names (``('name', ...)``)
:IVariables:
`_state` : ``int``
The current lexer state
`_lexers` : ``list``
The state lexer methods (``[method, ...]``)
`_listener` : `ListenerInterface`
The listener the events shall be sent to
`_buffer` : ``str``
Current unprocessed buffer
`_conditional_ie_comments` : ``bool``
Handle conditional IE comments as text?
"""
# pylint: disable = no-member
def __init__(self, listener, conditional_ie_comments=True):
r"""
Initialization
:Parameters:
`listener` : `ListenerInterface`
The event listener
`conditional_ie_comments` : ``bool``
Handle conditional IE comments as text?
Conditional comments are described in full detail
at `MSDN`_\.
.. _MSDN: http://msdn.microsoft.com/en-us/library/
ms537512%28v=vs.85%29.aspx
"""
self._listener = listener
self._normalize = None
self._cdata_name = None
self._state = self.TEXT
self._lexers = [getattr(self, name) for name in self._LEXERS]
self._buffer = ''
self._conditional_ie_comments = bool(conditional_ie_comments)
def feed(self, food):
"""
Feed the lexer with new data
:Parameters:
`food` : ``str``
The data to process
"""
self._buffer += food
self._lex()
def finalize(self):
"""
Finalize the lexer
This processes the rest buffer (if any)
:Exceptions:
- `LexerEOFError` : The rest buffer could not be consumed
"""
self._lex()
if self._buffer:
raise LexerEOFError(
"Unfinished parser state %s" % self._STATES[self._state]
)
self._state = self.FINAL
def cdata(self, normalize, name):
""" Set CDATA state """
if self._state != self.FINAL:
self._state = self.CDATA
self._normalize = normalize
self._cdata_name = normalize(name)
def _lex(self):
""" Parse the current buffer """
while self._buffer:
if self._lexers[self._state]():
break
def _lex_text(self):
"""
Text lexer
State: We are between tags or at the very beginning of the document
and look for a ``<``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('<')
if pos == 0:
self._state = self.MARKUP
return False
elif pos == -1:
self._buffer = ''
else:
self._buffer, data = data[pos:], data[:pos]
self._state = self.MARKUP
self._listener.handle_text(data)
return False
def _lex_cdata(self):
"""
(PR)CDATA lexer
State: We are inside a text element and looking for the end tag only
:Return: Unfinished state?
:Rtype: ``bool``
"""
incomplete = False
data, pos = self._buffer, 0
while True:
pos = data.find('<', pos)
if pos == -1:
pos = len(data)
self._buffer = ''
break
else:
char = data[pos + 1:pos + 2]
if char == '/':
self._state = self.ENDTAG
break
elif char == '':
incomplete = True
break
else:
pos += 1
if pos > 0:
self._buffer, data = data[pos:], data[:pos]
self._listener.handle_text(data)
return incomplete
#: Regex matcher for a tagname character
#:
#: :Type: ``callable``
_TAGNAME_MATCH = _re.compile(r'[a-zA-Z0-9]').match
def _lex_markup(self):
"""
Markup lexer
State: We've hit a ``<`` character and now find out, what it's
becoming
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
if len(data) < 2:
return True
char = data[1]
state = (self.ENDTAG, self.DECL, self.PI, self.EMPTY, -1)[
"/!?>".find(char)
]
if state == -1:
if self._TAGNAME_MATCH(char):
state = self.STARTTAG
else:
state = self.TEXT
self._buffer = data[1:]
self._listener.handle_text(data[0])
self._state = state
return False
#: Regex matcher for a start tag
#:
#: :Type: ``callable``
_START_MATCH = _re.compile(r'''
<
(?P<name>[^ \t\r\n\f/>]+)
(?P<attr>
[^"'>]*
(?:
(?:
"[^"]*"
| '[^']*'
)
[^"'>]*
)*
)
[ \t\r\n\f]*
>
''', _re.X).match
#: Regex iterator for extracting start tag attributes
#:
#: :Type: ``callable``
_ATT_ITER = _re.compile(r'''
[ \t\r\n\f]*
(?P<name>(?:/|[^ \t\r\n\f/=>]*)) # attribute name
[ \t\r\n\f]*
(?:
=
(?P<value> # optional value
[ \t\r\n\f]*"[^"]*"
| [ \t\r\n\f]*'[^']*'
| [^ \t\r\n\f/>]*
)
)?
''', _re.X).finditer
def _lex_start(self):
"""
Starttag lexer
State: We've hit a ``<x`` and now look for the ``>``.
:Return: Unfinished State?
:Rtype: ``bool``
"""
data = self._buffer
match = self._START_MATCH(data)
if match is None:
return True
pos = match.end()
self._buffer, data = data[pos:], data[:pos]
name, attrstring = match.group('name', 'attr')
attr, closed = [], False
if attrstring:
for match in self._ATT_ITER(attrstring):
key, value = match.group('name', 'value')
if key == '/' and value is None:
closed = True
continue
if key or value is not None:
if value:
value = value.strip()
attr.append((key.strip(), value))
else: # bug in Python < 2.3.5 (fixed in rev 37262)
break
self._state = self.TEXT
self._listener.handle_starttag(name, attr, closed, data)
return False
def _lex_end(self):
"""
Endtag lexer
State: We've hit ``</``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('>') + 1
if pos == 0:
return True
self._buffer, data = data[pos:], data[:pos]
name = data[2:-1].strip()
if self._cdata_name is not None and \
self._normalize(name) != self._cdata_name:
self._state = self.CDATA
self._listener.handle_text(data)
else:
self._cdata_name = self._normalize = None
self._state = self.TEXT
self._listener.handle_endtag(name, data)
return False
#: Regex searcher for finding the end of a comment
#:
#: :Type: ``callable``
_COMMENT_SEARCH = _re.compile(r'--[ \t\r\n\f]*>').search
#: Regex searcher for matching IE conditional comment
#:
#: :Type: ``callable``
_IE_COMMENT_MATCH = _re.compile(r'''
\[[ \t\r\n\f]* (?:
[iI][fF] | [eE][lL][sS][eE] | [eE][nN][dD][iI][fF]
) [^\]]+]>
''', _re.X).match
def _lex_comment(self):
"""
Comment lexer
State: We've hit ``<!--``.
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
if len(data) < 7:
return True
if self._conditional_ie_comments:
match = iec = self._IE_COMMENT_MATCH(data, 4)
else:
match = iec = None
if match is None:
match = self._COMMENT_SEARCH(data, 4)
if match is None:
return True
pos = match.end()
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
if iec:
self._listener.handle_text(data)
else:
self._listener.handle_comment(data)
return False
#: List of MS-specific marked section names (lowercased)
#:
#: :Type: ``tuple``
_MSSECTIONS = ('if', 'else', 'endif')
#: Regex matcher for the start of a marked section
#:
#: :Type: ``callable``
_MSECTION_MATCH = _re.compile(r'''
<!\[[ \t\r\n\f]*(?P<name>[^\][ \t\r\n\f>]+)(?=[\][ \t\r\n\f>])
''', _re.X).match
#: Regex matcher for the start of an invalid marked section
#:
#: :Type: ``callable``
_MSECTIONINVALID_MATCH = _re.compile(r'<!\[[ \t\r\n\f]*[\][>]').match
#: Regex searcher for the end of a marked section
#:
#: :Type: ``callable``
_MEND_SEARCH = _re.compile(r'][ \t\r\n\f]*][ \t\r\n\f]*>').search
#: Regex searcher for the end of a MS specific marked section
#:
#: :Type: ``callable``
_MSEND_SEARCH = _re.compile(r'][ \t\r\n\f]*(?:--)?[ \t\r\n\f]*>').search
def _lex_msection(self):
"""
Marked section lexer
State: We've hit a ``<![`` and now seek the end
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
match = self._MSECTION_MATCH(data)
if match is None:
match = self._MSECTIONINVALID_MATCH(data)
if match is not None: # pass invalid msection as text
pos = match.end()
self._buffer = data[pos:]
data = data[:pos]
self._state = self.TEXT
self._listener.handle_text(data)
return False
return True
name = match.group('name')
start = match.end()
if self._conditional_ie_comments and name.lower() in self._MSSECTIONS:
match = iec = self._MSEND_SEARCH(data, start)
else:
pos = data.find('[', start)
if pos >= 0:
start = pos + 1
match = self._MEND_SEARCH(data, start)
iec = None
if match is None:
return True
pos, end = match.end(), match.start()
value = data[start:end]
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
if iec:
self._listener.handle_text(data)
else:
self._listener.handle_msection(name, value, data)
return False
#: Regex matcher for a complete declaration
#:
#: This regex seems a bit nasty, but it should catch all stuff allowed
#: in declarations (including doctype). Some day, it probably needs to
#: be replaced it by real lexer states...
#:
#: :Type: ``callable``
_DECL_MATCH = _re.compile(r'''
<!
(?P<name>[^\][ \t\r\n\f>]*)
(?P<value>
[^"'<>-]* # any nonspecial
(?:
(?:
"[^"]*" # double quoted string
| '[^']*' # single quoted string (valid?)
| <!\[ # marked section
[^\]]*
(?:
](?![ \t\r\n\f]*][ \t\r\n\f]*>)
[^\]]*
)*
][ \t\r\n\f]*][ \t\r\n\f]*>
| <(?!!\[) # declaration
# hopefully not a doctype
# (but unlikely, because we are
# probably already in a DT subset)
[^"'>-]*
(?:
(?:
"[^"]*"
| '[^']*'
| -- # comment
[^-]*
(?:-[^-]+)*
--
| -(?!-) # just a hyphen
)
[^"'>-]*
)*
>
| -- # comment
[^-]*
(?:-[^-]+)*
--
| -(?!-) # just a hyphen
)
[^"'<>-]* # more non-specials
)*
)
>
''', _re.X).match
def _lex_decl(self):
"""
Declaration lexer
State: We've hit a ``<!`` and now peek inside
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
if len(data) < 3:
return True
if data.startswith('<!--'):
self._state = self.COMMENT
return False
elif data.startswith('<!['):
self._state = self.MSECTION
return False
elif data == '<!-':
return True
match = self._DECL_MATCH(data)
if match is None:
return True
name, value = match.group('name', 'value')
pos = match.end()
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
self._listener.handle_decl(name, value.strip(), data)
return False
def _lex_pi(self):
"""
Processing instruction lexer
State: We've hit a ``<?`` and now peek inside
:Return: Unfinished state?
:Rtype: ``bool``
"""
data = self._buffer
pos = data.find('?>', 2)
if pos == -1:
return True
pos += 2
self._buffer, data = data[pos:], data[:pos]
self._state = self.TEXT
self._listener.handle_pi(data)
return False
def _lex_empty(self):
"""
Empty tag lexer
State: We've hit a ``<>``
:Return: Unfinished state?
:Rtype: ``bool``
"""
self._buffer, data = self._buffer[2:], self._buffer[:2]
self._state = self.TEXT
self._listener.handle_starttag('', [], False, data)
return False
def _lex_final(self):
"""
Called after the lexer was finalized
State: after all
:Exceptions:
- `LexerFinalizedError` : The lexer was already finalized
(raised always)
"""
raise LexerFinalizedError("The lexer was already finalized")
_LEXERS = []
_STATES = []
for _idx, (_statename, _funcname) in enumerate([
# pylint: disable = bad-whitespace
('FINAL', '_lex_final'),
('TEXT', '_lex_text'),
('CDATA', '_lex_cdata'),
('MARKUP', '_lex_markup'),
('STARTTAG', '_lex_start'),
('ENDTAG', '_lex_end'),
('COMMENT', '_lex_comment'),
('MSECTION', '_lex_msection'),
('DECL', '_lex_decl'),
('PI', '_lex_pi'),
('EMPTY', '_lex_empty'),
]): # noqa
setattr(SoupLexer, _statename, _idx)
_LEXERS.append(_funcname)
_STATES.append(_statename)
SoupLexer._LEXERS = tuple(_LEXERS) # pylint: disable = protected-access
SoupLexer._STATES = tuple(_STATES) # pylint: disable = protected-access
del _idx, _statename, _funcname # pylint: disable = undefined-loop-variable
del _LEXERS, _STATES
from ... import c
c = c.load('impl')
if c is not None:
DEFAULT_LEXER = c.SoupLexer
else:
DEFAULT_LEXER = SoupLexer # pylint: disable = invalid-name
del c
class SoupParser(object):
"""
=========================
(X)HTML Tag Soup Parser
=========================
Overview
~~~~~~~~
The parser is actually a tagsoup parser by design in order to process
most of the "HTML" that can be found out there. Of course, if the HTML
is well-formed and valid, this would be the best. There is only as
much HTML syntax applied as necessary to parse it. You can influence
these syntax definitions by picking another lexer. You can change
the semantics by picking another dtd query class.
This parser guarantees that for each non-self-closing starttag event an
endtag event is also generated (if the endtag is not actually there, the
data parameter is an empty string). This also happens for empty tags (like
``br``). On the other hand, there may be more endtag events than starttag
events, because of unbalanced or wrongly nested tags.
    Special constructs (comments, PIs, marked sections and declarations) may
    occur anywhere, i.e. they do not implicitly close any open elements.
The default lexer does not deal with NET tags (<h1/Heading/). Neither
does it handle unfinished starttags by SGML rules like ``<map<area>``.
It *does* know about empty tags (``<>`` and ``</>``).
CDATA elements and comments are handled in a simplified way. Once
    the particular state is entered, it is only left when the accompanying
    end marker is found (``<script>...</script>``, ``<!-- ... -->``).
Anything in between is text.
How is it used?
~~~~~~~~~~~~~~~
The parser API is "streamy" on the input side and event based on the
output side. So, what you need first is a building listener, which will
    receive all generated parser events and process them. Such a listener
    object is expected to implement the `BuildingListenerInterface`.
Now you create a `SoupParser` instance and pass the listener object to
    the constructor and the parser is ready to be fed. You can feed as many
    chunks of input data as you like into the parser by using the `feed`
    method. Every feed call may generate multiple events on the output side.
When you're done feeding, call the parser's `finalize` method in order
to clean up. This also flushes pending events to the listener.
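
    A minimal usage sketch (illustrative only; ``my_listener`` is assumed
    to implement `BuildingListenerInterface`)::

        parser = SoupParser.html(my_listener)
        for chunk in chunks:        # chunks: any iterable of input strings
            parser.feed(chunk)
        parser.finalize()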
:IVariables:
`listener` : `BuildingListenerInterface`
The building listener to send the events to
`lexer` : `SoupLexer`
The lexer instance
`_tagstack` : ``list``
The current tag stack
`_inempty` : ``bool``
indicates if the last tag on the stack is an empty one
`_lastopen` : ``str``
Stores the last seen open tag name
"""
__implements__ = [
_interfaces.ListenerInterface, _interfaces.ParserInterface
]
def __init__(self, listener, dtd, lexer=None):
"""
Initialization
:Parameters:
`listener` : `ListenerInterface`
The building listener
`dtd` : `DTDInterface`
DTD query object
`lexer` : ``callable``
            Lexer class/factory. This must be a callable taking an
event listener and returning a lexer instance. If omitted or
``None``, the default lexer will be used (`DEFAULT_LEXER`).
"""
self._tagstack, self._inempty, self._lastopen = [], False, ''
self.listener = listener
self._is_nestable = dtd.nestable
self._is_cdata = dtd.cdata
self._is_empty = dtd.empty
if lexer is None:
lexer = DEFAULT_LEXER
self._lexer = lexer(self)
self._normalize = listener.decoder.normalize
@classmethod
def html(cls, listener):
"""
Construct a parser using the `HTMLDTD`
:Parameters:
`listener` : `BuildingListenerInterface`
The building listener
:Return: The new parser instance
:Rtype: `SoupParser`
"""
return cls(listener, _dtd.HTMLDTD())
@classmethod
def xml(cls, listener):
"""
Construct a parser using the `XMLDTD`
:Parameters:
`listener` : `ListenerInterface`
The building listener
:Return: The new parser instance
:Rtype: `SoupParser`
"""
return cls(listener, _dtd.XMLDTD())
def _close_empty(self):
""" Ensure we close last empty tag """
if self._inempty:
self._inempty = False
self.listener.handle_endtag(self._tagstack.pop()[1], '')
#########################################################################
# ListenerInterface #####################################################
#########################################################################
def handle_text(self, data):
""" :See: `ListenerInterface` """
self._close_empty()
self.listener.handle_text(data)
def handle_starttag(self, name, attrs, closed, data):
""" :See: `ListenerInterface` """
self._close_empty()
if name == '' and not attrs:
name = self._lastopen
else:
self._lastopen = name
tagstack = self._tagstack
nestable = self._is_nestable
starttag = self._normalize(name)
while tagstack and not nestable(tagstack[-1][0], starttag):
self.listener.handle_endtag(tagstack.pop()[1], '')
if closed:
self.listener.handle_starttag(name, attrs, closed, data)
else:
if self._is_cdata(starttag):
self._lexer.cdata(self._normalize, starttag)
self.listener.handle_starttag(name, attrs, closed, data)
tagstack.append((starttag, name))
if self._is_empty(starttag):
self._inempty = True
def handle_endtag(self, name, data):
""" :See: `ListenerInterface` """
tagstack = self._tagstack
if tagstack:
if name == '':
name = tagstack[-1][1]
endtag = self._normalize(name)
if endtag in dict(tagstack):
toclose, original = tagstack.pop()
self._inempty = False
while toclose != endtag:
self.listener.handle_endtag(original, '')
toclose, original = tagstack.pop()
self._close_empty()
self.listener.handle_endtag(name, data)
def handle_comment(self, data):
""" :See: `ListenerInterface` """
self._close_empty()
self.listener.handle_comment(data)
def handle_msection(self, name, value, data):
""" :See: `ListenerInterface` """
self._close_empty()
self.listener.handle_msection(name, value, data)
def handle_decl(self, name, value, data):
""" :See: `ListenerInterface` """
self._close_empty()
self.listener.handle_decl(name, value, data)
def handle_pi(self, data):
""" :See: `ListenerInterface` """
self._close_empty()
self.listener.handle_pi(data)
def handle_escape(self, escaped, data):
""" :See: `ListenerInterface` """
# pylint: disable = unused-argument
raise AssertionError()
#########################################################################
# ParserInterface #######################################################
#########################################################################
def feed(self, food):
""" :See: `ParserInterface` """
self._lexer.feed(food)
def finalize(self):
"""
:See: `ParserInterface`
:Exceptions:
- `LexerEOFError` : EOF in the middle of a state
"""
if self._lexer is not None:
self._lexer, _ = None, self._lexer.finalize() # noqa
tagstack = self._tagstack
while tagstack:
self.listener.handle_endtag(tagstack.pop()[1], '')
from ... import c
c = c.load('impl')
if c is not None:
DEFAULT_PARSER = c.SoupParser
else:
DEFAULT_PARSER = SoupParser # pylint: disable = invalid-name
del c
| apache-2.0 | 503,483,889,974,661,100 | 28.648069 | 78 | 0.501846 | false | 4.194293 | false | false | false |
olabini/ioke | share/pygments/lexer.py | 2 | 6882 | #add IokeLexer to __all__ at the top of agile.py like so:
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
'MiniDLexer', 'IoLexer', 'IokeLexer', 'TclLexer', 'ClojureLexer',
'Python3Lexer', 'Python3TracebackLexer']
#Then insert the following IokeLexer with the other class definitions:
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
#Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
#Symbols
(r':[a-zA-Z0-9_!:?]+', String.Symbol),
(r'[a-zA-Z0-9_!:?]+:(?![a-zA-Z0-9_!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
#Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()|(?<=dsyntax\())[\s\n\r]*"', String.Doc, 'documentation'),
#Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
#Mimic
(r'[a-zA-Z0-9_][a-zA-Z0-9!?_:]+(?=\s*=.*mimic\s)', Name.Entity),
#Assignment
(r'[a-zA-Z_][a-zA-Z0-9_!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))', Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|loop|p:for|p:for:dict|p:for:set|return|unless|until|while|with)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![a-zA-Z0-9!:_?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|documentation|hash|identity|mimic|removeCell\!|undefineCell\!)(?![a-zA-Z0-9!:_?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Literals
(r'(dict|list|message|set)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|case:otherwise|case:xor)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|removeMimic\!|same\?|send|thaw\!|uniqueHexId)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Aspects
(r'(after|around|before)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)(?![a-zA-Z0-9!:_?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
#DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|documentation|identity|removeCell!|undefineCell)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|internal:createDecimal|internal:createNumber|internal:createRegexp|internal:createText)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|invokeRestart|rescue|restart|signal\!|warn\!)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![a-zA-Z0-9!:_?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|Conditions|Definitions|FlowControl|Internal|Literals|Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|LexicalBlock|LexicalMacro|List|Message|Method|Mixins|NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp|Regexp Match|Rescue|Restart|Runtime|Sequence|Set|Symbol|System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
(ur'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(ur'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])', Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@|@@|\[|\]|\(|\)|{|})', Punctuation),
#kinds
(r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),
#default cellnames
(r'[a-z_][a-zA-Z0-9_!:?]*', Name)
]
}
| mit | -44,385,216,918,597,900 | 42.283019 | 562 | 0.487504 | false | 3.010499 | false | false | false |
FreeCX/programming-challenges | build.py | 1 | 1246 | #!/bin/env/python3
from pathlib import Path
CARGO_CONF = (
('name', '"programming-challenges"'),
('version', '"0.1.0"'),
('authors', '["Alexey Golubev <[email protected]>"]')
)
CARGO_DEPS = (('lazy_static', '"*"'), ('time', '"*"'))
CARGO_EXTRA = (
('sdl2', {
'version': '"*"',
'default-features': 'false',
'features': ['ttf']
}),
)
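# Illustrative TOML that extra(CARGO_EXTRA) is intended to emit for the
# entry above (assuming list values are rendered as TOML arrays):
#
#   [dependencies.sdl2]
#   version = "*"
#   default-features = false
#   features = ["ttf"]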
def extra(extra_list):
r = ''
fx = lambda x: f'"{x}"'
for item in extra_list:
name, extra = item
r += f'[dependencies.{name}]\n'
for k, v in extra.items():
            r += f'{k} = [' + ', '.join(map(fx, v)) + ']\n' if isinstance(v, list) else f'{k} = {v}\n'
return r
if __name__ == '__main__':
binary = lambda f: f'[[bin]]\nname = "{f.stem}"\npath = "{f}"\n'
config = lambda d: '\n'.join(map(lambda l: '{} = {}'.format(*l), d))
split = lambda f: f.suffix == '.rs'
binaries = sorted(map(binary, filter(split, Path('./src').iterdir())))
with open('Cargo.toml', 'w') as f:
f.write('[package]\n{}\n'.format(config(CARGO_CONF)))
f.write('\n{}\n'.format('\n'.join(binaries)))
f.write('[dependencies]\n{}'.format(config(CARGO_DEPS)))
f.write('\n\n{}'.format(extra(CARGO_EXTRA)))
| unlicense | -8,131,688,700,271,944,000 | 30.15 | 99 | 0.507223 | false | 2.897674 | false | false | false |
chrislit/abydos | abydos/distance/_lorentzian.py | 1 | 4266 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._lorentzian.
Lorentzian distance
"""
from math import log1p
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Lorentzian']
class Lorentzian(_TokenDistance):
r"""Lorentzian distance.
For two multisets X and Y drawn from an alphabet S, Lorentzian distance is
.. math::
dist_{Lorentzian}(X, Y) =
\sum_{i \in S} log(1 + |A_i - B_i|)
Notes
-----
No primary source for this measure could be located, but it is included
in surveys and catalogues, such as :cite:`Deza:2016` and :cite:`Cha:2008`.
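
    As an illustrative worked example (not drawn from the cited sources),
    for the multisets :math:`X = \{a, b\}` and :math:`Y = \{a, c\}`:

    .. math::

        dist_{Lorentzian}(X, Y) = log(1+0) + log(1+1) + log(1+1)
        = 2 log 2 \approx 1.386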
.. versionadded:: 0.4.0
"""
def __init__(
self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
) -> None:
"""Initialize Lorentzian instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
.. versionadded:: 0.4.0
"""
super(Lorentzian, self).__init__(tokenizer=tokenizer, **kwargs)
def dist_abs(self, src: str, tar: str) -> float:
"""Return the Lorentzian distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Lorentzian distance
Examples
--------
>>> cmp = Lorentzian()
>>> cmp.dist_abs('cat', 'hat')
2.772588722239781
>>> cmp.dist_abs('Niall', 'Neil')
4.852030263919617
>>> cmp.dist_abs('aluminum', 'Catalan')
10.1095256359474
>>> cmp.dist_abs('ATCG', 'TAGC')
6.931471805599453
.. versionadded:: 0.4.0
"""
self._tokenize(src, tar)
alphabet = self._total().keys()
return sum(
log1p(abs(self._src_tokens[tok] - self._tar_tokens[tok]))
for tok in alphabet
)
def dist(self, src: str, tar: str) -> float:
"""Return the normalized Lorentzian distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Normalized Lorentzian distance
Examples
--------
>>> cmp = Lorentzian()
>>> cmp.dist('cat', 'hat')
0.6666666666666667
>>> cmp.dist('Niall', 'Neil')
0.7777777777777778
>>> cmp.dist('aluminum', 'Catalan')
0.9358355851062377
>>> cmp.dist('ATCG', 'TAGC')
1.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 0.0
elif not src or not tar:
return 1.0
score = self.dist_abs(src, tar)
alphabet = self._total().keys()
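        # Normalize by the maximum attainable distance over this alphabet:
        # each token can contribute at most log(1 + max(src_count, tar_count)).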
return score / sum(
log1p(max(self._src_tokens[tok], self._tar_tokens[tok]))
for tok in alphabet
)
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | -7,457,465,277,585,206,000 | 25.012195 | 78 | 0.570323 | false | 3.975769 | false | false | false |
blahu/pynet-mat | LearnPythonCourse/class4/ex1.py | 1 | 1130 | #!/usr/bin/env python
#
# Class 4
# Exercise 1
# Author Mateusz Blaszczyk
""" I. Prompt a user to input an IP address. Re-using some of the
code from class3, exercise4--determine if the IP address is valid.
Continue prompting the user to re-input an IP address until a valid IP
address is input. """
# initialize ip_addr
ip_addr = ''
while True:
# initialize the valid flag
valid = True
# prompt user for IP addres
ip_addr = raw_input ("Please enter a valid IP address: ")
octets = ip_addr.split(".")
    if len(octets) != 4 or not all(octet.isdigit() for octet in octets):
        valid = False
    else:
        a,b,c,d = octets
if int(a) < 1 or int(a) > 223 or int(a) == 127:
valid = False
elif ( (int(b) not in range (0, 256)) or
(int(c) not in range (0, 256)) or
(int(d) not in range (0, 256))) :
valid = False
elif int(a) == 169 and int(b) == 254:
valid = False
if valid:
# break if valid, otherwise rinse and repeat
print "[%s] is valid!" % ip_addr
break
else:
print "[%s] is not valid!" % ip_addr
| apache-2.0 | 452,814,335,054,505,200 | 24.111111 | 70 | 0.555752 | false | 3.403614 | false | false | false |
franciscod/python-telegram-bot | telegram/utils/promise.py | 2 | 1487 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains the Promise class """
from threading import Event
class Promise(object):
"""A simple Promise implementation for the run_async decorator"""
def __init__(self, pooled_function, args, kwargs):
self.pooled_function = pooled_function
self.args = args
self.kwargs = kwargs
self.done = Event()
self._result = None
def run(self):
try:
self._result = self.pooled_function(*self.args, **self.kwargs)
except:
raise
finally:
self.done.set()
def result(self, timeout=None):
self.done.wait(timeout=timeout)
return self._result
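# A minimal usage sketch (illustrative only; in the library, Promise objects
# are normally created and run for handlers decorated with @run_async):
#
#     promise = Promise(my_func, args=(1, 2), kwargs={})
#     promise.run()                      # typically executed on a worker thread
#     value = promise.result(timeout=5)  # blocks until run() finishes or timeout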
| gpl-2.0 | -7,974,667,931,289,572,000 | 31.326087 | 74 | 0.683927 | false | 3.997312 | false | false | false |
EndyKaufman/django-postgres-angularjs-blog | app/project/validator.py | 1 | 2470 | # -*- coding: utf-8 -*-
from project import helpers
import resource
from django.utils.translation import get_language
def create(request):
data = request.DATA
if data is False:
return {'code': 'no_data'}, 404, False
data = helpers.set_null_values_if_not_exist(data, resource.get_fields())
if data['name'] is None:
return {'code': 'project/no_name'}, 404, False
if data['title_%s' % get_language()] is None:
return {'code': 'project/no_title'}, 404, False
if data['description_%s' % get_language()] is None:
return {'code': 'project/no_description'}, 404, False
user = helpers.get_user(request)
if not user or not request.user.is_superuser:
return {'code': 'no_access'}, 404, False
if user is None:
return {'code': 'account/not_active'}, 404, False
data, code, item = resource.get_item_by_name(request, data['name'])
if item is not False:
return {'code': 'project/exists', 'values': [data['name']]}, 404, False
return {'code': 'ok'}, 200, True
def update(request, project_id):
"""Update record"""
data = request.DATA
if data is False:
return {'code': 'no_data'}, 404, False
data = helpers.set_null_values_if_not_exist(data, resource.get_fields())
if data['name'] is None:
return {'code': 'project/no_name'}, 404, False
if data['title_%s' % get_language()] is None:
return {'code': 'project/no_title'}, 404, False
if data['description_%s' % get_language()] is None:
return {'code': 'project/no_description'}, 404, False
user = helpers.get_user(request)
if not user or not request.user.is_superuser:
return {'code': 'no_access'}, 404, False
if user is None:
return {'code': 'account/not_active'}, 404, False
data, code, item = resource.get_item_by_name(request, data['name'])
if (item is not False) and (int(item.id) != int(project_id)):
        return {'code': 'project/exists', 'values': [data['name']]}, 404, False
return {'code': 'ok'}, 200, True
def delete(request):
"""Update record"""
data = request.DATA
if data is False:
return {'code': 'no_data'}, 404, False
user = helpers.get_user(request)
if not user or not request.user.is_superuser:
return {'code': 'no_access'}, 404, False
if user is None:
return {'code': 'account/not_active'}, 404, False
return {'code': 'ok'}, 200, True
| mit | 733,088,023,179,624,400 | 28.404762 | 79 | 0.606478 | false | 3.57971 | false | false | false |