repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (string, 1–5 chars) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
MattAgile/workshop-ecosystem
|
jenkins/setup.py
|
24
|
1134
|
#!/usr/bin/env python3
from datetime import datetime, timezone
from os import makedirs
from os.path import dirname, abspath, join, basename
from shlex import split
from shutil import rmtree
from subprocess import run
FORMAT = 'html'
SECOND = 1
MINUTE = 60 * SECOND
START_TIME = datetime.now()
sourcedir = dirname(abspath(__file__))
project_name = basename(sourcedir)
outdir = join('/tmp/', project_name)
rmtree(outdir, ignore_errors=True)
makedirs(outdir, exist_ok=True)
run('clear')
cmd = split(f'sphinx-build -a -E -j auto --color -b {FORMAT} {sourcedir} {outdir}')
run(cmd)
last = run('git log -1 --format="%ad" --date=iso', shell=True, capture_output=True).stdout.strip().decode()
last = datetime.strptime(last, '%Y-%m-%d %H:%M:%S %z')
delta = datetime.now(tz=timezone.utc) - last
since = round(delta.total_seconds() / MINUTE)
duration = datetime.now() - START_TIME
duration_seconds = round(duration.total_seconds())
duration_minutes = round(duration_seconds / MINUTE, 1)
print('\n\n')
print(f'Build took: {duration_seconds} seconds ({duration_minutes} minutes)')
print(f'Last commit: {last}')
print(f'Since: {since}m')
|
apache-2.0
|
hadim/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
Chapter5_LossFunctions/DarkWorldsMetric.py
|
92
|
20375
|
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training c
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
Notes: It takes in the predicted and true positions, and then loops over every possible configuration and finds the optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
true_halo_indexes: vector containing indexes of the input true halos which match the predicted halo indexes (1 to 3 elements)
measured_halo_indexes: vector containing indexes of the predicted halo positions with reference to the true halo positions.
e.g. if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position match the second input of the predicted x,y coordinates.
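Example (illustrative values only, not taken from the competition data): with true halos at (0,0) and (10,0) and predicted positions (9,1) and (1,1), pairing the first prediction with the second true halo and the second prediction with the first gives a summed distance of about 2.83, against about 18.11 for the other pairing, so that configuration is the one returned.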
"""
num_halos=len(x_true) #Only works for number of halos > 1
num_configurations=mt.factorial(num_halos) #The number of possible different comb
configurations=np.zeros([num_halos,num_configurations],int) #The array of combinations
#I will pass back
distances = np.zeros([num_configurations],float) #The array of the distances
#for all possible combinations
radial_distance=[] #The vector of distances
#I will pass back
#Pick a combination of true and predicted
a=['01','012'] #Input for the permutations: '01' for two halos, '012' for three
count=0 #For the index of the distances array
true_halo_indexes=[] #The tuples which will show the order of halos picked
predicted_halo_indexes=[]
distances_perm=np.zeros([num_configurations,num_halos],float) #The distance between each
#true and predicted
#halo for every comb
true_halo_indexes_perm=[] #log of all the permutations of true halos used
predicted_halo_indexes_perm=[] #log of all the predicted permutations
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
for j in xrange(num_halos): #loop through all the true halos with the
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
#This array logs the distance between true and
#predicted halo for ALL configurations
which_true_halos.append(j) #log the order in which I try each true halo
which_predicted_halos.append(int(perm[j])) #log the order in which I try
#each predicted halo
true_halo_indexes_perm.append(which_true_halos) #this is a tuple of tuples of
#all of the different config
#true halo indexes
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::]) #Find what the total distances
#are for each configuration
count=count+1
config = np.where(distances == min(distances))[0][0] #The configuration used is the one
#which has the smallest distance
radial_distance.append(distances_perm[config,0::]) #Find the tuple of distances that
#correspond to this smallest distance
true_halo_indexes=true_halo_indexes_perm[config] #Find the tuple of the index which refers
#to the smallest distance
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
# Angle at which the halo is at
#with respect to the reference point
phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted])) # Angle of the estimate
#wrt true halo centre
#Before finding the angle with the zero line as the line joining the halo and the reference
#point I need to convert the angle produced by Python to an angle between 0 and 2pi
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
theta = phi-psi #The angle with the baseline as the line joining the ref and the halo
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the angle of the true pos wrt the ref is
#greater than the angle of predicted pos
#and the true pos then add 2pi
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
angle:
x_in, y_in: the x and y coordinates used to determine the quadrant
the coordinate lies in, so as to add pi or 2*pi
Returns:
theta: the angle in the range 0:2pi
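Example (illustrative): for x_in = -1 and y_in = -1, arctan(y_in/x_in) alone gives an angle in (0, pi/2); adding pi, as done below for that sign combination, places the point in the third quadrant, i.e. between pi and 3*pi/2.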
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
x_halo, y_halo: Vector num_halos referring to the coordinates of the halos
weight: the weight which will be assigned to the position of the halo
num_halos: number of halos in the system
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
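Example (illustrative numbers): halos at x = [0, 10] with weights [1, 3] give x_ref = (0*1 + 10*3)/(1 + 3) = 7.5, and likewise for y_ref.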
"""
#Find the weighted average of the x and y coordinates
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
num_halos_total=0 #Keep track of how many halos are input into the metric
for selectskyinsolutions, sky in enumerate(sky_prediction): #Loop through each line in result.csv and analyse each one
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[0])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[1]))
#The solution file for the test data provides masses
#to calculate the centre of mass whereas the Training_halo.csv
#directly provides x_ref y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#Get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index, contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. it also contains indexes of the halos used which are used to
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = (1/N * Sum^N cos(theta))^2 + (1/N * Sum^N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight the av_r such that < 1 is a good score > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb')) #Load in the sky_ids from the solution file
for row in sky_loader:
true_sky_id.append(row[0])
#Load in the true values from the solution file
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
for row in sky_loader:
true_sky_id.append(row[1])
num_halos_total=0 #Keep track of how many halos are input into the metric
sky_prediction = c.reader(open(user_fname, 'rb')) #Open the result.csv
try: #See if the input file from user has a header on it
#with open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:
with open(user_fname, 'r') as f:
header = float((f.readline()).split(',')[1]) #try and make where the
#first input would be
#a float, if succeed it
#is not a header
print 'THE INPUT FILE DOES NOT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction: #Loop through each line in result.csv and analyse each one
sky_id = str(sky[0]) #Get the sky_id of the input
does_it_exist=true_sky_id.count(sky_id) #Is the input sky_id
#from user a real one?
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
selectskyinsolutions=true_sky_id.index(sky_id)-1
else: #Otherwise exit
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
#The solution file for the test data provides masses
#to calculate the centre of mass whereas the Training_halo.csv
#directly provides x_ref y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#Get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index, contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. it also contains indexes of the halos used which are used to
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = (1/N * Sum^N cos(theta))^2 + (1/N * Sum^N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight the av_r such that < 1 is a good score > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
#For help just type 'python DarkWorldsMetric.py -h'
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,halo_y1,halo_x2,halo_y2,halo_x3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
|
mit
|
amith01994/intellij-community
|
python/testData/buildout/site.py
|
83
|
20097
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = False # buildout does not support user sites.
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.abspath(os.path.join(*paths))
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except AttributeError:
continue
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for line in f:
if line.startswith("#"):
continue
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
USER_BASE = env_base if env_base else joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
USER_BASE = env_base if env_base else joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
return known_paths
def addsitepackages(known_paths):
"""Add site packages, as determined by zc.buildout.
See original_addsitepackages, below, for the original version."""
setuptools_path = 'c:\\src\\django\\buildout15\\eggs\\setuptools-0.6c12dev_r88124-py2.6.egg'
sys.path.append(setuptools_path)
known_paths.add(os.path.normcase(setuptools_path))
import pkg_resources
buildout_paths = [
'c:\\src\\django\\buildout15\\src',
'c:\\src\\django\\buildout15\\eggs\\setuptools-0.6c12dev_r88124-py2.6.egg'
]
for path in buildout_paths:
sitedir, sitedircase = makepath(path)
if not sitedircase in known_paths and os.path.exists(sitedir):
sys.path.append(sitedir)
known_paths.add(sitedircase)
pkg_resources.working_set.add_entry(sitedir)
sys.__egginsert = len(buildout_paths) # Support distribute.
original_paths = [
'C:\\Python26\\lib\\site-packages'
]
for path in original_paths:
if path == setuptools_path or path not in known_paths:
addsitedir(path, known_paths)
return known_paths
def original_addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
sitedirs = []
seen = []
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.append(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitedirs.append(os.path.join(prefix, "lib", "site-python"))
else:
sitedirs.append(prefix)
sitedirs.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
sitedirs.append(
os.path.expanduser(
os.path.join("~", "Library", "Python",
sys.version[:3], "site-packages")))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
__builtin__.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
__builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def main():
global ENABLE_USER_SITE
abs__file__()
known_paths = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
|
apache-2.0
|
likaiwalkman/cassandra
|
pylib/cqlshlib/sslhandling.py
|
28
|
3725
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ConfigParser
import ssl
def ssl_settings(host, config_file, env=os.environ):
"""
Function which generates SSL settings for cassandra.Cluster
Params:
* host .........: hostname of Cassandra node.
* env ..........: environment variables. SSL factory will use, if passed,
SSL_CERTFILE and SSL_VALIDATE variables.
* config_file ..: path to cqlsh config file (usually ~/.cqlshrc).
SSL factory will use, if set, certfile and validate
options in [ssl] section, as well as host to certfile
mapping in [certfiles] section.
[certfiles] section is optional, 'validate' setting in [ssl] section is
optional too. If validation is enabled then SSL certfile must be provided
either in the config file or as an environment variable.
Environment variables override any options set in cqlsh config file.
"""
"""
configs = ConfigParser.SafeConfigParser()
configs.read(config_file)
def get_option(section, option):
try:
return configs.get(section, option)
except ConfigParser.Error:
return None
ssl_validate = env.get('SSL_VALIDATE')
if ssl_validate is None:
ssl_validate = get_option('ssl', 'validate')
ssl_validate = ssl_validate is None or ssl_validate.lower() != 'false'
ssl_version_str = env.get('SSL_VERSION')
if ssl_version_str is None:
ssl_version_str = get_option('ssl', 'version')
if ssl_version_str is None:
ssl_version_str = "TLSv1"
ssl_version = getattr(ssl, "PROTOCOL_%s" % ssl_version_str, None)
if ssl_version is None:
sys.exit("%s is not a valid SSL protocol, please use one of SSLv23, "
"TLSv1, TLSv1.1, or TLSv1.2" % (ssl_version_str,))
ssl_certfile = env.get('SSL_CERTFILE')
if ssl_certfile is None:
ssl_certfile = get_option('certfiles', host)
if ssl_certfile is None:
ssl_certfile = get_option('ssl', 'certfile')
if ssl_validate and ssl_certfile is None:
sys.exit("Validation is enabled; SSL transport factory requires a valid certfile "
"to be specified. Please provide path to the certfile in [ssl] section "
"as 'certfile' option in %s (or use [certfiles] section) or set SSL_CERTFILE "
"environment variable." % (config_file,))
if ssl_certfile is not None:
ssl_certfile = os.path.expanduser(ssl_certfile)
userkey = get_option('ssl', 'userkey')
if userkey:
userkey = os.path.expanduser(userkey)
usercert = get_option('ssl', 'usercert')
if usercert:
usercert = os.path.expanduser(usercert)
return dict(ca_certs=ssl_certfile,
cert_reqs=ssl.CERT_REQUIRED if ssl_validate else ssl.CERT_NONE,
ssl_version=ssl_version,
keyfile=userkey, certfile=usercert)
|
apache-2.0
|
hpcleuven/easybuild-framework
|
test/framework/sandbox/easybuild/tools/module_naming_scheme/test_module_naming_scheme_more.py
|
3
|
2985
|
##
# Copyright 2013-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Implementation of a test module naming scheme.
@author: Kenneth Hoste (Ghent University)
"""
import os
from vsc.utils import fancylogger
from easybuild.framework.easyconfig.default import DEFAULT_CONFIG
from easybuild.tools.module_naming_scheme import ModuleNamingScheme
from easybuild.tools.ordereddict import OrderedDict
# prefer hashlib.sha1 (only in Python 2.5 and up) over sha.sha
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
_log = fancylogger.getLogger('TestModuleNamingSchemeMore', fname=False)
class TestModuleNamingSchemeMore(ModuleNamingScheme):
"""Class implementing a test module naming scheme that uses some 'unusual' easyconfig parameters."""
REQUIRED_KEYS = ['name', 'version', 'toolchain', 'moduleclass', 'sources', 'description']
def det_full_module_name(self, ec):
"""
Determine full module name from given easyconfig, according to a testing module naming scheme,
using some 'unusual' easyconfig parameters.
@param ec: dict-like object with easyconfig parameter values (e.g. 'name', 'version', etc.)
@return: string with full module name, e.g.: GCC/068d21a1331fc0295c3cb7e048430fa33a89fe69
"""
res = ''
for key in self.REQUIRED_KEYS:
if isinstance(ec[key], dict):
res += '%s=>' % key
for item_key in sorted(ec[key].keys()):
res += '%s:%s,' % (item_key, ec[key][item_key])
else:
res += str(ec[key])
ec_sha1 = sha1(res).hexdigest()
_log.debug("SHA1 for string '%s' obtained for %s: %s" % (res, ec, ec_sha1))
return os.path.join(ec['name'], ec_sha1)
def is_short_modname_for(self, modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
"""
return modname.startswith(name)
|
gpl-2.0
|
elationfoundation/PyOrgMode
|
build/lib/PyOrgMode/test_simple-agenda.py
|
2
|
2101
|
import PyOrgMode
import copy
try:
import unittest2 as unittest
except ImportError:
import unittest
def Get_Scheduled_Elements(element, data=None):
"""
Grab the data from all scheduled elements in the whole tree defined by 'element', recursively.
Returns all the elements as an array.
"""
if data is None: # use a fresh list per top-level call instead of a shared mutable default
data = []
if hasattr(element,"content"):
for child in element.content:
if hasattr(child,"TYPE"):
if child.TYPE == "SCHEDULE_ELEMENT":
# This element is scheduled, we are creating a copy of it
data.append(copy.deepcopy(child.parent))
Get_Scheduled_Elements(child,data)
return data
class TestAgenda(unittest.TestCase):
def test_agenda(self):
# Creating the input and output files data structures
input_file = PyOrgMode.OrgDataStructure()
output_file = PyOrgMode.OrgDataStructure()
# Loading from agenda.org file
input_file.load_from_file("agenda.org")
# Get the scheduled elements (those with SCHEDULE, DEADLINE in them, not in the node name)
scheduled_elements = Get_Scheduled_Elements(input_file.root)
# Assign these elements to the root (reparent each element recursively, relevel them cleanly)
output_file.root.append_clean(scheduled_elements)
output_file.save_to_file("test_scheduled_output.org")
saved = open("test_scheduled_output.org").readlines()
self.assertEqual(saved, ['* Element 1\n',
' SCHEDULED: <2011-02-08>\n',
'* Element 3\n',
' DEADLINE: <2011-02-08>\n',
'** Test\n',
'** Element 4\n',
' SCHEDULED: <2011-02-08>\n',
'*** Couic\n',
'* Element 4\n',
' SCHEDULED: <2011-02-08>\n',
'** Couic\n'])
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
nirmeshk/oh-mainline
|
vendor/packages/Django/tests/modeltests/generic_relations/models.py
|
48
|
2987
|
"""
34. Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__name"]
def __str__(self):
return self.tag
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
@python_2_unicode_compatible
class Comparison(models.Model):
"""
A model that tests having multiple GenericForeignKeys
"""
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
content_type2 = models.ForeignKey(ContentType, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
first_obj = generic.GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
other_obj = generic.GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = generic.GenericRelation(TaggedItem)
comparisons = generic.GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __str__(self):
return self.common_name
@python_2_unicode_compatible
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = generic.GenericRelation(TaggedItem)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __str__(self):
return self.name
class GeckoManager(models.Manager):
def get_query_set(self):
return super(GeckoManager, self).get_query_set().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField()
objects = GeckoManager()
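# Illustrative usage of the generic relations defined above (a sketch only; it
# assumes a database with these models migrated, and the object values are made up):
#
#   bacon = Vegetable.objects.create(name="bacon", is_yucky=False)
#   t = TaggedItem(content_object=bacon, tag="fatty"); t.save()
#   bacon.tags.all()                                    # tags reached via the GenericRelation
#   TaggedItem.objects.get(tag="fatty").content_object  # follows the GenericForeignKey back to bacon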
|
agpl-3.0
|
ColdSauce/IsSittingOnButt
|
server/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py
|
2763
|
12628
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
|
apache-2.0
|
alexholcombe/twoWords
|
honoursClass2015/twoWordsExperimentInvertedbackMayAlexContinue2.py
|
2
|
52750
|
#Alex Holcombe [email protected]
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os#, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
wordEccentricity=3 #means degrees of angle away from fixation
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 24
wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way" #24 most common words
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
wordList[i] = wordList[i].replace(" ", "") #delete spaces
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=1 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
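#e.g. with widthPix=1280, monitorwidth=38.7 cm and viewdist=57 cm this works out to roughly 37 pixels per degree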
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg #I've changed the SOA and letterDurMs
SOAms = 180 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 400 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
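#e.g. at 60 Hz one frame lasts 1000/60 = ~16.7 ms, so a 90 ms duration would map to floor(90/16.7) = 5 frames (illustrative values)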
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than ' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 5 #default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
logging.info('staircaseTrials entered by user=',staircaseTrials)
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
else: #not doing staircase
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition =',trialsPerCondition)
defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
wordListThisTrial = list() #to put in KiuYan stuff
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(thisTrial,wordList): #called before each trial
global wordListThisTrial, textStimuliStream1, textStimuliStream2
textStimuliStream1 = list()
textStimuliStream2 = list()
if len(wordList) < numWordsInStream:
print('Error! Your word list must have at least ',numWordsInStream,'strings')
idxsIntoWordList = np.arange( len(wordList) ) #create a list of indexes of the entire word list, e.g. 0, 1, 2, 3, 4, 5 up to 23
print('wordList=',wordList)
#if the condition is the orthographically similar one
orthographicallySimilarLists = [['bed','now','ban','tap','ton','dab','paw','map','win'],['top','bib','ten','box','wet','man','urn','pit','but']]
orthographicallyDissimilarLists = [['off','egg','her','for','elk','joy','zoo','fax','yes'],['sky','fox','fig','elf','ski','zig','cog','woo','his']]
print('thisTrial[orthographicalsim]= ',thisTrial['orthographicalsim'])
whichList = np.random.randint(0, 2) #randint's upper bound is exclusive, so this picks list 0 or 1 at random
if thisTrial['orthographicalsim'] == True:
listForThisCondition = copy.deepcopy(orthographicallySimilarLists[whichList])
elif thisTrial['orthographicalsim'] == False:
listForThisCondition = copy.deepcopy(orthographicallyDissimilarLists[whichList])
print('Using dissimilar list')
listForThisConditionLeft = copy.deepcopy(listForThisCondition)
listForThisConditionRight = copy.deepcopy(listForThisCondition)
np.random.shuffle(listForThisConditionLeft)
np.random.shuffle(listForThisConditionRight)
print('listForThisConditionLeft=',listForThisConditionLeft)
print('length of wordList before kiuyan modification=',len(wordList))
wordListThisTrial = copy.deepcopy(wordList) #to put in KiuYan stuff
for word in listForThisConditionLeft:
print('adding left intron to position ',len(wordListThisTrial), ' in wordListThisTrial')
#print('inside loop:',word)
wordListThisTrial.append(word) #add new words to end of wordList
for word in listForThisConditionRight: #after the left ones, add to the wordList even more by putting the right ones
print('adding right intron to position ',len(wordListThisTrial), ' in wordListThisTrial')
#print('inside loop:',word)
wordListThisTrial.append(word) #add new words to end of wordList
numWordsToLoadUp = numWordsInStream + 2*len(listForThisCondition)
print('numWordsToLoadUp=',numWordsToLoadUp, ' length of wordListThisTrial=',len(wordListThisTrial))
for i in range(0,numWordsToLoadUp): #draw the words that will be used on this trial, the first 26 of the shuffled list
word = wordListThisTrial[ i ] # #[ idxsIntoWordList[i] ] the below is the way that Psychopy draws texton screen
#bucket of words on left stream
textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
#bucket of words on right stream
textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
textStimulusStream1.setPos([-wordEccentricity,0]) #left this is bucket of words
textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
textStimulusStream2.setPos([wordEccentricity,0]) #right this is bucket of words
textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli
print('Kiuyan modified wordListThisTrial =',wordListThisTrial)
idxsStream1 = idxsIntoWordList #first RSVP stream
np.random.shuffle(idxsStream1)
idxsStream2 = copy.deepcopy(idxsIntoWordList)
np.random.shuffle(idxsStream2)
toSubstituteOnLeft = range(9)
np.random.shuffle(toSubstituteOnLeft)
toSubstituteOnRight = range(9)
np.random.shuffle(toSubstituteOnRight)
for i in range(9):
idxOfWordToSubstituteLeft = numWordsInStream+toSubstituteOnLeft[i]
#print('Using # ',idxOfWordToSubstituteLeft,' as intron for left')
idxsStream1[i+8] = idxOfWordToSubstituteLeft
idxOfWordToSubstituteRight = numWordsInStream + 9 + toSubstituteOnRight[i]
print('Using # ',idxOfWordToSubstituteRight,' as intron for right')
idxsStream2[i+8] = idxOfWordToSubstituteRight
print('idxsStream1=',idxsStream1, ' idxsStream2=',idxsStream2)
return idxsStream1, idxsStream2
#create click sound for keyboard
try:
click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
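#rounding the uniform random values to 0 or 1 and mapping them to -1/+1 gives a binary black-and-white noise patch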
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS
cuePositions = np.array([10, 11, 12, 13, 14]) #changed this experiment from 6-10 to 10-14 so that its centred in the stream
for cuePos in cuePositions:
for rightResponseFirst in [False,True]:
for bothWordsFlipped in [True]:
for orthographicalsim in [True,False]:
stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst, 'flipHoriz':bothWordsFlipped, 'flipVert':bothWordsFlipped,
'leftStreamFlip':bothWordsFlipped, 'orthographicalsim':orthographicalsim, 'rightStreamFlip':bothWordsFlipped} ) # add 'orthographicalsim':orthographical similarity
trials = data.TrialHandler(stimList,trialsPerCondition,method='sequential') #'random' #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
#if it's not really a letter, return @
if number < 0 or number > 25:
return ('@')
else: #it's probably a letter
try:
return chr( ord('A')+number )
except:
return('@')
def letterToNumber(letter): #A = 0, Z = 25
#if it's not really a letter, return -999
#HOW CAN I GENERICALLY TEST FOR LENGTH, EVEN IN CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?
try:
#if len(letter) > 1:
# return (-999)
if letter < 'A' or letter > 'Z':
return (-999)
else: #it's a letter
return ord(letter)-ord('A')
except:
return (-999)
def wordToIdx(word,wordList):
#if it's not in the list of stimuli, return -999
try:
#http://stackoverflow.com/questions/7102050/how-can-i-get-a-python-generator-to-return-none-rather-than-stopiteration
firstMatchIdx = next((i for i, val in enumerate(wordList) if val.upper()==word), None) #return i (index) unless no matches, in which case return None
#print('Looked for ',word,' in ',wordList,'\nfirstMatchIdx =',firstMatchIdx)
return firstMatchIdx
except:
print('Unexpected error in wordToIdx with word=',word)
return (None)
#print header for data file
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\tflipHoriz\tflipVert\torthographicalsim\t',end='',file=dataFile)#added tabs for the two flips
if task=='T1':
numRespsWanted = 2
dataFile.write('rightResponseFirst\t')
for i in range(numRespsWanted):
dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
dataFile.write('answer'+str(i)+'\t')
dataFile.write('response'+str(i)+'\t')
dataFile.write('correct'+str(i)+'\t')
dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
noise,proportnNoise,allFieldCoords,numNoiseDots ):
#defining a function to draw each frame of stim.
#seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
SOAframes = letterDurFrames+ISIframes
cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
stimN = int( np.floor(n/SOAframes) )
frameOfThisLetter = n % SOAframes #every SOAframes, new letter
showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
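#for example, if SOAframes were 10 and letterDurFrames 6 (illustrative values), frames 0-5 of each item would draw the word and frames 6-9 would be the blank ISI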
#print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
if seq2 is not None:
thisStim2Idx = seq2[stimN]
#so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
cue.setLineColor( bgColor )
if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
cueFrames = list([cueFrames])
for cueFrame in cueFrames: #check whether it's time for any cue
if n>=cueFrame and n<cueFrame+cueDurFrames:
cue.setLineColor( cueColor )
if showLetter:
textStimuliStream1[thisStimIdx].setColor( letterColor )
#print('thisStimIdx=',thisStimIdx,' thisStim2Idx=',thisStim2Idx)
textStimuliStream2[thisStim2Idx].setColor( letterColor )
else:
textStimuliStream1[thisStimIdx].setColor( bgColor )
textStimuliStream2[thisStim2Idx].setColor( bgColor )
textStimuliStream1[thisStimIdx].flipVert = True #added this and the next line in to try to invert
textStimuliStream2[thisStim2Idx].flipVert = True
textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
textStimuliStream1[thisStimIdx].draw()
textStimuliStream2[thisStim2Idx].draw()
cue.draw()
refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
if proportnNoise>0 and refreshNoise:
if frameOfThisLetter ==0:
np.random.shuffle(allFieldCoords) #shuffle the full field of possible dot positions, then take the first numNoiseDots below
dotCoords = allFieldCoords[0:numNoiseDots]
noise.setXYs(dotCoords)
if proportnNoise>0:
noise.draw()
return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
cue = visual.Circle(myWin,
radius=cueRadius,#Martini used circles with diameter of 12 deg
lineColorSpace = 'rgb',
lineColor=bgColor,
lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
units = 'deg',
fillColorSpace = 'rgb',
fillColor=None, #beware, with convex shapes fill colors don't work
pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
interpolate=True,
autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
#check for timing problems and log them
#ts is a list of the times of the clock after each frame
interframeIntervs = np.diff(ts)*1000
#print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
frameTimeTolerance=.3 #proportion longer than the expected frame duration that will not count as a miss
longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 130% of the expected duration
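#e.g. at the assumed 60 Hz refresh a frame is ~16.7 ms, so with frameTimeTolerance=.3 any interval longer than ~21.7 ms is flagged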
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong >0 and (not demo):
longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
if longFramesStr != None:
logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
if not demo:
flankingAlso=list()
for idx in idxsInterframeLong: #also print timing of one before and one after long frame
if idx-1>=0:
flankingAlso.append(idx-1)
else: flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
else: flankingAlso.append(np.NaN)
flankingAlso = np.array(flankingAlso)
flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
flankingAlso = flankingAlso.astype(np.integer) #cast as integers, so can use as subscripts
logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) ) #because this is not an essential error message, as previous one already indicates error
#As INFO, at least it won't fill up the console when console set to WARNING or higher
return numCasesInterframeLong
#end timing check
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0; #this is not recorded in the datafile as a variable. #added Trials into the string to define hopefully!
numTrialsEachCorrect= np.zeros( numRespsWanted )
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
#relies on global variables:
# textStimuli, logging, bgColor
# thisTrial should have 'cuePos'
global framesSaved #because change this variable. Can only change a global variable if you declare it
cuesPos = [] #will contain the positions in the stream of all the cues (targets)
cuesPos.append(thisTrial['cuePos'])
cuesPos = np.array(cuesPos)
noise = None; allFieldCoords=None; numNoiseDots=0
if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
(noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
cue.setLineColor(bgColor)
preDrawStimToGreasePipeline.extend([cue])
for stim in preDrawStimToGreasePipeline:
stim.draw()
myWin.flip(); myWin.flip()
#end preparation of stimuli
core.wait(.1);
trialClock.reset()
fixatnPeriodMin = 0.3
fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 0.3 and 0.8 s
ts = list(); #to store time of each drawing, to check whether skipped frames
for i in range(fixatnPeriodFrames+20): #prestim fixation interval
#if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
# fixation.draw()
#else: fixationBlank.draw()
fixationPoint.draw()
myWin.flip() #end fixation interval
#myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
t0 = trialClock.getTime()
for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
fixationPoint.draw()
if exportImages:
myWin.getMovieFrame(buffer='back') #for later saving
framesSaved +=1
myWin.flip()
t=trialClock.getTime()-t0; ts.append(t);
#end of big stimulus loop
myWin.setRecordFrameIntervals(False);
if task=='T1':
respPromptStim.setText('What was circled?',log=False)
else: respPromptStim.setText('Error: unexpected task',log=False)
postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
#print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
#Handle response, calculate whether correct, ########################################
#responses are actual characters
#correctAnswer is index into stimSequence
#autopilot is global variable
if autopilot or passThisTrial:
response = responseAutopilot
#print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
correct = 0
approxCorrect = 0
posOfResponse = -999
responsePosRelative = -999
idx = correctAnswerIdx
correctAnswer = wordListThisTrial[idx].upper()
responseString= ''.join(['%s' % char for char in response])
responseString= responseString.upper()
#print('correctAnswer=',correctAnswer ,' responseString=',responseString)
if correctAnswer == responseString:
correct = 1
#print('correct=',correct)
responseWordIdx = wordToIdx(responseString,wordListThisTrial)
if responseWordIdx is None: #response is not in the wordList
posOfResponse = -999
logging.warn('Response was not present in the stimulus stream')
else:
posOfResponse= np.where( responseWordIdx==stimSequence )
posOfResponse= posOfResponse[0] #list with two entries, want first which will be array of places where the response was found in the sequence
if len(posOfResponse) > 1:
logging.error('Expected response to have occurred in only one position in stream')
posOfResponse = posOfResponse[0] #first element of list (should be only one element long)
responsePosRelative = posOfResponse - cuePos
approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus or minus 3 positions
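#e.g. if the cue was at serial position 10 and the reported word occurred at position 12, responsePosRelative=2 and the response still counts as approximately correct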
#print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
#print response stuff to dataFile
#header was answerPos0, answer0, response0, correct0, responsePosRelative0
print(cuePos,'\t', end='', file=dataFile)
print(correctAnswer, '\t', end='', file=dataFile) #answer0
print(responseString, '\t', end='', file=dataFile) #response0
print(correct, '\t', end='',file=dataFile) #correct0
print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
return correct,approxCorrect,responsePosRelative
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
low = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
highA.setVolume(0.9)
low.setVolume(1.0)
if correct:
highA.play()
elif passThisTrial:
high= sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
for i in range(2):
high.play(); low.play();
else: #incorrect
low.play()
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
if doStaircase:
#create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal = 95,
startValSd = 80,
stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials = staircaseTrials,
#extraInfo = thisInfo,
pThreshold = threshCriterion, #0.25,
gamma = 1./26,
delta=0.02, #lapse rate, I suppose for Weibull function fit
method = 'quantile', #uses the median of the posterior as the final answer
stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal = 100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2,.2,.1,.1,.05,.05]
stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
staircase = data.StairHandler(startVal = 0.1,
stepType = 'log', #if log, what do I want to multiply it by
stepSizes = stepSizesLog, #step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, #will home in on the 80% threshold
nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+' trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg); logging.info(phasesMsg)
#staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() #only needed for easyStaircaseStarterNoise
staircaseTrialN = -1; mainStaircaseGoing = False
while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
mainStaircaseGoing = True
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
try: #advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
staircaseTrialN += 1
except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break #break out of the trials loop
#print('staircaseTrialN=',staircaseTrialN)
idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList)
for i in range(8,16): #make sure each position points to one of the orthographically dis/similar one
print(i)#
idxsStream1
cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
expStop,passThisTrial,responses,responsesAutopilot = \
stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
if not expStop:
if mainStaircaseGoing:
print('staircase\t', end='', file=dataFile)
else:
print('staircase_preface\t', end='', file=dataFile)
#header start 'trialnum\tsubject\ttask\t'
print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
core.wait(.06)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
corrEachTrial.append(T1approxCorrect)
if mainStaircaseGoing:
staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
#print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
#ENDING STAIRCASE PHASE
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
print('framesSaved after staircase=',framesSaved) #debugON
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
logging.info(msg); print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
#print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(),3)
if descendingPsycho:
threshNoise = 100- threshNoise
threshNoise = max( 0, threshNoise ) #e.g. if get all trials wrong, posterior peaks at a very negative number
msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
logging.info(msg); print(msg)
myWin.close()
#Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
except:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
#save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() #must call this to actually show plot
else: #not staircase
noisePercent = defaultNoiseLevel
phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
print(phasesMsg); logging.info(phasesMsg)
nDoneMain =0
while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
if nDoneMain==0:
msg='Starting main (non-staircase) part of experiment'
logging.info(msg); print(msg)
thisTrial = trials.next() #get a proper (non-staircase) trial
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(thisTrial,wordList)
cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
#call for each response
expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
numCharsInResponse = len(wordList[0])
dL = [None]*numRespsWanted #dummy list for null values
expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
responseOrder = range(numRespsWanted)
if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
responseOrder.reverse()
for i in responseOrder:
x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
if not expStop:
print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
print(nDoneMain,'\t', end='', file=dataFile)
print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['flipHoriz'],'\t', end='', file=dataFile)
print(thisTrial['flipVert'],'\t', end='', file=dataFile)
print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
print(thisTrial['orthographicalsim'],'\t', end='', file=dataFile)
i = 0
eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
if i==0:
sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
correct,approxCorrect,responsePosRelative = (
handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
eachCorrect[i] = correct
eachApproxCorrect[i] = approxCorrect
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
numTrialsCorrect += eachCorrect.all() #so count -1 as 0
numTrialsEachCorrect += eachCorrect #list numRespsWanted long
numTrialsApproxCorrect += eachApproxCorrect.all()
numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
if exportImages: #catches one frame of response
myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
framesSaved +=1; core.wait(.1)
myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
expStop=True
core.wait(.1)
if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
nDoneMain+=1
dataFile.flush(); logging.flush()
print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
nextText.setText('Press "SPACE" to continue!')
nextText.draw()
progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip() # myWin.flip(clearBuffer=True)
waiting=True
while waiting:
if autopilot: break
elif expStop == True:break
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space','ESCAPE']:
waiting=False
if key in ['ESCAPE']:
expStop = True
myWin.clearBuffer()
core.wait(.2); time.sleep(.2)
#end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
print(msg); logging.info(msg)
msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
print(msg); logging.info(msg)
for i in range(numRespsWanted):
msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
print(msg); logging.info(msg)
msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
|
mit
|
sjlehtin/django
|
tests/select_related_regress/models.py
|
106
|
3371
|
from django.db import models
class Building(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return "Building: %s" % self.name
class Device(models.Model):
building = models.ForeignKey('Building', models.CASCADE)
name = models.CharField(max_length=10)
def __str__(self):
return "device '%s' in building %s" % (self.name, self.building)
class Port(models.Model):
device = models.ForeignKey('Device', models.CASCADE)
port_number = models.CharField(max_length=10)
def __str__(self):
return "%s/%s" % (self.device.name, self.port_number)
class Connection(models.Model):
start = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_start',
unique=True,
)
end = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_end',
unique=True,
)
def __str__(self):
return "%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
name = models.CharField(max_length=200)
class Person(models.Model):
user = models.ForeignKey(TUser, models.CASCADE, unique=True)
class Organizer(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Student(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Class(models.Model):
org = models.ForeignKey(Organizer, models.CASCADE)
class Enrollment(models.Model):
std = models.ForeignKey(Student, models.CASCADE)
cls = models.ForeignKey(Class, models.CASCADE)
# Models for testing bug #8036.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
class ClientStatus(models.Model):
name = models.CharField(max_length=50)
class Client(models.Model):
name = models.CharField(max_length=50)
state = models.ForeignKey(State, models.SET_NULL, null=True)
status = models.ForeignKey(ClientStatus, models.CASCADE)
class SpecialClient(Client):
value = models.IntegerField()
# Some model inheritance exercises
class Parent(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Child(Parent):
value = models.IntegerField()
class Item(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.SET_NULL, null=True)
def __str__(self):
return self.name
# Models for testing bug #19870.
class Fowl(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Hen(Fowl):
pass
class Chick(Fowl):
mother = models.ForeignKey(Hen, models.CASCADE)
class Base(models.Model):
name = models.CharField(max_length=10)
lots_of_text = models.TextField()
class Meta:
abstract = True
class A(Base):
a_field = models.CharField(max_length=10)
class B(Base):
b_field = models.CharField(max_length=10)
class C(Base):
c_a = models.ForeignKey(A, models.CASCADE)
c_b = models.ForeignKey(B, models.CASCADE)
is_published = models.BooleanField(default=False)
|
bsd-3-clause
|
mitodl/micromasters
|
courses/views.py
|
1
|
4278
|
"""Views for courses"""
from django.db import transaction
from rest_framework import (
viewsets,
mixins,
status,
)
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.exceptions import (
APIException,
NotFound,
ValidationError,
)
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from courses.catalog_serializers import CatalogProgramSerializer
from courses.models import Program, CourseRun
from courses.serializers import ProgramSerializer, CourseRunSerializer
from dashboard.models import ProgramEnrollment
from profiles.models import Profile
from profiles.serializers import ProfileImageSerializer
class ResourceConflict(APIException):
"""Custom exception for Conflict Status Code"""
status_code = status.HTTP_409_CONFLICT
default_detail = 'The resource already exists.'
class ProgramViewSet(viewsets.ReadOnlyModelViewSet):
"""API for the Program collection"""
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
permission_classes = (
IsAuthenticated,
)
queryset = Program.objects.filter(live=True)
serializer_class = ProgramSerializer
class ProgramLearnersView(APIView):
"""API for Learners enrolled in the Program"""
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
permission_classes = (
IsAuthenticated,
)
serializer_class = ProfileImageSerializer
def get(self, request, *args, **kargs): # pylint: disable=unused-argument
"""
Get eight random learners with images and
the total count of visible learners in the program
"""
program_id = self.kwargs["program_id"]
users = ProgramEnrollment.objects.filter(
program_id=program_id
).values_list('user', flat=True)
queryset = Profile.objects.exclude(
image_small__exact=''
).filter(user__in=users).exclude(
account_privacy='private'
).exclude(
user=request.user
).order_by('?')
learners_result = {
'learners_count': queryset.count(),
'learners': ProfileImageSerializer(queryset[:8], many=True).data
}
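# learners_result serializes to e.g. {"learners_count": <total visible learners>, "learners": [<up to eight profile image payloads>]}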
return Response(
status=status.HTTP_200_OK,
data=learners_result
)
class ProgramEnrollmentListView(CreateAPIView):
"""API for the User Program Enrollments"""
serializer_class = ProgramSerializer
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
permission_classes = (
IsAuthenticated,
)
@transaction.atomic
def create(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Create an enrollment for the current user
"""
program_id = request.data.get('program_id')
if not isinstance(program_id, int):
raise ValidationError('A `program_id` parameter must be specified')
serializer = self.get_serializer_class()
try:
program = Program.objects.get(live=True, pk=program_id)
except Program.DoesNotExist:
raise NotFound('The specified program has not been found or it is not live yet')
_, created = ProgramEnrollment.objects.get_or_create(
user=request.user,
program=program,
)
status_code = status.HTTP_201_CREATED if created else status.HTTP_200_OK
return Response(
status=status_code,
data=serializer(program, context={'request': request}).data
)
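# Note: get_or_create makes repeated POSTs for the same program idempotent; the first call returns 201, later calls return 200 with the same payload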
class CourseRunViewSet(viewsets.ReadOnlyModelViewSet):
"""API for the CourseRun model"""
serializer_class = CourseRunSerializer
queryset = CourseRun.objects.all()
class CatalogViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
"""API for program/course catalog list"""
serializer_class = CatalogProgramSerializer
queryset = Program.objects.filter(live=True).prefetch_related(
"course_set__courserun_set", "programpage__thumbnail_image"
)
|
bsd-3-clause
|
polimediaupv/edx-platform
|
lms/djangoapps/instructor/views/instructor_task_helpers.py
|
133
|
5257
|
"""
A collection of helper utility functions for working with instructor
tasks.
"""
import json
import logging
from util.date_utils import get_default_time_display
from bulk_email.models import CourseEmail
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from instructor_task.views import get_task_completion_info
log = logging.getLogger(__name__)
def email_error_information():
"""
Returns email information marked as None, used in the event the email
cannot be loaded
"""
expected_info = [
'created',
'sent_to',
'email',
'number_sent',
'requester',
]
return {info: None for info in expected_info}
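# i.e. {'created': None, 'sent_to': None, 'email': None, 'number_sent': None, 'requester': None}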
def extract_email_features(email_task):
"""
From the given task, extract email content information
Expects that the given task has the following attributes:
* task_input (dict containing email_id and to_option)
* task_output (optional, dict containing total emails sent)
* requester, the user who executed the task
With this information, gets the corresponding email object from the
bulk emails table, and loads up a dict containing the following:
* created, the time the email was sent displayed in default time display
* sent_to, the group the email was delivered to
* email, dict containing the subject, id, and html_message of an email
* number_sent, int number of emails sent
* requester, the user who sent the emails
If task_input cannot be loaded, then the email cannot be loaded
and None is returned for these fields.
"""
# Load the task input info to get email id
try:
task_input_information = json.loads(email_task.task_input)
except ValueError:
log.error("Could not parse task input as valid json; task input: %s", email_task.task_input)
return email_error_information()
email = CourseEmail.objects.get(id=task_input_information['email_id'])
email_feature_dict = {
'created': get_default_time_display(email.created),
'sent_to': task_input_information['to_option'],
'requester': str(getattr(email_task, 'requester')),
}
features = ['subject', 'html_message', 'id']
email_info = {feature: unicode(getattr(email, feature)) for feature in features}
# Pass along email as an object with the information we desire
email_feature_dict['email'] = email_info
# Translators: number sent refers to the number of emails sent
number_sent = _('0 sent')
if hasattr(email_task, 'task_output') and email_task.task_output is not None:
try:
task_output = json.loads(email_task.task_output)
except ValueError:
log.error("Could not parse task output as valid json; task output: %s", email_task.task_output)
else:
if 'succeeded' in task_output and task_output['succeeded'] > 0:
num_emails = task_output['succeeded']
number_sent = ungettext(
"{num_emails} sent",
"{num_emails} sent",
num_emails
).format(num_emails=num_emails)
if 'failed' in task_output and task_output['failed'] > 0:
num_emails = task_output['failed']
number_sent += ", "
number_sent += ungettext(
"{num_emails} failed",
"{num_emails} failed",
num_emails
).format(num_emails=num_emails)
email_feature_dict['number_sent'] = number_sent
return email_feature_dict
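# Shape of the dict returned by extract_email_features, for illustration only
# (the field values below are made up, not taken from real data):
#
#     {
#         'created': 'Jan 01, 2015 at 00:00 UTC',
#         'sent_to': 'all',
#         'requester': 'staff',
#         'email': {'subject': u'...', 'html_message': u'...', 'id': u'42'},
#         'number_sent': '1234 sent, 2 failed',
#     }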
def extract_task_features(task):
"""
Convert task to dict for json rendering.
    Expects tasks to have the following features:
* task_type (str, type of task)
* task_input (dict, input(s) to the task)
* task_id (str, celery id of the task)
* requester (str, username who submitted the task)
* task_state (str, state of task eg PROGRESS, COMPLETED)
    * created (datetime, when the task was created)
* task_output (optional)
"""
# Pull out information from the task
features = ['task_type', 'task_input', 'task_id', 'requester', 'task_state']
task_feature_dict = {feature: str(getattr(task, feature)) for feature in features}
# Some information (created, duration, status, task message) require additional formatting
task_feature_dict['created'] = task.created.isoformat()
# Get duration info, if known
duration_sec = 'unknown'
if hasattr(task, 'task_output') and task.task_output is not None:
try:
task_output = json.loads(task.task_output)
except ValueError:
log.error("Could not parse task output as valid json; task output: %s", task.task_output)
else:
if 'duration_ms' in task_output:
duration_sec = int(task_output['duration_ms'] / 1000.0)
task_feature_dict['duration_sec'] = duration_sec
# Get progress status message & success information
success, task_message = get_task_completion_info(task)
status = _("Complete") if success else _("Incomplete")
task_feature_dict['status'] = status
task_feature_dict['task_message'] = task_message
return task_feature_dict
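# Illustrative usage sketch (not part of the original module): the two
# extractors above are typically mapped over InstructorTask rows and the
# resulting dicts serialized for the instructor dashboard, e.g.
#
#     payload = {'tasks': [extract_task_features(t) for t in tasks]}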
|
agpl-3.0
|
ament/ament_lint
|
ament_clang_format/setup.py
|
1
|
1420
|
from setuptools import find_packages
from setuptools import setup
package_name = 'ament_clang_format'
setup(
name=package_name,
version='0.11.2',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools', 'pyyaml'],
package_data={'': [
'configuration/.clang-format',
]},
zip_safe=False,
author='Dirk Thomas',
author_email='[email protected]',
maintainer='Audrow Nash',
maintainer_email='[email protected]',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Check C++ code style using clang-format.',
long_description="""\
The ability to check code against style conventions using clang-format
and generate xUnit test result files.""",
license='Apache License, Version 2.0, BSD',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_clang_format = ament_clang_format.main:main',
],
},
)
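# Usage sketch (illustrative, not defined in this setup.py): installation makes
# the console_scripts entry point above available as a CLI, invoked roughly as
#
#     ament_clang_format [<paths>] [--xunit-file results.xml]
#
# The actual argument parsing lives in ament_clang_format/main.py, not here.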
|
apache-2.0
|
tigeli/futurice-ldap-user-manager
|
fum/groups/tests.py
|
2
|
1459
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from fum.models import Groups, Users
from mockldap import MockLdap
from fum.common.ldap_test_suite import LdapSuite
class GroupsTestCase(LdapSuite):
def setUp(self):
super(GroupsTestCase, self).setUp()
self.gx_name = "TestGroup7"
self.gx = Groups.objects.create(name=self.gx_name, description="This is a test group")
self.u1 = Users.objects.create(first_name="Teemu", last_name="Testari", username="ttes9", google_status=Users.ACTIVEPERSON)
self.u2 = Users.objects.create(first_name="Teemu2", last_name="Testari2", username="ttes8", google_status=Users.ACTIVEPERSON)
def tearDown(self):
self.gx.delete()
self.u1.delete()
self.u2.delete()
super(GroupsTestCase, self).tearDown()
def test_adding_group(self):
self.assertEqual(len(Groups.objects.filter(name=self.gx_name)), 1)
self.assertEqual(unicode(self.gx), self.gx_name)
def test_adding_members(self):
self.gx.users.add(self.u1)
self.assertEqual(len(self.gx.users.all()), 1)
self.gx.users.add(self.u2)
self.assertEqual(len(self.gx.users.all()), 2)
self.gx.users.remove(self.u1)
self.gx.users.remove(self.u2)
|
bsd-3-clause
|
schqiushui/HTL21-KitKat-3.4.X
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
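# At this point the global fabric_ops list holds the raw declaration lines of
# the function pointers found in struct target_core_fabric_ops, e.g. lines of
# the form "int (*write_pending)(struct se_cmd *);" (example only).
# tcm_mod_dump_fabric_ops() below matches on those names to emit stub bodies.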
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
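# Example invocation (the module name "tcm_nab5000" is only an illustration):
#
#     python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This creates drivers/target/tcm_nab5000/ containing <mod>_base.h,
# <mod>_fabric.c, <mod>_fabric.h, <mod>_configfs.c, a Makefile and a Kconfig
# stub, and optionally wires them into drivers/target/{Makefile,Kconfig}.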
|
gpl-2.0
|
RedHenLab/NLP
|
MWEtoolkit-interface/web_mwetoolkit/settings.py
|
1
|
3009
|
"""
Django settings for gsoc15 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i%k4_t#l0^&n(ethr2mnl&i9hpgom^%sk3b^szd7u_gdwp95!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
CRISPY_FAIL_SILENTLY = not DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'interface',
'crispy_forms',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'web_mwetoolkit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
WSGI_APPLICATION = 'web_mwetoolkit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/web_mwetoolkit/static/'
# STATIC_ROOT = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
)
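# Deployment note (illustrative, not part of this settings module): for a
# production deployment the security warnings above would typically be
# addressed along these lines (hostname and variable name are examples only):
#
#     DEBUG = False
#     ALLOWED_HOSTS = ['mwetoolkit.example.org']
#     SECRET_KEY = os.environ['DJANGO_SECRET_KEY']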
|
gpl-2.0
|
matpow2/cuwo
|
terraingen/tgen2/external/pe-parse/python/setup.py
|
3
|
2310
|
# Copyright (c) 2013, Wesley Shields <[email protected]>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from distutils.core import setup, Extension
INCLUDE_DIRS = ['/usr/local/include',
'/opt/local/include',
'/usr/include',
'../parser-library']
LIBRARY_DIRS = ['/usr/lib',
'/usr/local/lib']
extension_mod = Extension('pepy',
sources = ['pepy.cpp',
'../parser-library/parse.cpp',
'../parser-library/buffer.cpp'],
extra_compile_args = ["-g", "-O0"], # Debug only
include_dirs = INCLUDE_DIRS,
library_dirs = LIBRARY_DIRS)
setup (name = 'pepy',
version = '0.1',
description = 'python bindings for pe-parse',
author = 'Wesley Shields',
author_email = '[email protected]',
license = 'BSD',
long_description = 'Python bindings for pe-parse',
ext_modules = [extension_mod])
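# Usage sketch (illustrative; the binding's API is defined in pepy.cpp, not in
# this setup script): once built and installed, the extension is imported like
# any other module, e.g.
#
#     import pepy
#     p = pepy.parse("some_binary.exe")   # path is an example only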
|
gpl-3.0
|
lumig242/Hue-Integration-with-CDAP
|
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/surface_chart.py
|
10
|
2960
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Integer,
Bool,
Alias,
Sequence,
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import (
NestedInteger,
NestedBool,
)
from ._chart import ChartBase
from .axis import TextAxis, NumericAxis, SeriesAxis
from .shapes import ShapeProperties
from .series import Series
class BandFormat(Serialisable):
tagname = "bandFmt"
idx = NestedInteger()
spPr = Typed(expected_type=ShapeProperties, allow_none=True)
__elements__ = ('idx', 'spPr')
def __init__(self,
idx=0,
spPr=None,
):
self.idx = idx
self.spPr = spPr
class BandFormats(Serialisable):
tagname = "bandFmts"
bandFmt = Sequence(expected_type=BandFormat, allow_none=True)
__elements__ = ('bandFmt',)
def __init__(self,
bandFmt=(),
):
self.bandFmt = bandFmt
class _SurfaceChartBase(ChartBase):
wireframe = NestedBool(allow_none=True)
ser = Sequence(expected_type=Series, allow_none=True)
bandFmts = Typed(expected_type=BandFormats, allow_none=True)
_series_type = "surface"
__elements__ = ('wireframe', 'ser', 'bandFmts')
def __init__(self,
wireframe=None,
ser=(),
bandFmts=None,
):
self.wireframe = wireframe
self.ser = ser
self.bandFmts = bandFmts
super(_SurfaceChartBase, self).__init__()
class SurfaceChart(_SurfaceChartBase):
tagname = "surfaceChart"
wireframe = _SurfaceChartBase.wireframe
ser = _SurfaceChartBase.ser
bandFmts = _SurfaceChartBase.bandFmts
extLst = Typed(expected_type=ExtensionList, allow_none=True)
x_axis = Typed(expected_type=TextAxis)
y_axis = Typed(expected_type=NumericAxis)
z_axis = Typed(expected_type=SeriesAxis, allow_none=True)
__elements__ = _SurfaceChartBase.__elements__ + ('axId',)
def __init__(self, axId=None, extLst=None, **kw ):
self.x_axis = TextAxis()
self.y_axis = NumericAxis()
self.z_axis = None
super(SurfaceChart, self).__init__(**kw)
class SurfaceChart3D(_SurfaceChartBase):
tagname = "surface3DChart"
wireframe = _SurfaceChartBase.wireframe
ser = _SurfaceChartBase.ser
bandFmts = _SurfaceChartBase.bandFmts
extLst = SurfaceChart.extLst
x_axis = Typed(expected_type=TextAxis)
y_axis = Typed(expected_type=NumericAxis)
z_axis = Typed(expected_type=SeriesAxis)
__elements__ = _SurfaceChartBase.__elements__ + ('axId',)
def __init__(self, axId=None, **kw):
self.x_axis = TextAxis()
self.y_axis = NumericAxis()
self.z_axis = SeriesAxis()
super(SurfaceChart3D, self).__init__(**kw)
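# Usage sketch (illustrative, not part of this module): a surface chart is
# normally built through openpyxl's public chart API, roughly as follows,
# where `ws` is an existing worksheet:
#
#     from openpyxl.chart import SurfaceChart, Reference
#     chart = SurfaceChart()
#     data = Reference(ws, min_col=2, min_row=1, max_col=4, max_row=10)
#     labels = Reference(ws, min_col=1, min_row=2, max_row=10)
#     chart.add_data(data, titles_from_data=True)
#     chart.set_categories(labels)
#     ws.add_chart(chart, "A12")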
|
apache-2.0
|
jruben/jruben.github.io
|
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_luabuiltins.py
|
95
|
6914
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib.request, urllib.parse, urllib.error
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
# that recognize if a gioven function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.request.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.request.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
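# Illustrative note (not part of the generated data): the Lua lexer flattens
# MODULES into one lookup set when highlighting builtins, roughly:
#
#     lua_builtins = set(n for names in MODULES.values() for n in names)
#     'math.sqrt' in lua_builtins   # -> True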
|
mit
|
density215/d215-miniblog
|
django/contrib/gis/tests/test_measure.py
|
231
|
8820
|
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
from django.contrib.gis.measure import Distance, Area, D, A
from django.utils import unittest
class DistanceTest(unittest.TestCase):
"Testing the Distance object"
def testInit(self):
"Testing initialisation from valid units"
d = Distance(m=100)
self.assertEqual(d.m, 100)
d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
def testInitInvalid(self):
"Testing initialisation from invalid units"
self.assertRaises(AttributeError, D, banana=100)
def testAccess(self):
"Testing access in different units"
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
def testAccessInvalid(self):
"Testing access in invalid units"
d = D(m=100)
self.failIf(hasattr(d, 'banana'))
def testAddition(self):
"Test addition & subtraction"
d1 = D(m=100)
d2 = D(m=200)
d3 = d1 + d2
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = d1 - d2
self.assertEqual(d4.m, -100)
d4 -= d1
self.assertEqual(d4.m, -200)
try:
d5 = d1 + 1
except TypeError, e:
pass
else:
self.fail('Distance + number should raise TypeError')
try:
d5 = d1 - 1
except TypeError, e:
pass
else:
self.fail('Distance - number should raise TypeError')
try:
d1 += 1
except TypeError, e:
pass
else:
self.fail('Distance += number should raise TypeError')
try:
d1 -= 1
except TypeError, e:
pass
else:
self.fail('Distance -= number should raise TypeError')
def testMultiplication(self):
"Test multiplication & division"
d1 = D(m=100)
d3 = d1 * 2
self.assertEqual(d3.m, 200)
d3 = 2 * d1
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = d1 / 2
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
a5 = d1 * D(m=10)
self.assertTrue(isinstance(a5, Area))
self.assertEqual(a5.sq_m, 100*10)
try:
d1 *= D(m=1)
except TypeError, e:
pass
else:
self.fail('Distance *= Distance should raise TypeError')
try:
d5 = d1 / D(m=1)
except TypeError, e:
pass
else:
self.fail('Distance / Distance should raise TypeError')
try:
d1 /= D(m=1)
except TypeError, e:
pass
else:
self.fail('Distance /= Distance should raise TypeError')
def testUnitConversions(self):
"Testing default units during maths"
d1 = D(m=100)
d2 = D(km=1)
d3 = d1 + d2
self.assertEqual(d3._default_unit, 'm')
d4 = d2 + d1
self.assertEqual(d4._default_unit, 'km')
d5 = d1 * 2
self.assertEqual(d5._default_unit, 'm')
d6 = d1 / 2
self.assertEqual(d6._default_unit, 'm')
def testComparisons(self):
"Testing comparisons"
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertTrue(d2 > d1)
self.assertTrue(d1 == d1)
self.assertTrue(d1 < d2)
self.failIf(d3)
def testUnitsStr(self):
"Testing conversion to strings"
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
def testUnitAttName(self):
"Testing the `unit_attname` class method"
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for nm, att in unit_tuple:
self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
"Testing the Area object"
def testInit(self):
"Testing initialisation from valid units"
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
    def testInitInvalid(self):
"Testing initialisation from invalid units"
self.assertRaises(AttributeError, A, banana=100)
def testAccess(self):
"Testing access in different units"
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
    def testAccessInvalid(self):
"Testing access in invalid units"
a = A(sq_m=100)
self.failIf(hasattr(a, 'banana'))
def testAddition(self):
"Test addition & subtraction"
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = a1 + a2
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = a1 - a2
self.assertEqual(a4.sq_m, -100)
a4 -= a1
self.assertEqual(a4.sq_m, -200)
try:
a5 = a1 + 1
except TypeError, e:
pass
else:
self.fail('Area + number should raise TypeError')
try:
a5 = a1 - 1
except TypeError, e:
pass
else:
self.fail('Area - number should raise TypeError')
try:
a1 += 1
except TypeError, e:
pass
else:
self.fail('Area += number should raise TypeError')
try:
a1 -= 1
except TypeError, e:
pass
else:
self.fail('Area -= number should raise TypeError')
def testMultiplication(self):
"Test multiplication & division"
a1 = A(sq_m=100)
a3 = a1 * 2
self.assertEqual(a3.sq_m, 200)
a3 = 2 * a1
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = a1 / 2
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
try:
a5 = a1 * A(sq_m=1)
except TypeError, e:
pass
else:
self.fail('Area * Area should raise TypeError')
try:
a1 *= A(sq_m=1)
except TypeError, e:
pass
else:
self.fail('Area *= Area should raise TypeError')
try:
a5 = a1 / A(sq_m=1)
except TypeError, e:
pass
else:
self.fail('Area / Area should raise TypeError')
try:
a1 /= A(sq_m=1)
except TypeError, e:
pass
else:
self.fail('Area /= Area should raise TypeError')
def testUnitConversions(self):
"Testing default units during maths"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = a1 + a2
self.assertEqual(a3._default_unit, 'sq_m')
a4 = a2 + a1
self.assertEqual(a4._default_unit, 'sq_km')
a5 = a1 * 2
self.assertEqual(a5._default_unit, 'sq_m')
a6 = a1 / 2
self.assertEqual(a6._default_unit, 'sq_m')
def testComparisons(self):
"Testing comparisons"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertTrue(a2 > a1)
self.assertTrue(a1 == a1)
self.assertTrue(a1 < a2)
self.failIf(a3)
def testUnitsStr(self):
"Testing conversion to strings"
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DistanceTest))
s.addTest(unittest.makeSuite(AreaTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
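# Usage sketch (illustrative, not part of the test suite): the measures under
# test combine unit keyword arguments with arithmetic, for example
#   D(km=1) + D(m=500)  evaluates to Distance(km=1.5)
#   D(m=10) * D(m=10)   evaluates to Area(sq_m=100.0)
# mirroring the assertions in testUnitConversions and testMultiplication above.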
|
bsd-3-clause
|
sudeepdutt/mic
|
scripts/gdb/linux/lists.py
|
630
|
2897
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
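# Usage sketch (assumed, not part of the original script): with the Linux gdb
# helpers loaded against a vmlinux, the command registered above is run on any
# struct list_head expression, e.g.
#   (gdb) lx-list-check init_task.tasks
# and either reports "list is consistent: N node(s)" or describes the first
# broken prev/next link it encounters.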
|
gpl-2.0
|
johannesu/cnn-cells
|
network.py
|
1
|
1240
|
import lasagne
def setup(input_var, box_size = 29,):
# Setup network
net = lasagne.layers.InputLayer(
shape=(None, 1, box_size, box_size),
input_var=input_var)
# stage 1 : filter bank -> squashing -> max-pooling
net = lasagne.layers.Conv2DLayer(
net,
num_filters=30,
filter_size=(3, 3),
nonlinearity = lasagne.nonlinearities.rectify)
net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
# stage 2 : filter bank -> squashing -> max-pooling
net = lasagne.layers.Conv2DLayer(
net,
num_filters=30,
filter_size=(3, 3),
nonlinearity = lasagne.nonlinearities.rectify)
net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
    # last stage: standard 2-layer fully connected neural network with 50% dropout
net = lasagne.layers.DenseLayer(
lasagne.layers.dropout(net, p=.5),
num_units=100,
nonlinearity=lasagne.nonlinearities.rectify)
# Softmax output
net = lasagne.layers.DenseLayer(
lasagne.layers.dropout(net, p=.5),
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return net
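# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal way setup() might be driven; Theano and Lasagne are assumed to be
# installed, and `input_var` here is a hypothetical Theano 4D tensor.
def _example_usage():
    import theano.tensor as T
    input_var = T.tensor4('inputs')
    net = setup(input_var, box_size=29)
    # count_params sums the trainable parameters of the stacked layers
    return lasagne.layers.count_params(net)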
|
mit
|
ToonTownInfiniteRepo/ToontownInfinite
|
toontown/coghq/DistributedLawOfficeFloorAI.py
|
4
|
5468
|
import cPickle
import CogDisguiseGlobals
import FactoryEntityCreatorAI
import FactorySpecs
import LawOfficeBase
import LevelSuitPlannerAI
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObjectAI
from direct.task import Task
from otp.level import DistributedLevelAI
from otp.level import LevelSpec
from toontown.ai.ToonBarrier import *
from toontown.coghq import DistributedBattleFactoryAI
from toontown.coghq import DistributedLawOfficeElevatorIntAI
from toontown.coghq import LawOfficeLayout
from toontown.suit import DistributedFactorySuitAI
from toontown.toonbase import ToontownGlobals, ToontownBattleGlobals
class DistributedLawOfficeFloorAI(DistributedLevelAI.DistributedLevelAI, LawOfficeBase.LawOfficeBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawOfficeAI')
def __init__(self, air, lawOfficeId, zoneId, entranceId, avIds, spec):
DistributedLevelAI.DistributedLevelAI.__init__(self, air, zoneId, entranceId, avIds)
LawOfficeBase.LawOfficeBase.__init__(self)
self.setLawOfficeId(lawOfficeId)
self.layout = None
self.elevator = None
self.level = None
self.spec = spec
def createEntityCreator(self):
return FactoryEntityCreatorAI.FactoryEntityCreatorAI(level = self)
def getBattleCreditMultiplier(self):
return ToontownBattleGlobals.getFactoryCreditMultiplier(self.lawOfficeId)
def generate(self):
self.notify.info('generate')
self.notify.info('start factory %s %s creation, frame=%s' % (self.lawOfficeId, self.doId, globalClock.getFrameCount()))
self.layout = LawOfficeLayout.LawOfficeLayout(self.lawOfficeId)
self.startFloor()
def startFloor(self):
self.notify.info('loading spec')
self.factorySpec = LevelSpec.LevelSpec(self.spec)
if __dev__:
self.notify.info('creating entity type registry')
typeReg = self.getEntityTypeReg()
self.factorySpec.setEntityTypeReg(typeReg)
self.notify.info('creating entities')
DistributedLevelAI.DistributedLevelAI.generate(self, self.factorySpec)
self.notify.info('creating cogs')
cogSpecModule = FactorySpecs.getCogSpecModule(self.lawOfficeId)
self.planner = LevelSuitPlannerAI.LevelSuitPlannerAI(self.air, self, DistributedFactorySuitAI.DistributedFactorySuitAI, DistributedBattleFactoryAI.DistributedBattleFactoryAI, cogSpecModule.CogData, cogSpecModule.ReserveCogData, cogSpecModule.BattleCells)
suitHandles = self.planner.genSuits()
messenger.send('plannerCreated-' + str(self.doId))
self.suits = suitHandles['activeSuits']
self.reserveSuits = suitHandles['reserveSuits']
self.d_setSuits()
scenario = 0
description = '%s|%s|%s|%s' % (self.lawOfficeId, self.entranceId, scenario, self.avIdList)
for avId in self.avIdList:
self.air.writeServerEvent('DAOffice Entered', avId, description)
self.notify.info('finish factory %s %s creation' % (self.lawOfficeId, self.doId))
def delete(self):
self.notify.info('delete: %s' % self.doId)
suits = self.suits
for reserve in self.reserveSuits:
suits.append(reserve[0])
self.planner.destroy()
del self.planner
for suit in suits:
if not suit.isDeleted():
suit.factoryIsGoingDown()
suit.requestDelete()
DistributedLevelAI.DistributedLevelAI.delete(self, False)
def readyForNextFloor(self):
toonId = self.air.getAvatarIdFromSender()
self._DistributedLawOfficeFloorAI__barrier.clear(toonId)
def dumpEveryone(self):
pass
def getTaskZoneId(self):
return self.lawOfficeId
def getLawOfficeId(self):
return self.lawOfficeId
def d_setForemanConfronted(self, avId):
if avId in self.avIdList:
self.sendUpdate('setForemanConfronted', [avId])
else:
self.notify.warning('%s: d_setForemanConfronted: av %s not in av list %s' % (self.doId, avId, self.avIdList))
def setVictors(self, victorIds):
activeVictors = []
activeVictorIds = []
for victorId in victorIds:
toon = self.air.doId2do.get(victorId)
if toon is not None:
activeVictors.append(toon)
activeVictorIds.append(victorId)
scenario = 0
description = '%s|%s|%s|%s' % (self.lawOfficeId, self.entranceId, scenario, activeVictorIds)
for avId in activeVictorIds:
self.air.writeServerEvent('DAOffice Defeated', avId, description)
for toon in activeVictors:
simbase.air.questManager.toonDefeatedFactory(toon, self.lawOfficeId, activeVictors)
def b_setDefeated(self):
self.d_setDefeated()
self.setDefeated()
def d_setDefeated(self):
self.sendUpdate('setDefeated')
def setDefeated(self):
pass
def getCogLevel(self):
return self.cogLevel
def d_setSuits(self):
self.sendUpdate('setSuits', [self.getSuits(), self.getReserveSuits()])
def getSuits(self):
suitIds = []
for suit in self.suits:
suitIds.append(suit.doId)
return suitIds
def getReserveSuits(self):
suitIds = []
for suit in self.reserveSuits:
suitIds.append(suit[0].doId)
return suitIds
|
mit
|
nisse3000/pymatgen
|
pymatgen/io/feff/tests/test_outputs.py
|
4
|
3050
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.outputs import LDos, Xmu
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
test_dir_reci = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', 'feff_reci_dos')
class FeffLdosTest(unittest.TestCase):
filepath1 = os.path.join(test_dir, 'feff.inp')
filepath2 = os.path.join(test_dir, 'ldos')
l = LDos.from_file(filepath1, filepath2)
reci_feffinp = os.path.join(test_dir_reci, 'feff.inp')
reci_ldos = os.path.join(test_dir_reci, 'ldos')
reci_dos = LDos.from_file(reci_feffinp, reci_ldos)
def test_init(self):
efermi = FeffLdosTest.l.complete_dos.efermi
self.assertEqual(efermi, -11.430,
"Did not read correct Fermi energy from ldos file")
def test_complete_dos(self):
complete_dos = FeffLdosTest.l.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
- 11.430,
"Failed to construct complete_dos dict properly")
def test_as_dict_and_from_dict(self):
l2 = FeffLdosTest.l.charge_transfer_to_string()
d = FeffLdosTest.l.as_dict()
l3 = LDos.from_dict(d).charge_transfer_to_string()
self.assertEqual(l2, l3, "Feffldos to and from dict does not match")
def test_reci_init(self):
efermi = FeffLdosTest.reci_dos.complete_dos.efermi
self.assertEqual(efermi, -9.672,
"Did not read correct Fermi energy from ldos file")
def test_reci_complete_dos(self):
complete_dos = FeffLdosTest.reci_dos.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
-9.672,
"Failed to construct complete_dos dict properly")
def test_reci_charge(self):
charge_trans = FeffLdosTest.reci_dos.charge_transfer
self.assertEqual(charge_trans['0']['Na']['s'], 0.241)
self.assertEqual(charge_trans['1']['O']['tot'], -0.594)
class XmuTest(unittest.TestCase):
def test_init(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
self.assertEqual(x.absorbing_atom, 'O',
"failed to read xmu.dat file properly")
def test_as_dict_and_from_dict(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
data=x.data.tolist()
d=x.as_dict()
x2 = Xmu.from_dict(d)
data2= x2.data.tolist()
self.assertEqual(data, data2, "Xmu to and from dict does not match")
if __name__ == '__main__':
unittest.main()
|
mit
|
umitproject/packet-manipulator
|
umit/pm/backend/umpa/context/sniff.py
|
2
|
2062
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from __future__ import with_statement
from datetime import datetime
from threading import Thread, Lock
from umit.pm.core.i18n import _
from umit.pm.backend.scapy import *
def register_sniff_context(BaseSniffContext):
class SniffContext(BaseSniffContext):
"""
A sniff context for controlling various options.
"""
has_stop = False
has_pause = False
has_restart = False
def __init__(self, *args, **kwargs):
BaseSniffContext.__init__(self, *args, **kwargs)
self.title = _('%s capture') % self.iface
def get_percentage(self):
return 100.0
def _start(self):
            self.summary = _('Sniff is not available with this backend')
return False
return SniffContext
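# Usage sketch (assumed, the base-class name is hypothetical): the backend
# layer calls the factory once to obtain a concrete context class, roughly
#   SniffContext = register_sniff_context(SomeBaseSniffContext)
# and with this backend any started context simply reports that sniffing is
# unavailable (see _start above).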
|
gpl-2.0
|
Aeronavics/MissionPlanner
|
ExtLibs/Mavlink/pymavlink/generator/mavparse.py
|
1
|
19826
|
#!/usr/bin/env python
'''
mavlink python parse functions
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
from __future__ import print_function
from builtins import range
from builtins import object
import errno
import operator
import os
import sys
import time
import xml.parsers.expat
PROTOCOL_0_9 = "0.9"
PROTOCOL_1_0 = "1.0"
PROTOCOL_2_0 = "2.0"
# message flags
FLAG_HAVE_TARGET_SYSTEM = 1
FLAG_HAVE_TARGET_COMPONENT = 2
class MAVParseError(Exception):
def __init__(self, message, inner_exception=None):
self.message = message
self.inner_exception = inner_exception
self.exception_info = sys.exc_info()
def __str__(self):
return self.message
class MAVField(object):
def __init__(self, name, type, print_format, xml, description='', enum='', display='', units=''):
self.name = name
self.name_upper = name.upper()
self.description = description
self.array_length = 0
self.enum = enum
self.display = display
self.units = units
self.omit_arg = False
self.const_value = None
self.print_format = print_format
lengths = {
'float' : 4,
'double' : 8,
'char' : 1,
'int8_t' : 1,
'uint8_t' : 1,
'uint8_t_mavlink_version' : 1,
'int16_t' : 2,
'uint16_t' : 2,
'int32_t' : 4,
'uint32_t' : 4,
'int64_t' : 8,
'uint64_t' : 8,
}
if type=='uint8_t_mavlink_version':
type = 'uint8_t'
self.omit_arg = True
self.const_value = xml.version
aidx = type.find("[")
if aidx != -1:
assert type[-1:] == ']'
self.array_length = int(type[aidx+1:-1])
type = type[0:aidx]
if type == 'array':
type = 'int8_t'
if type in lengths:
self.type_length = lengths[type]
self.type = type
elif (type+"_t") in lengths:
self.type_length = lengths[type+"_t"]
self.type = type+'_t'
else:
raise MAVParseError("unknown type '%s'" % type)
if self.array_length != 0:
self.wire_length = self.array_length * self.type_length
else:
self.wire_length = self.type_length
self.type_upper = self.type.upper()
def gen_test_value(self, i):
'''generate a testsuite value for a MAVField'''
if self.const_value:
return self.const_value
elif self.type == 'float':
return 17.0 + self.wire_offset*7 + i
elif self.type == 'double':
return 123.0 + self.wire_offset*7 + i
elif self.type == 'char':
return chr(ord('A') + (self.wire_offset + i)%26)
elif self.type in [ 'int8_t', 'uint8_t' ]:
return (5 + self.wire_offset*67 + i) & 0xFF
elif self.type in ['int16_t', 'uint16_t']:
return (17235 + self.wire_offset*52 + i) & 0xFFFF
elif self.type in ['int32_t', 'uint32_t']:
return (963497464 + self.wire_offset*52 + i)&0xFFFFFFFF
elif self.type in ['int64_t', 'uint64_t']:
return 93372036854775807 + self.wire_offset*63 + i
else:
raise MAVParseError('unknown type %s' % self.type)
def set_test_value(self):
'''set a testsuite value for a MAVField'''
if self.array_length:
self.test_value = []
for i in range(self.array_length):
self.test_value.append(self.gen_test_value(i))
else:
self.test_value = self.gen_test_value(0)
if self.type == 'char' and self.array_length:
v = ""
for c in self.test_value:
v += c
self.test_value = v[:-1]
class MAVType(object):
def __init__(self, name, id, linenumber, description=''):
self.name = name
self.name_lower = name.lower()
self.linenumber = linenumber
self.id = int(id)
self.description = description
self.fields = []
self.fieldnames = []
self.extensions_start = None
def base_fields(self):
'''return number of non-extended fields'''
if self.extensions_start is None:
return len(self.fields)
return len(self.fields[:self.extensions_start])
class MAVEnumParam(object):
def __init__(self, index, description=''):
self.index = index
self.description = description
class MAVEnumEntry(object):
def __init__(self, name, value, description='', end_marker=False, autovalue=False, origin_file='', origin_line=0):
self.name = name
self.value = value
self.description = description
self.param = []
self.end_marker = end_marker
self.autovalue = autovalue # True if value was *not* specified in XML
self.origin_file = origin_file
self.origin_line = origin_line
class MAVEnum(object):
def __init__(self, name, linenumber, description=''):
self.name = name
self.description = description
self.entry = []
self.start_value = None
self.highest_value = 0
self.linenumber = linenumber
class MAVXML(object):
'''parse a mavlink XML file'''
def __init__(self, filename, wire_protocol_version=PROTOCOL_0_9):
self.filename = filename
self.basename = os.path.basename(filename)
if self.basename.lower().endswith(".xml"):
self.basename = self.basename[:-4]
self.basename_upper = self.basename.upper()
self.message = []
self.enum = []
# we use only the day for the parse_time, as otherwise
# it causes a lot of unnecessary cache misses with ccache
self.parse_time = time.strftime("%a %b %d %Y")
self.version = 2
self.include = []
self.wire_protocol_version = wire_protocol_version
# setup the protocol features for the requested protocol version
if wire_protocol_version == PROTOCOL_0_9:
self.protocol_marker = ord('U')
self.sort_fields = False
self.little_endian = False
self.crc_extra = False
self.crc_struct = False
self.command_24bit = False
self.allow_extensions = False
elif wire_protocol_version == PROTOCOL_1_0:
self.protocol_marker = 0xFE
self.sort_fields = True
self.little_endian = True
self.crc_extra = True
self.crc_struct = False
self.command_24bit = False
self.allow_extensions = False
elif wire_protocol_version == PROTOCOL_2_0:
self.protocol_marker = 0xFD
self.sort_fields = True
self.little_endian = True
self.crc_extra = True
self.crc_struct = True
self.command_24bit = True
self.allow_extensions = True
else:
print("Unknown wire protocol version")
print("Available versions are: %s %s" % (PROTOCOL_0_9, PROTOCOL_1_0, PROTOCOL_2_0))
raise MAVParseError('Unknown MAVLink wire protocol version %s' % wire_protocol_version)
in_element_list = []
def check_attrs(attrs, check, where):
for c in check:
if c not in attrs:
raise MAVParseError('expected missing %s "%s" attribute at %s:%u' % (
where, c, filename, p.CurrentLineNumber))
def start_element(name, attrs):
in_element_list.append(name)
in_element = '.'.join(in_element_list)
#print in_element
if in_element == "mavlink.messages.message":
check_attrs(attrs, ['name', 'id'], 'message')
self.message.append(MAVType(attrs['name'], attrs['id'], p.CurrentLineNumber))
elif in_element == "mavlink.messages.message.extensions":
self.message[-1].extensions_start = len(self.message[-1].fields)
elif in_element == "mavlink.messages.message.field":
check_attrs(attrs, ['name', 'type'], 'field')
print_format = attrs.get('print_format', None)
enum = attrs.get('enum', '')
display = attrs.get('display', '')
units = attrs.get('units', '')
if units:
units = '[' + units + ']'
new_field = MAVField(attrs['name'], attrs['type'], print_format, self, enum=enum, display=display, units=units)
if self.message[-1].extensions_start is None or self.allow_extensions:
self.message[-1].fields.append(new_field)
elif in_element == "mavlink.enums.enum":
check_attrs(attrs, ['name'], 'enum')
self.enum.append(MAVEnum(attrs['name'], p.CurrentLineNumber))
elif in_element == "mavlink.enums.enum.entry":
check_attrs(attrs, ['name'], 'enum entry')
# determine value and if it was automatically assigned (for possible merging later)
if 'value' in attrs:
value = eval(attrs['value'])
autovalue = False
else:
value = self.enum[-1].highest_value + 1
autovalue = True
# check lowest value
if (self.enum[-1].start_value is None or value < self.enum[-1].start_value):
self.enum[-1].start_value = value
# check highest value
if (value > self.enum[-1].highest_value):
self.enum[-1].highest_value = value
# append the new entry
self.enum[-1].entry.append(MAVEnumEntry(attrs['name'], value, '', False, autovalue, self.filename, p.CurrentLineNumber))
elif in_element == "mavlink.enums.enum.entry.param":
check_attrs(attrs, ['index'], 'enum param')
self.enum[-1].entry[-1].param.append(MAVEnumParam(attrs['index']))
def end_element(name):
in_element_list.pop()
def char_data(data):
in_element = '.'.join(in_element_list)
if in_element == "mavlink.messages.message.description":
self.message[-1].description += data
elif in_element == "mavlink.messages.message.field":
if self.message[-1].extensions_start is None or self.allow_extensions:
self.message[-1].fields[-1].description += data
elif in_element == "mavlink.enums.enum.description":
self.enum[-1].description += data
elif in_element == "mavlink.enums.enum.entry.description":
self.enum[-1].entry[-1].description += data
elif in_element == "mavlink.enums.enum.entry.param":
self.enum[-1].entry[-1].param[-1].description += data
elif in_element == "mavlink.version":
self.version = int(data)
elif in_element == "mavlink.include":
self.include.append(data)
f = open(filename, mode='rb')
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
p.ParseFile(f)
f.close()
self.message_lengths = {}
self.message_min_lengths = {}
self.message_flags = {}
self.message_target_system_ofs = {}
self.message_target_component_ofs = {}
self.message_crcs = {}
self.message_names = {}
self.largest_payload = 0
if not self.command_24bit:
# remove messages with IDs > 255
m2 = []
for m in self.message:
if m.id <= 255:
m2.append(m)
else:
print("Ignoring MAVLink2 message %s" % m.name)
self.message = m2
for m in self.message:
if not self.command_24bit and m.id > 255:
continue
m.wire_length = 0
m.wire_min_length = 0
m.fieldnames = []
m.fieldlengths = []
m.ordered_fieldnames = []
m.message_flags = 0
m.target_system_ofs = 0
m.target_component_ofs = 0
if self.sort_fields:
# when we have extensions we only sort up to the first extended field
sort_end = m.base_fields()
m.ordered_fields = sorted(m.fields[:sort_end],
key=operator.attrgetter('type_length'),
reverse=True)
m.ordered_fields.extend(m.fields[sort_end:])
else:
m.ordered_fields = m.fields
for f in m.fields:
m.fieldnames.append(f.name)
L = f.array_length
if L == 0:
m.fieldlengths.append(1)
elif L > 1 and f.type == 'char':
m.fieldlengths.append(1)
else:
m.fieldlengths.append(L)
for i in range(len(m.ordered_fields)):
f = m.ordered_fields[i]
f.wire_offset = m.wire_length
m.wire_length += f.wire_length
if m.extensions_start is None or i < m.extensions_start:
m.wire_min_length = m.wire_length
m.ordered_fieldnames.append(f.name)
f.set_test_value()
if f.name.find('[') != -1:
raise MAVParseError("invalid field name with array descriptor %s" % f.name)
# having flags for target_system and target_component helps a lot for routing code
if f.name == 'target_system':
m.message_flags |= FLAG_HAVE_TARGET_SYSTEM
m.target_system_ofs = f.wire_offset
elif f.name == 'target_component':
m.message_flags |= FLAG_HAVE_TARGET_COMPONENT
m.target_component_ofs = f.wire_offset
m.num_fields = len(m.fieldnames)
if m.num_fields > 64:
raise MAVParseError("num_fields=%u : Maximum number of field names allowed is" % (
m.num_fields, 64))
m.crc_extra = message_checksum(m)
key = m.id
self.message_crcs[key] = m.crc_extra
self.message_lengths[key] = m.wire_length
self.message_min_lengths[key] = m.wire_min_length
self.message_names[key] = m.name
self.message_flags[key] = m.message_flags
self.message_target_system_ofs[key] = m.target_system_ofs
self.message_target_component_ofs[key] = m.target_component_ofs
if m.wire_length > self.largest_payload:
self.largest_payload = m.wire_length
if m.wire_length+8 > 64:
print("Note: message %s is longer than 64 bytes long (%u bytes), which can cause fragmentation since many radio modems use 64 bytes as maximum air transfer unit." % (m.name, m.wire_length+8))
def __str__(self):
return "MAVXML for %s from %s (%u message, %u enums)" % (
self.basename, self.filename, len(self.message), len(self.enum))
def message_checksum(msg):
'''calculate a 8-bit checksum of the key fields of a message, so we
can detect incompatible XML changes'''
from .mavcrc import x25crc
crc = x25crc()
crc.accumulate_str(msg.name + ' ')
# in order to allow for extensions the crc does not include
# any field extensions
crc_end = msg.base_fields()
for i in range(crc_end):
f = msg.ordered_fields[i]
crc.accumulate_str(f.type + ' ')
crc.accumulate_str(f.name + ' ')
if f.array_length:
crc.accumulate([f.array_length])
return (crc.crc&0xFF) ^ (crc.crc>>8)
def merge_enums(xml):
'''merge enums between XML files'''
emap = {}
for x in xml:
newenums = []
for enum in x.enum:
if enum.name in emap:
emapitem = emap[enum.name]
# check for possible conflicting auto-assigned values after merge
if (emapitem.start_value <= enum.highest_value and emapitem.highest_value >= enum.start_value):
for entry in emapitem.entry:
# correct the value if necessary, but only if it was auto-assigned to begin with
if entry.value <= enum.highest_value and entry.autovalue is True:
entry.value = enum.highest_value + 1
enum.highest_value = entry.value
# merge the entries
emapitem.entry.extend(enum.entry)
if not emapitem.description:
emapitem.description = enum.description
print("Merged enum %s" % enum.name)
else:
newenums.append(enum)
emap[enum.name] = enum
x.enum = newenums
for e in emap:
# sort by value
emap[e].entry = sorted(emap[e].entry,
key=operator.attrgetter('value'),
reverse=False)
# add a ENUM_END
#emap[e].entry.append(MAVEnumEntry("%s_ENUM_END" % emap[e].name,
# emap[e].entry[-1].value+1, end_marker=True))
def check_duplicates(xml):
'''check for duplicate message IDs'''
merge_enums(xml)
msgmap = {}
enummap = {}
for x in xml:
for m in x.message:
key = m.id
if key in msgmap:
print("ERROR: Duplicate message id %u for %s (%s:%u) also used by %s" % (
m.id,
m.name,
x.filename, m.linenumber,
msgmap[key]))
return True
fieldset = set()
for f in m.fields:
if f.name in fieldset:
print("ERROR: Duplicate field %s in message %s (%s:%u)" % (
f.name, m.name,
x.filename, m.linenumber))
return True
fieldset.add(f.name)
msgmap[key] = '%s (%s:%u)' % (m.name, x.filename, m.linenumber)
for enum in x.enum:
for entry in enum.entry:
if entry.autovalue is True and "common.xml" not in entry.origin_file:
print("Note: An enum value was auto-generated: %s = %u" % (entry.name, entry.value))
s1 = "%s.%s" % (enum.name, entry.name)
s2 = "%s.%s" % (enum.name, entry.value)
if s1 in enummap or s2 in enummap:
print("ERROR: Duplicate enum %s:\n\t%s = %s @ %s:%u\n\t%s" % (
"names" if s1 in enummap else "values",
s1, entry.value, entry.origin_file, entry.origin_line,
enummap.get(s1) or enummap.get(s2)))
return True
enummap[s1] = enummap[s2] = "%s.%s = %s @ %s:%u" % (enum.name, entry.name, entry.value, entry.origin_file, entry.origin_line)
return False
def total_msgs(xml):
'''count total number of msgs'''
count = 0
for x in xml:
count += len(x.message)
return count
def mkdir_p(dir):
try:
os.makedirs(dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
# check version consistent
# add test.xml
# finish test suite
# printf style error macro, if defined call errors
|
gpl-3.0
|
Kwpolska/upass
|
docs/conf.py
|
1
|
7406
|
# -*- coding: utf-8 -*-
#
# upass documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 14 21:02:58 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'upass'
copyright = u'2015-2021, Chris Warrick'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'upassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'upass.tex', u'upass Documentation',
u'Chris Warrick', 'manual'),
]
latex_elements = {'papersize': 'a4paper', 'fontpkg': '\\usepackage{tgheros}',
'fncychap': '\\usepackage[Sonny]{fncychap}'}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'upass', u'upass Documentation',
[u'Chris Warrick'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
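# Build sketch (not part of the generated configuration): with Sphinx
# installed, the HTML documentation is typically produced from this
# directory with
#   sphinx-build -b html . _build/html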
|
bsd-3-clause
|
seem-sky/FrameworkBenchmarks
|
aspnet/setup_xsp.py
|
7
|
1097
|
import subprocess
import sys
import setup_util
import os
def start(args, logfile, errfile):
if os.name == 'nt':
return 1
setup_util.replace_text("aspnet/src/Web.config", "localhost", args.database_host)
try:
subprocess.check_call("rm -rf bin obj", shell=True, cwd="aspnet/src", stderr=errfile, stdout=logfile)
subprocess.check_call("xbuild /p:Configuration=Release", shell=True, cwd="aspnet/src", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo chown -R ubuntu:ubuntu /usr/local/etc/mono", shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen("MONO_OPTIONS=--gc=sgen xsp4 --nonstop", shell=True, cwd="aspnet/src", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
if os.name == 'nt':
return 0
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'xsp4' in line:
pid = int(line.split(None, 2)[1])
try:
os.kill(pid, 15)
except OSError:
pass
return 0
|
bsd-3-clause
|
EiSandi/greetingslack
|
greetingslack/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py
|
3127
|
1902
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
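# --- Illustrative check (added sketch, not part of the vendored module) ---
# filter_high_bit_only() collapses runs of ASCII bytes into single spaces so
# that only high-bit sequences remain for later frequency analysis.
def _demo_high_bit_filter():
    prober = CharSetProber()
    # b'abc' and b'xyz' are each replaced by one space; the UTF-8 bytes survive
    return prober.filter_high_bit_only(b'abc\xe4\xb8\xadxyz')  # b' \xe4\xb8\xad '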
|
mit
|
chaowyc/youtube-dl
|
youtube_dl/extractor/spiegeltv.py
|
87
|
4011
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
float_or_none,
)
class SpiegeltvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'
_TESTS = [{
'url': 'http://www.spiegel.tv/filme/flug-mh370/',
'info_dict': {
'id': 'flug-mh370',
'ext': 'm4v',
'title': 'Flug MH370',
'description': 'Das Rätsel um die Boeing 777 der Malaysia-Airlines',
'thumbnail': 're:http://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/',
'only_matching': True,
}]
def _real_extract(self, url):
if '/#/' in url:
url = url.replace('/#/', '/')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')
apihost = 'http://spiegeltv-ivms2-restapi.s3.amazonaws.com'
version_json = self._download_json(
'%s/version.json' % apihost, video_id,
note='Downloading version information')
version_name = version_json['version_name']
slug_json = self._download_json(
'%s/%s/restapi/slugs/%s.json' % (apihost, version_name, video_id),
video_id,
note='Downloading object information')
oid = slug_json['object_id']
media_json = self._download_json(
'%s/%s/restapi/media/%s.json' % (apihost, version_name, oid),
video_id, note='Downloading media information')
uuid = media_json['uuid']
is_wide = media_json['is_wide']
server_json = self._download_json(
'http://spiegeltv-prod-static.s3.amazonaws.com/projectConfigs/projectConfig.json',
video_id, note='Downloading server information')
format = '16x9' if is_wide else '4x3'
formats = []
for streamingserver in server_json['streamingserver']:
endpoint = streamingserver.get('endpoint')
if not endpoint:
continue
play_path = 'mp4:%s_spiegeltv_0500_%s.m4v' % (uuid, format)
if endpoint.startswith('rtmp'):
formats.append({
'url': endpoint,
'format_id': 'rtmp',
'app': compat_urllib_parse_urlparse(endpoint).path[1:],
'play_path': play_path,
'player_path': 'http://prod-static.spiegel.tv/frontend-076.swf',
'ext': 'flv',
'rtmp_live': True,
})
elif determine_ext(endpoint) == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
endpoint.replace('[video]', play_path),
video_id, 'm4v',
preference=1, # Prefer hls since it allows to workaround georestriction
m3u8_id='hls', fatal=False)
if m3u8_formats is not False:
formats.extend(m3u8_formats)
else:
formats.append({
'url': endpoint,
})
thumbnails = []
for image in media_json['images']:
thumbnails.append({
'url': image['url'],
'width': image['width'],
'height': image['height'],
})
description = media_json['subtitle']
duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'thumbnails': thumbnails,
'formats': formats,
}
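# Usage sketch (illustrative): the extractor is exercised through the normal
# youtube-dl entry point, e.g. against the first test URL above:
#   youtube-dl "http://www.spiegel.tv/filme/flug-mh370/"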
|
unlicense
|
brennanblue/svgplotlib
|
svgplotlib/Gantt.py
|
2
|
11612
|
#!python -u
# -*- coding: utf-8 -*-
# Gantt code from Plone : http://svn.plone.org/svn/collective/xm.charting/
# License : GPL
import itertools
import calendar
import datetime
from datetime import date
from datetime import timedelta
from svgplotlib import Base
ONEWEEK = timedelta(days=7)
ONEDAY = timedelta(days=1)
# helper function to find the Monday of a given week number
def weekMonday(year, week):
startOfYear = datetime.date(year, 1, 1)
week0 = startOfYear - datetime.timedelta(days=startOfYear.isoweekday())
mon = week0 + datetime.timedelta(weeks=week) + datetime.timedelta(days=1)
return mon
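# Quick illustration (added note, not part of the original module):
# weekMonday(2009, 1) evaluates to datetime.date(2009, 1, 5); plot_weeks()
# below uses these Mondays as the start of each week column.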
class Duration:
def __init__(self, name, start, end, text = None, color = None):
self.name = unicode(name)
self.start = start
self.end = end
self.text = unicode(text) if text else None
self.color = color
class Gantt(Base):
"""
Gant plot
Example::
>>> items = []
>>> items.append(Duration('Item 1', date(2009, 1, 4), date(2009, 8, 10), '90%'))
>>> items.append(Duration('Item 2', date(2009, 3, 11), date(2009, 8, 17), '50%'))
>>> items.append(Duration('Item 3', date(2009, 4, 18), date(2009, 8, 24), '70%'))
>>> items.append(Duration('Item 4', date(2009, 5, 25), date(2009, 8, 31), '10%'))
>>> items.append(Duration('Item 4', date(2009, 5, 25), date(2009, 9, 27), '25%'))
>>>
>>> gantt = Gantt(items)
"""
PAD = 8
def __init__(self, data, **kwargs):
# smaller default font
if not 'fontSize' in kwargs:
kwargs['fontSize'] = 12
super(Gantt, self).__init__(**kwargs)
font = self.font
fontSize = self.fontSize
style = {
'stroke' : kwargs.get('color', 'black'),
'stroke-width' : '1',
'fill' : kwargs.get('color', 'black'),
}
dx, dy = self.PAD, self.PAD
main = self.Group(transform="translate(%d,%d)" % (dx,dy), **style)
# distance between rows
delta = fontSize + 2*self.PAD
# width & height
WIDTH = max(1000, kwargs.get('width', 1000)) - 2*self.PAD
# create name column
def draw_names():
size = self.textSize(u'Name')
maxwidth = size.width
maxheight = 0.
textdata = []
for duration in data:
text = unicode(duration.name)
dur_size = self.textSize(text)
textdata.append((text,dur_size))
maxwidth = max(dur_size.width, maxwidth)
x, y = Gantt.PAD, .5*delta
xpos = x
ypos = y + .5*size.height - .5*size.descent
main.EText(font, u'Name', x = xpos, y = ypos, stroke='none')
y += delta
maxheight += delta
for text, size in textdata:
ypos = y + .5*size.height - .5*size.descent
main.EText(font, text, x = x, y = ypos, stroke='none')
y += delta
maxheight += delta
return maxwidth + 2*Gantt.PAD, maxheight
name_width, HEIGHT = draw_names()
# create vertical lines
main.Line(x1 = 0, y1 = 0, x2 = 0, y2 = HEIGHT)
main.Line(x1 = WIDTH, y1 = 0, x2 = WIDTH, y2 = HEIGHT)
main.Line(x1 = name_width, y1 = 0,
x2 = name_width, y2 = HEIGHT)
# create horizontal lines
y = 0
for i in range(len(data) + 2):
main.Line(x1 = 0, y1 = i*delta, x2 = WIDTH, y2 = i*delta)
def draw_duration(x):
size = self.textSize(u'Duration')
maxwidth = size.width
textdata = []
for duration in data:
days = (duration.end - duration.start).days
text = u"%d days" % days
dur_size = self.textSize(text)
textdata.append((text,dur_size))
maxwidth = max(dur_size.width, maxwidth)
y = .5*delta
xpos = x + maxwidth + self.PAD - size.width
ypos = y + .5*size.height - .5*size.descent
main.EText(font, u'Duration', x = xpos, y = ypos, stroke='none')
y += delta
for text, size in textdata:
xpos = x + maxwidth + self.PAD - size.width
ypos = y + .5*size.height - .5*size.descent
main.EText(font, text, x = xpos, y = ypos, stroke='none')
y += delta
return maxwidth + 2*Gantt.PAD
duration_width = draw_duration(name_width)
# create vertical lines
main.Line(x1 = name_width + duration_width, y1 = 0,
x2 = name_width + duration_width, y2 = HEIGHT)
# find start and end dates
earliest = data[0].start
latest = data[0].end
for duration in data:
if duration.start < earliest:
earliest = duration.start
if duration.end > latest:
latest = duration.end
# make sure the first day is the first monday before (or on) the
# earliest date
while calendar.weekday(earliest.year, earliest.month,
earliest.day) != 0:
earliest -= ONEDAY
# make sure the last day is the last sunday after (or on) the latest
# date
while calendar.weekday(latest.year, latest.month, latest.day) != 6:
latest += ONEDAY
# how many units is one day?
if earliest > latest:
date_delta = earliest - latest
else:
date_delta = latest - earliest
day_size = float(WIDTH - (name_width + duration_width)) / date_delta.days
def plot_month(x, start, end):
y = .25*delta
year = start.year
month = start.month
endmonth = end.month
endyear = end.year
mstart = None
while year <= endyear:
if year == endyear and month > endmonth:
break
if month > 12:
month = 1
year += 1
if mstart is None:
mstart = start
else:
mstart = datetime.date(year, month, 1)
if month == 12:
mend = datetime.date(year + 1, 1, 1) - datetime.timedelta(days=1)
else:
mend = datetime.date(year, month + 1, 1) - datetime.timedelta(days=1)
if mend > end:
mend = end
rect_width = ((mend - mstart).days) * day_size
s = u"%s %d" % (calendar.month_abbr[month], year)
size = self.textSize(s)
if size.width > rect_width:
s = u"%s %d" % (calendar.month_abbr[month], year)
size = self.textSize(s)
if size.width > rect_width:
s = ""
xpos = x + .5*rect_width - .5*size.width
ypos = y + .5*size.height - .5*size.descent
if s:
main.EText(font, s, x = xpos, y = ypos, stroke='none')
main.Line(x1 = x, y1 = 0, x2 = x, y2 = .5*delta)
x += rect_width
month += 1
plot_month(name_width + duration_width, earliest, latest)
# create horizontal lines
main.Line(x1 = name_width + duration_width, y1 = .5*delta, x2 = WIDTH, y2 = .5*delta)
def plot_weeks(x, start, end):
y = .75*delta
year = start.year
month = start.month
week = int(start.strftime("%W"))
endyear = end.year
endmonth = end.month
endweek = int(end.strftime("%W"))
wstart = None
while year <= endyear:
if x >= WIDTH:
break
if wstart is None:
wstart = start
else:
wstart = weekMonday(year,week)
wend = wstart + datetime.timedelta(days=6)
rect_width = (wend - wstart).days * day_size
s = u"%d" % week
size = self.textSize(s)
if size.width > rect_width:
s = u""
xpos = x + .5*rect_width - .5*size.width
ypos = y + .5*size.height - 2.*size.descent
if s and x + size.width <= WIDTH:
main.EText(font, s, x = xpos, y = ypos, stroke='none')
main.Line(x1 = x, y1 = .5*delta, x2 = x, y2 = delta)
x += rect_width
# get next week
next = wstart + datetime.timedelta(days=7)
week = int(next.strftime("%W"))
month = int(next.strftime("%m"))
year = int(next.strftime("%Y"))
plot_weeks(name_width + duration_width, earliest, latest)
def plot_items(x, start, end):
y = delta
descent = font.get_descent()/64.
colors = itertools.cycle(self.COLORS)
for duration in data:
rect_start = x + (duration.start - start).days * day_size
rect_width = (duration.end - duration.start).days * day_size
color = duration.color or colors.next()
main.Rect(x = rect_start, y = y, width = rect_width, height = delta, fill = color)
if duration.text:
s = unicode(duration.text)
size = self.textSize(s)
xpos = rect_start + .5*rect_width - .5*size.width
ypos = y + .5*size.height + .5*delta
main.EText(font, s, x = xpos, y = ypos, stroke='none')
y += delta
plot_items(name_width + duration_width, earliest, latest)
# set total size
self.set('width', str(WIDTH + 2*self.PAD))
self.set('height', str(HEIGHT + 2*self.PAD))
if __name__ == '__main__':
from svgplotlib.SVG import show
items = []
items.append(Duration('Item 1', date(2009, 1, 4), date(2009, 8, 10), '90%'))
items.append(Duration('Item 2', date(2009, 3, 11), date(2009, 8, 17), '50%'))
items.append(Duration('Item 3', date(2009, 4, 18), date(2009, 8, 24), '70%'))
items.append(Duration('Item 4', date(2009, 5, 25), date(2009, 8, 31), '10%'))
items.append(Duration('Item 4', date(2009, 5, 25), date(2009, 9, 27), '25%'))
gantt = Gantt(items)
#gantt.write(encoding=None)
#gantt.write(file=open('gantt.svg','wb'))
show(gantt, gantt.width, gantt.height)
|
bsd-3-clause
|
mistercrunch/airflow
|
airflow/cli/commands/kerberos_command.py
|
14
|
1731
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Kerberos command"""
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.security import kerberos as krb
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations
@cli_utils.action_logging
def kerberos(args):
"""Start a kerberos ticket renewer"""
print(settings.HEADER)
if args.daemon:
pid, stdout, stderr, _ = setup_locations(
"kerberos", args.pid, args.stdout, args.stderr, args.log_file
)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
stdout=stdout,
stderr=stderr,
)
with ctx:
krb.run(principal=args.principal, keytab=args.keytab)
stdout.close()
stderr.close()
else:
krb.run(principal=args.principal, keytab=args.keytab)
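# Usage sketch (assumed from the argument names above): the renewer is
# normally launched through the Airflow CLI, roughly
#   airflow kerberos airflow/fqdn@REALM --keytab /path/to/airflow.keytab
# optionally with --daemon to detach using the pid/stdout/stderr locations
# configured above.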
|
apache-2.0
|
tmilicic/networkx
|
networkx/algorithms/tests/test_graphical.py
|
22
|
4665
|
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
def test_valid_degree_sequence1():
n = 100
p = .3
for i in range(10):
G = nx.erdos_renyi_graph(n,p)
deg = (d for n, d in G.degree())
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_valid_degree_sequence2():
n = 100
for i in range(10):
G = nx.barabasi_albert_graph(n,1)
deg = (d for n, d in G.degree())
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
@raises(nx.NetworkXException)
def test_string_input():
a = nx.is_valid_degree_sequence([],'foo')
def test_negative_input():
assert_false(nx.is_valid_degree_sequence([-1],'hh'))
assert_false(nx.is_valid_degree_sequence([-1],'eg'))
assert_false(nx.is_valid_degree_sequence([72.5],'eg'))
class TestAtlas(object):
@classmethod
def setupClass(cls):
global atlas
import platform
if platform.python_implementation()=='Jython':
raise SkipTest('graph atlas not available under Jython.')
import networkx.generators.atlas as atlas
def setUp(self):
self.GAG=atlas.graph_atlas_g()
def test_atlas(self):
for graph in self.GAG:
            deg = [d for n, d in graph.degree()]  # use a list: a generator would be exhausted after the first check
assert_true( nx.is_valid_degree_sequence(deg, method='eg') )
assert_true( nx.is_valid_degree_sequence(deg, method='hh') )
def test_small_graph_true():
z=[5,3,3,3,3,2,2,2,1,1,1]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
z=[10,3,3,3,3,2,2,2,2,2,2]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_true(nx.is_valid_degree_sequence(z, method='hh'))
assert_true(nx.is_valid_degree_sequence(z, method='eg'))
def test_small_graph_false():
z=[1000,3,3,3,3,2,2,2,1,1,1]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
z=[6,5,4,4,2,1,1,1]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_false(nx.is_valid_degree_sequence(z, method='hh'))
assert_false(nx.is_valid_degree_sequence(z, method='eg'))
def test_directed_degree_sequence():
# Test a range of valid directed degree sequences
n, r = 100, 10
p = 1.0 / r
for i in range(r):
G = nx.erdos_renyi_graph(n,p*(i+1),None,True)
din = (d for n, d in G.in_degree())
dout = (d for n, d in G.out_degree())
assert_true(nx.is_digraphical(din, dout))
def test_small_directed_sequences():
dout=[5,3,3,3,3,2,2,2,1,1,1]
din=[3,3,3,3,3,2,2,2,2,2,1]
assert_true(nx.is_digraphical(din, dout))
# Test nongraphical directed sequence
dout = [1000,3,3,3,3,2,2,2,1,1,1]
din=[103,102,102,102,102,102,102,102,102,102]
assert_false(nx.is_digraphical(din, dout))
# Test digraphical small sequence
dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
assert_true(nx.is_digraphical(din, dout))
# Test nonmatching sum
din=[2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
assert_false(nx.is_digraphical(din, dout))
# Test for negative integer in sequence
din=[2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
assert_false(nx.is_digraphical(din, dout))
def test_multi_sequence():
# Test nongraphical multi sequence
seq=[1000,3,3,3,3,2,2,2,1,1]
assert_false(nx.is_multigraphical(seq))
# Test small graphical multi sequence
seq=[6,5,4,4,2,1,1,1]
assert_true(nx.is_multigraphical(seq))
# Test for negative integer in sequence
seq=[6,5,4,-4,2,1,1,1]
assert_false(nx.is_multigraphical(seq))
# Test for sequence with odd sum
seq=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert_false(nx.is_multigraphical(seq))
def test_pseudo_sequence():
# Test small valid pseudo sequence
seq=[1000,3,3,3,3,2,2,2,1,1]
assert_true(nx.is_pseudographical(seq))
# Test for sequence with odd sum
seq=[1000,3,3,3,3,2,2,2,1,1,1]
assert_false(nx.is_pseudographical(seq))
# Test for negative integer in sequence
seq=[1000,3,3,3,3,2,2,-2,1,1]
assert_false(nx.is_pseudographical(seq))
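# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original test module: a minimal,
# self-contained Havel-Hakimi check sketching the 'hh' method the tests above
# exercise. The name `havel_hakimi_graphical` is ours, not a networkx API.
def havel_hakimi_graphical(sequence):
    """Return True if the integer sequence is graphical (Havel-Hakimi)."""
    seq = sorted((int(d) for d in sequence), reverse=True)
    if any(d < 0 for d in seq) or sum(seq) % 2 != 0:
        return False
    while seq:
        d = seq.pop(0)
        if d > len(seq):
            return False
        for i in range(d):
            seq[i] -= 1
            if seq[i] < 0:
                return False
        seq.sort(reverse=True)
    return True

def test_havel_hakimi_sketch():
    # Same sequences as test_small_graph_true / test_small_graph_false above.
    assert_true(havel_hakimi_graphical([5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]))
    assert_false(havel_hakimi_graphical([6, 5, 4, 4, 2, 1, 1, 1]))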
|
bsd-3-clause
|
mgeorgehansen/FIFE_Technomage
|
demos/shooter/scripts/gui/guis.py
|
1
|
10455
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2010 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from fife import fife
from fife.extensions import pychan
from fife.extensions.pychan import widgets
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesNSImpl
class MainMenu(object):
def __init__(self, world, setting):
self._world = world
self._setting = setting
self._widget = pychan.loadXML('gui/mainmenu.xml')
self._continue = self._widget.findChild(name="continue")
self._newgame = self._widget.findChild(name="new_game")
self._credits = self._widget.findChild(name="credits")
self._highscores = self._widget.findChild(name="high_scores")
self._quit = self._widget.findChild(name="quit")
self._widget.position = (0,0)
eventMap = {
'continue': self._world.continueGame,
'new_game': self._world.newGame,
'settings': self._setting.onOptionsPress,
'credits': self._world.showCredits,
'high_scores': self._world.showHighScores,
'quit': self._world.quit,
}
self._widget.mapEvents(eventMap)
self._continueMinWidth = self._continue.min_width
self._continueMinHeight = self._continue.min_height
self._continueMaxWidth = self._continue.max_width
self._continueMaxHeight = self._continue.max_height
def show(self, cont=False):
if cont:
self._continue.min_width = self._continueMinWidth
self._continue.min_height = self._continueMinHeight
self._continue.max_width = self._continueMaxWidth
self._continue.max_height = self._continueMaxHeight
else:
self._continue.min_width = 0
self._continue.min_height = 0
self._continue.max_width = 0
self._continue.max_height = 0
self._continue.adaptLayout()
self._widget.show()
def hide(self):
self._widget.hide()
def isVisible(self):
return self._widget.isVisible()
class HeadsUpDisplay(object):
def __init__(self, world):
self._world = world
self._widget = pychan.loadXML('gui/hud.xml')
self._fpstext = self._widget.findChild(name="fps")
self._velocitytext = self._widget.findChild(name="velocity")
self._positiontext = self._widget.findChild(name="position")
self._scoretext = self._widget.findChild(name="score")
self._livestext = self._widget.findChild(name="lives")
self._widget.position = (0,0)
def show(self):
self._widget.show()
def hide(self):
self._widget.hide()
def setFPSText(self, text):
self._fpstext.text = text
def setPositionText(self, text):
self._positiontext.text = text
def setVelocityText(self, text):
self._velocitytext.text = text
def setScoreText(self, text):
self._scoretext.text = text
def setLivesText(self, text):
self._livestext.text = text
class GameOverDisplay(object):
def __init__(self):
self._widget = pychan.loadXML('gui/gameover.xml')
def show(self):
self._widget.show()
def hide(self):
self._widget.hide()
class WinnerDisplay(object):
def __init__(self):
self._widget = pychan.loadXML('gui/winner.xml')
def show(self):
self._widget.show()
def hide(self):
self._widget.hide()
class HighScore(object):
def __init__(self, name, score):
self._name = name
self._score = int(score)
class HighScores(object):
"""
Handles all the high scores. It saves and loads the high score file.
"""
def __init__(self, world):
self._world = world
self.load()
def load(self):
self._widget = pychan.loadXML('gui/highscores.xml')
self._scores = list()
for i in range(1,11):
place = self._widget.findChild(name=str(i))
name = self._widget.findChild(name="%iname" % i)
score = self._widget.findChild(name="%iscore" % i)
highscore = HighScore(name.text, int(score.text))
self._scores.append(highscore)
eventMap = {
'close': self.hide,
}
self._widget.mapEvents(eventMap)
def isHighScore(self, score):
for highscore in self._scores:
if score > highscore._score:
return True
return False
def addHighScore(self, score):
if not self.isHighScore(score._score):
return
i = 0
element = 0
for highscore in self._scores:
if score._score > highscore._score:
element = i
break
i += 1
self._scores.insert(element, score)
#remove last element
self._scores.pop()
self.saveHighScores()
self.load()
def startElement(self, name, attrs):
self._file.write(self._indent_level)
self._xmlout.startElementNS((None, name), name, attrs)
self._file.write('\n')
self._indent_level = self._indent_level + '\t'
def endElement(self, name):
self._indent_level = self._indent_level[0:(len(self._indent_level) - 1)]
self._file.write(self._indent_level)
self._xmlout.endElementNS((None, name), name)
self._file.write('\n')
def saveHighScores(self):
self._file = open('gui/highscores.xml', 'w')
self._xmlout = XMLGenerator(self._file, 'ascii')
self._xmlout.startDocument()
self._indent_level = ''
#<Container name="HighScores" position="0,0" size="1024,768" border_size="0" base_color="0,0,0" opaque="0">
attr_values = {
(None, 'name'): 'HighScores',
(None, 'position'): '0,0',
(None, 'size'): '1024,768',
(None, 'border_size'): '0',
(None, 'base_color'): '0,0,0',
(None, 'opaque'): '0',
}
attr_names = {
(None, 'name'): 'name',
(None, 'position'): 'position',
(None, 'size'): 'size',
(None, 'border_size'): 'border_size',
(None, 'base_color'): 'base_color',
(None, 'opaque'): 'opaque',
}
container_attrs = AttributesNSImpl(attr_values, attr_names)
self.startElement('Container', container_attrs)
#<VBox opaque='1' base_color="188, 0, 0" position="412,200" name="high_score">
attr_values = {
(None, 'opaque'): '1',
(None, 'base_color'): '188,0,0',
(None, 'position'): '412,200',
(None, 'name'): 'high_score',
}
attr_names = {
(None, 'opaque'): 'opaque',
(None, 'base_color'): 'base_color',
(None, 'position'): 'position',
(None, 'name'): 'name',
}
vbox_attrs = AttributesNSImpl(attr_values, attr_names)
self.startElement('VBox', vbox_attrs)
#<Label name="high_scores" text="High Scores" min_size="100,0" border_size="0"/>
self.startElement('HBox', AttributesNSImpl({}, {}))
attr_values = {
(None, 'name'): 'high_scores',
(None, 'text'): 'High Scores',
(None, 'min_size'): '100,0',
(None, 'border_size'): '0',
}
attr_names = {
(None, 'name'): 'name',
(None, 'text'): 'text',
(None, 'min_size'): 'min_size',
(None, 'border_size'): 'border_size',
}
self.startElement('Label', AttributesNSImpl(attr_values, attr_names))
self.endElement('Label')
self.endElement('HBox')
i = 1
for score in self._scores:
self.startElement('HBox', AttributesNSImpl({}, {}))
plc_attr_values = {
(None, 'name'): str(i),
(None, 'text'): str(i),
(None, 'min_size'): '20,0',
(None, 'border_size'): '5',
}
plc_attr_names = {
(None, 'name'): 'name',
(None, 'text'): 'text',
(None, 'min_size'): 'min_size',
(None, 'border_size'): 'border_size',
}
name_attr_values = {
(None, 'name'): str(i) + 'name',
(None, 'text'): score._name,
(None, 'min_size'): '50,0',
(None, 'border_size'): '5',
}
name_attr_names = {
(None, 'name'): 'name',
(None, 'text'): 'text',
(None, 'min_size'): 'min_size',
(None, 'border_size'): 'border_size',
}
score_attr_values = {
(None, 'name'): str(i) + 'score',
(None, 'text'): str(score._score),
(None, 'min_size'): '150,0',
(None, 'border_size'): '5',
}
score_attr_names = {
(None, 'name'): 'name',
(None, 'text'): 'text',
(None, 'min_size'): 'min_size',
(None, 'border_size'): 'border_size',
}
self.startElement('Label', AttributesNSImpl(plc_attr_values, plc_attr_names))
self.endElement('Label')
self.startElement('Label', AttributesNSImpl(name_attr_values, name_attr_names))
self.endElement('Label')
self.startElement('Label', AttributesNSImpl(score_attr_values, score_attr_names))
self.endElement('Label')
self.endElement('HBox')
i += 1
#<Button name="close" text="Close" min_size="100,0" border_size="0"/>
attr_values = {
(None, 'name'): 'close',
(None, 'text'): 'Close',
(None, 'min_size'): '100,0',
(None, 'border_size'): '0',
}
attr_names = {
(None, 'name'): 'name',
(None, 'text'): 'text',
(None, 'min_size'): 'min_size',
(None, 'border_size'): 'border_size',
}
self.startElement('Button', AttributesNSImpl(attr_values, attr_names))
self.endElement('Button')
self.endElement('VBox')
self.endElement('Container')
self._xmlout.endDocument()
self._file.close()
def show(self):
self._widget.show()
def hide(self):
self.saveHighScores()
self._widget.hide()
def isVisible(self):
return self._widget.isVisible()
class CreditsDisplay(object):
def __init__(self, world):
self._world = world
self._widget = pychan.loadXML('gui/credits.xml')
eventMap = {
'close': self.hide,
}
self._widget.mapEvents(eventMap)
def show(self):
self._widget.show()
def hide(self):
self._widget.hide()
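# ---------------------------------------------------------------------------
# Hedged standalone sketch, not part of the FIFE demo: HighScores.saveHighScores()
# above writes its pychan XML by hand through XMLGenerator; this helper shows the
# same startElementNS/endElementNS pattern in miniature. The function name and
# arguments are ours, chosen for illustration only.
def _write_label_sketch(out_file, name, text):
    gen = XMLGenerator(out_file, 'ascii')
    gen.startDocument()
    attrs = AttributesNSImpl({(None, 'name'): name, (None, 'text'): text},
                             {(None, 'name'): 'name', (None, 'text'): 'text'})
    gen.startElementNS((None, 'Label'), 'Label', attrs)
    gen.endElementNS((None, 'Label'), 'Label')
    gen.endDocument()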
|
lgpl-2.1
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/sandbox/cuda/tests/test_driver.py
|
2
|
3607
|
from __future__ import absolute_import, print_function, division
import numpy
import theano
# Skip test if cuda_ndarray is not available.
try:
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available is False:
raise SkipTest('Optional package cuda disabled')
except ImportError:
# To have the GPU back-end work without nose, we need this file to
# be importable without nose.
pass
import theano.sandbox.cuda as cuda
import theano.sandbox.cuda.basic_ops as B
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
def test_nvidia_driver1():
""" Some nvidia driver give bad result for reduction
This execute some reduction test to ensure it run correctly
"""
a = numpy.random.rand(10000).astype("float32")
A = cuda.shared_constructor(a)
f = theano.function(inputs=[], outputs=A.sum(), mode=mode_with_gpu,
profile=False)
topo = f.maker.fgraph.toposort()
assert len(topo) == 2
if sum(isinstance(node.op, B.GpuCAReduce) for node in topo) != 1:
msg = '\n\t'.join(
['Expected exactly one occurrence of GpuCAReduce ' +
'but got:'] + [str(app) for app in topo])
raise AssertionError(msg)
if not numpy.allclose(f(), a.sum()):
raise Exception("The nvidia driver version installed with this OS "
"does not give good results for reduction."
"Installing the nvidia driver available on the same "
"download page as the cuda package will fix the "
"problem: http://developer.nvidia.com/cuda-downloads")
def test_nvidia_driver2():
""" Test that the gpu device is initialized by theano when
we manually make a shared variable on the gpu.
The driver should always be tested during theano initialization
of the gpu device
"""
a = numpy.random.rand(10000).astype("float32")
cuda.shared_constructor(a)
assert theano.sandbox.cuda.use.device_number is not None
def test_nvidia_driver3():
""" Test that the gpu device is initialized by theano when
we build a function with gpu op.
The driver should always be tested during theano initialization
of the gpu device
"""
var = cuda.fvector()
f = theano.function([var], var + 1, mode=mode_with_gpu,
profile=False)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
assert theano.sandbox.cuda.use.device_number is not None
def test_nvcc_cast():
"""Test that the casting behaviour is correct.
Some versions of nvcc, in particular the one in 6.5.14, return an incorrect
value in this case.
Reported by Zijung Zhang at
https://groups.google.com/d/topic/theano-dev/LzHtP2OWeRE/discussion
"""
var = theano.tensor.fvector()
f = theano.function([var], -1. * (var > 0), mode=mode_with_gpu)
if not numpy.allclose(f([-1, 0, 1]), [0, 0, -1]):
raise Exception(
"The version of nvcc that Theano detected on your system "
"has a bug during conversion from integers to floating point. "
"Installing CUDA 7.0 (or more recent) should fix the problem.")
# TODO make sure the test_nvidia_driver test are executed when we make manually
# a CudaNdarray like this: cuda.CudaNdarray.zeros((5,4))
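# ---------------------------------------------------------------------------
# Hedged aside, not part of the original tests: the values that test_nvcc_cast
# expects from the GPU can be reproduced on CPU with plain numpy, which makes
# the intent of the `-1. * (var > 0)` expression easier to see. The helper
# name is ours.
def _expected_nvcc_cast_result():
    x = numpy.array([-1, 0, 1], dtype='float32')
    # The boolean mask upcasts to float32, giving [0., 0., -1.].
    return -1. * (x > 0)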
|
bsd-3-clause
|
fabiohsmachado/dandelion
|
dandelion/main.py
|
1
|
9787
|
"""Defines the Dandelion class"""
import networkx as nx;
class Dandelion:
"""The Dandelion code"""
def __init__(self, N, k, code = None, kTree = None):
"""Initializes the code."""
self.N = N;
self.k = k;
self._code = None;
self._kTree = None;
if code is not None:
self.code = code;
if kTree is not None:
self.kTree = kTree;
@property
def code(self):
return self._code;
@code.setter
def code(self, value):
self.validateCode(value);
self._code = value;
@property
def kTree(self):
return self._kTree;
@kTree.setter
def kTree(self, value):
self.validateKTree(value);
self._kTree = value;
def validateCode(self, code):
"""Check if the code is well formed.
Raises:
AssertionError: If it is not a valid code.
"""
#Check types
assert isinstance(code, list);
for t in code:
assert isinstance(t, tuple);
#Assert number of elements according to N and k
assert len(code) == self.N - self.k - 2
return True;
def validateKTree(self, kTree):
"""Check if the kTree is correct.
        DOES NOT validate the kTree structure; it only does minimal validation
        to check that it is an instance of networkx's Graph.
Raises:
AssertionError: If it is not a valid kTree
"""
assert isinstance(kTree, nx.Graph)
return True;
def codeKTree(self):
"""Code a kTree into a Dandelion code.
Needs to have the kTree attribute set.
Fills the code field of this object (rewrites it if there was any).
Raises:
Error: If the kTree field of the Dandelion object is not filled.
"""
if self.kTree is None:
raise Error("Need to specify a k-Tree first");
#Turn the kTree into a Renyi kTree:
self.Q, self.Rk = self._relabelKTree(self.kTree);
#Obtain the characteristic tree T from the Renyi kTree:
self.T = self._pruneRk();
#Obtain the Dandelion Code from the characteristic tree:
##Setup parameters
r = 0;
nodesInT = [i for i in self.T][1:];
q_bar = min([n for n in nodesInT if n not in self.Q]);
x = self.Rk.node[q_bar]['old_label'];
##Generate the code
self.code = self._generalizedCode(self.T, r, x);
return self.Q, self.code;
def _generalizedCode(self, T, r, x):
"""Generates a dandelion code from a given characteristic tree.
        The input tree is modified so that it contains the cycles that result
        from generating the code.
Parameters:
T: nx.DiGraph
The input characteristic tree.
r: Graph node
The root node of the tree (usually 0)
x: Graph node
The minimal node not belonging to the original root
Returns:
            code: list of tuples
                The generalized Dandelion code for the given
                characteristic tree.
"""
def swap(source, target):
"""Provides a method for swaping nodes in a graph.
This function removes the original edge of each node with its
parent and add a new edge with one node and the other node's
parent. It preserves the original label.
"""
labels = nx.get_edge_attributes(T, 'label');
parent_source = T.successors(source)[0];
label_source = labels[(source, parent_source)];
parent_target = T.successors(target)[0];
label_target = labels[(target, parent_target)];
T.remove_edge(source, parent_source);
T.remove_edge(target, parent_target);
T.add_edge(source, parent_target, label = label_target);
T.add_edge(target, parent_source, label = label_source);
path = nx.shortest_path(T, x, r)[1:-1];
while(len(path) > 0):
w = max(path);
swap(x, w);
path = nx.shortest_path(T, x, r)[1:-1];
nodes = sorted([v for v in T.nodes() if v not in [x, r]]);
labels = nx.get_edge_attributes(T, 'label');
code = [];
for v in nodes:
p_v = T.successors(v)[0]
code.append((p_v, labels[(v, p_v)]));
return code[1:];
def _pruneRk(self):
"""Wrapper function for generating the characteristic tree.
Creates a new tree, and separatedly populates its nodes and edge list.
Returns
T: nx.DiGraph
The new characteristic tree
"""
T = nx.DiGraph();
pruneOrder = self._addNodes(self.Rk, T);
self._addEdges(T, pruneOrder);
return T;
def _addEdges(self, T, pruneOrder):
"""Updates the characteristic three with the edge list according to the pruning
order.
The edge list are filled using the reversed pruning order and the
adjacent clique store in each node.
Parameters:
T: nx.DiGraph
The characteristic tree with a filled node list
pruneOrder: list
The prune order defined when the nodes were added
"""
#Initialize control variables
R = [self.N - self.k + i + 1 for i in range(self.k)]; #Root of the Renyi kTree
level = [0 for i in range(self.N)]; #All nodes levels start at 0
for v in reversed(pruneOrder):
Kv = T.node[v]['Kv'];
if Kv == R:
T.add_edge(v, 0, label = -1);
level[v-1] = 1;
else:
#Choose w in Kv such that level(w) is maximum
levelsInKv = [level[x-1] for x in Kv];
indexOfW = levelsInKv.index(max(levelsInKv));
w = Kv[indexOfW];
                #The edge label is the index of the only element in Kw that
                #does not belong to Kv
Kw = T.node[w]['Kv'];
edge_label = [i for i, v in enumerate(Kw, 1) if v not in Kv][0];
T.add_edge(v, w, label = edge_label);
level[v-1] = level[w] + 1;
def _addNodes(self, Rk, T):
"""Updates the characteristic tree T from a given Renyi kTree.
Updates T with the list of nodes and their adjacent cliques after the
pruning process. The adjacent clique is stored in a 'Kv' attribute of
each node. The pruning order is returned from the function.
All nodes except for the root in the Renyi kTree are marked as checked
in the process.
Parameters:
Rk: nx.Graph
The Renyi kTree
T: nx.Graph
The Characteristic tree to be filled with nodes
Returns:
pruneOrder: list
                The list of nodes in the order they were pruned
"""
def remove(node):
#Record the prune order
pruneOrder.append(node);
#Get the adjacent clique and add node to the graph
AdjacentClique = sorted([x for x in Rk[node] if not Rk.node[x]['checked']]);
T.add_node(node, Kv = AdjacentClique);
#Mark as removed and update degree list
Rk.node[node]['checked'] = True;
for y in adj[node-1]:
if not Rk.node[y]['checked']:
deg[y] -= 1;
#Initialize parameters
nx.set_node_attributes(Rk, 'checked', False);
T.add_node(0);
#initialize control variables
pruneOrder = [];
adj = Rk.adjacency_list();
deg = Rk.degree();
for v, _ in enumerate(range(self.N - self.k), 1):
if deg[v] == self.k:
remove(v);
for u in adj[v-1]:
if u < v and not Rk.node[u]['checked'] and deg[u] == self.k:
remove(u);
return pruneOrder;
def _relabelKTree(self, kTree):
"""Transforms the k-Tree into a Renyi k-Tree.
Creates a new relabeled tree.
        Old labels are stored as properties of the nodes so that the relabeling can be reversed.
        Parameters
            kTree : networkx.Graph
                The kTree
Returns
Q : list
A sorted list of nodes with degree = k
renyiKTree : networkx Graph
The relabeled kTree.
"""
renyiKTree = nx.Graph();
renyiKTree.add_edges_from(kTree.edges())
#l_m is the maximum node of degree k
l_m = max([k for k, v in kTree.degree().items() if v == self.k]);
#Q are the nodes adjacent to l_m
Q = sorted(list(kTree[l_m].keys()));
        #These are the algorithm steps as described in the paper
##Step 1: Define the new root and relabel node in Q for nodes in R
R = [self.N - self.k + i + 1 for i in range(self.k)] #New root
mapping = {k: v for k, v in zip(Q, R)}
##Step 2: Nothing needs to be done, just not remap those nodes
##Step 3: Close permutation cycles
loopClosers = [i for i in R if i not in Q];
for lc in loopClosers:
newLabel = lc;
while newLabel in mapping.values():
#Find dict keys by value
newLabel = list(mapping.keys())[list(mapping.values()).index(newLabel)]
mapping.update({lc: newLabel});
#Actual relabel
nx.set_node_attributes(renyiKTree, 'old_label', {n: n for n in
renyiKTree.nodes()}) #Save old node labels
return Q, nx.relabel_nodes(renyiKTree, mapping); #Update node labels
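if __name__ == '__main__':
    # Hedged usage sketch, not from the original project: validateCode() only
    # checks the element types and the expected length N - k - 2, so a
    # Dandelion object can be built from a bare code without a k-tree.
    # The code values below are purely illustrative.
    example = Dandelion(N=7, k=2, code=[(0, -1), (2, 1), (1, 2)]);
    print(example.code);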
|
gpl-3.0
|
user-none/calibre
|
src/calibre/ebooks/pdb/ztxt/writer.py
|
24
|
3629
|
# -*- coding: utf-8 -*-
'''
Writer content to ztxt pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import struct, zlib
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.txt.txtml import TXTMLizer
from calibre.ebooks.txt.newlines import TxtNewlines, specified_newlines
MAX_RECORD_SIZE = 8192
class Writer(FormatWriter):
def __init__(self, opts, log):
self.opts = opts
self.log = log
def write_content(self, oeb_book, out_stream, metadata=None):
title = self.opts.title if self.opts.title else oeb_book.metadata.title[0].value if oeb_book.metadata.title != [] else _('Unknown')
txt_records, txt_length = self._generate_text(oeb_book)
crc32 = 0
section_lengths = []
compressor = zlib.compressobj(9)
self.log.info('Compressing data...')
for i in range(0, len(txt_records)):
self.log.debug('\tCompressing record %i' % i)
txt_records[i] = compressor.compress(txt_records[i])
txt_records[i] = txt_records[i] + compressor.flush(zlib.Z_FULL_FLUSH)
section_lengths.append(len(txt_records[i]))
crc32 = zlib.crc32(txt_records[i], crc32) & 0xffffffff
header_record = self._header_record(txt_length, len(txt_records), crc32)
section_lengths.insert(0, len(header_record))
out_stream.seek(0)
hb = PdbHeaderBuilder('zTXTGPlm', title)
hb.build_header(section_lengths, out_stream)
for record in [header_record]+txt_records:
out_stream.write(record)
def _generate_text(self, oeb_book):
writer = TXTMLizer(self.log)
txt = writer.extract_content(oeb_book, self.opts)
self.log.debug('\tReplacing newlines with selected type...')
txt = specified_newlines(TxtNewlines('windows').newline,
txt).encode(self.opts.pdb_output_encoding, 'replace')
txt_length = len(txt)
txt_records = []
for i in range(0, (len(txt) / MAX_RECORD_SIZE) + 1):
txt_records.append(txt[i * MAX_RECORD_SIZE : (i * MAX_RECORD_SIZE) + MAX_RECORD_SIZE])
return txt_records, txt_length
def _header_record(self, txt_length, record_count, crc32):
record = ''
record += struct.pack('>H', 0x012c) # [0:2], version. 0x012c = 1.44
record += struct.pack('>H', record_count) # [2:4], Number of PDB records used for the text of the book.
record += struct.pack('>L', txt_length) # [4:8], Uncompressed length of the entire text of the book.
record += struct.pack('>H', MAX_RECORD_SIZE) # [8:10], Maximum size of each record containing text
record += struct.pack('>H', 0) # [10:12], Number of bookmarks.
record += struct.pack('>H', 0) # [12:14], Bookmark record. 0 if there are no bookmarks.
record += struct.pack('>H', 0) # [14:16], Number of annotations.
record += struct.pack('>H', 0) # [16:18], Annotation record. 0 if there are no annotations.
record += struct.pack('>B', 1) # [18:19], Flags. Bitmask, 0x01 = Random Access. 0x02 = Non-Uniform text block size.
record += struct.pack('>B', 0) # [19:20], Reserved.
record += struct.pack('>L', crc32) # [20:24], crc32
record += struct.pack('>LL', 0, 0) # [24:32], padding
return record
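def _parse_header_record(record):
    '''
    Hedged illustration, not part of calibre: read back the 32-byte header
    produced by Writer._header_record() using the same struct layout. The
    field names are our own labels, taken from the comments above.
    '''
    version, record_count = struct.unpack('>HH', record[0:4])
    txt_length, = struct.unpack('>L', record[4:8])
    max_record_size, = struct.unpack('>H', record[8:10])
    crc32, = struct.unpack('>L', record[20:24])
    return {'version': version, 'record_count': record_count,
            'uncompressed_length': txt_length,
            'max_record_size': max_record_size, 'crc32': crc32}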
|
gpl-3.0
|
augusten/webshop-app
|
node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
|
1789
|
10585
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced when using complex projects and large
numbers of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
  # Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
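if __name__ == '__main__':
  # Hedged usage sketch, not part of gyp: IsValidTargetForWrapper() only
  # inspects the spec dict and two optional regular expressions, so it can be
  # exercised standalone. The spec values below are illustrative only.
  example_spec = {'target_name': 'base_unittests', 'type': 'executable'}
  print(IsValidTargetForWrapper(None, r'_unittests$', example_spec))  # True
  print(IsValidTargetForWrapper(None, r'^chrome$', example_spec))     # False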
|
mit
|
Srisai85/scipy
|
scipy/stats/kde.py
|
27
|
17303
|
#-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
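# ---------------------------------------------------------------------------
# Hedged check, not part of scipy: for a dataset of shape (d, n) the default
# bandwidth factor follows Scott's rule n**(-1./(d+4)) described in the class
# docstring. The helper name and sample size are illustrative only.
def _scotts_rule_sketch(n=200):
    dataset = np.random.normal(size=n)      # 1-D data, so d == 1
    kde = gaussian_kde(dataset)
    return np.allclose(kde.factor, n ** (-1. / 5))  # expect True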
|
bsd-3-clause
|
coursemdetw/2014c2
|
exts/w2/static/Brython2.0.0-20140209-164925/Lib/unittest/case.py
|
743
|
48873
|
"""Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
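# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the two test-case flavours defined above
# (a TestCase subclass and FunctionTestCase wrapping a plain function).  The
# names SampleCase and sample_test are made up for illustration only.
if __name__ == "__main__":
    class SampleCase(TestCase):
        def setUp(self):
            self.data = [1, 2, 3]
        def test_membership(self):
            self.assertIn(2, self.data)
        def test_raises(self):
            with self.assertRaises(IndexError):
                self.data[10]

    def sample_test():
        # a plain function slipped into the framework via FunctionTestCase
        assert 1 + 1 == 2

    cases = [SampleCase('test_membership'),
             SampleCase('test_raises'),
             FunctionTestCase(sample_test)]
    run_result = cases[0].defaultTestResult()
    for case in cases:
        case.run(run_result)
    print(run_result.testsRun, run_result.wasSuccessful())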
|
gpl-2.0
|
carnell69/kuma
|
vendor/packages/setuptools/dist.py
|
259
|
35320
|
__all__ = ['Distribution']
import re
import os
import sys
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import basestring, PY2
from setuptools import windows_support
import pkg_resources
packaging = pkg_resources.packaging
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
_patch_distribution_metadata_write_pkg_info()
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x='+value)
assert not ep.extras
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr,value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value)!=value
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr,value)
)
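# Illustrative note (not part of the original file): the ``''.join(value) != value``
# check above rejects a bare string while accepting a list or tuple of strings,
# because joining the characters of a string just reproduces the string itself:
#
#     assert_string_list(None, 'keywords', ['one', 'two'])   # accepted
#     assert_string_list(None, 'keywords', 'one two')        # raises DistutilsSetupError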
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist,attr,value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k,v in value.items():
if ':' in k:
k,m = k.split(':',1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: "+m)
list(pkg_resources.parse_requirements(v))
except (TypeError,ValueError,AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
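# Illustrative sketch (not part of the original file): an extras_require
# mapping of the shape check_extras() accepts, including the "name:marker"
# key form split apart above (the package names here are only examples):
#
#     extras_require = {
#         'docs': ['sphinx>=1.0'],
#         'ssl:sys_platform=="win32"': 'wincertstore==0.2',
#     }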
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
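# Illustrative sketch (not part of the original file): check_requirements is
# one of the validators consumed through the 'distutils.setup_keywords' entry
# points in Distribution.finalize_options below.  Called directly (the dist
# argument is unused, so None suffices for a quick check) it behaves roughly
# like this:
#
#     check_requirements(None, 'install_requires', ['requests>=2.0'])  # passes silently
#     check_requirements(None, 'install_requires', ['not a req !!'])   # raises DistutilsSetupError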
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value,basestring):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value,dict):
for k,v in value.items():
if not isinstance(k,str): break
try: iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr+" must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only"
".-separated package names in setup.py", pkgname
)
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self,'dependency_links',self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs['setup_requires'])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
if not hasattr(self,ep.name):
setattr(self,ep.name,None)
_Distribution.__init__(self,attrs)
if isinstance(self.metadata.version, numbers.Number):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
if self.metadata.version is not None:
try:
ver = packaging.version.Version(self.metadata.version)
normalized_version = str(ver)
if self.metadata.version != normalized_version:
warnings.warn(
"Normalizing '%s' to '%s'" % (
self.metadata.version,
normalized_version,
)
)
self.metadata.version = normalized_version
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % self.metadata.version
)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self,ep.name,None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args':['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
install_dir = self.get_egg_cache_dir()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name,feature in self.features.items():
self._set_feature(name,None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef=''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-'+name, None, 'include '+descr+incdef))
go.append(('without-'+name, None, 'exclude '+descr+excdef))
no['without-'+name] = 'with-'+name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
        For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd,opts in self.command_options.items():
for opt,(src,val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_','-')
if val==0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj,'negative_opt',{}))
for neg,pos in neg_opt.items():
if pos==opt:
opt=neg
val=None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val==1:
val = None
d.setdefault(cmd,{})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = Distribution
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
warnings.warn(
"Features are deprecated and will be removed in a future "
"version. See http://bitbucket.org/pypa/setuptools/65.",
DeprecationWarning,
stacklevel=3,
)
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features,(str,Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r,str)
]
er = [r for r in require_features if not isinstance(r,str)]
if er: extras['require_features'] = er
if isinstance(remove,str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or at least one"
" of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self,dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description + " is required, "
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self,dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self,dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
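# --- Added illustrative sketch; not part of the upstream setuptools source ---
# A minimal, hypothetical example of how the keyword arguments documented in
# the Feature docstring might be combined in a setup() call. The package
# names ('mypkg.tests', 'mypkg.speedups') are invented for illustration only.
#
#   setup(
#       ...,
#       features={
#           'tests': Feature(
#               "test suite",
#               standard=True,
#               remove=['mypkg.tests'],
#           ),
#           'speedups': Feature(
#               "optional C speedups",
#               standard=False,
#               optional=True,
#               packages=['mypkg.speedups'],
#           ),
#       },
#   )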
|
mpl-2.0
|
pre-commit/pre-commit
|
pre_commit/languages/golang.py
|
1
|
3152
|
import contextlib
import os.path
import sys
from typing import Generator
from typing import Sequence
from typing import Tuple
import pre_commit.constants as C
from pre_commit import git
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import PatchesT
from pre_commit.envcontext import Var
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
from pre_commit.util import cmd_output_b
from pre_commit.util import rmtree
ENVIRONMENT_DIR = 'golangenv'
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
def get_env_patch(venv: str) -> PatchesT:
return (
('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
)
@contextlib.contextmanager
def in_env(prefix: Prefix) -> Generator[None, None, None]:
envdir = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
)
with envcontext(get_env_patch(envdir)):
yield
def guess_go_dir(remote_url: str) -> str:
if remote_url.endswith('.git'):
remote_url = remote_url[:-1 * len('.git')]
looks_like_url = (
not remote_url.startswith('file://') and
('//' in remote_url or '@' in remote_url)
)
remote_url = remote_url.replace(':', '/')
if looks_like_url:
_, _, remote_url = remote_url.rpartition('//')
_, _, remote_url = remote_url.rpartition('@')
return remote_url
else:
return 'unknown_src_dir'
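# Added note; not in the upstream pre-commit source. A worked example of the
# heuristic above, using a made-up repository: for an ssh-style remote such as
# 'git@github.com:owner/repo.git' it strips '.git', replaces ':' with '/', and
# keeps everything after the last '@', giving 'github.com/owner/repo'. A plain
# local path contains neither '//' nor '@', so it falls back to
# 'unknown_src_dir'.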
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
helpers.assert_version_default('golang', version)
directory = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
)
with clean_path_on_failure(directory):
remote = git.get_remote_url(prefix.prefix_dir)
repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))
# Clone into the goenv we'll create
cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)
helpers.run_setup_cmd(prefix, cmd)
if sys.platform == 'cygwin': # pragma: no cover
_, gopath, _ = cmd_output('cygpath', '-w', directory)
gopath = gopath.strip()
else:
gopath = directory
env = dict(os.environ, GOPATH=gopath)
env.pop('GOBIN', None)
cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
for dependency in additional_dependencies:
cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)
# Save some disk space; we don't need these after installation
rmtree(prefix.path(directory, 'src'))
pkgdir = prefix.path(directory, 'pkg')
if os.path.exists(pkgdir): # pragma: no cover (go<1.10)
rmtree(pkgdir)
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> Tuple[int, bytes]:
with in_env(hook.prefix):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
|
mit
|
crimsonthunder/Samsung_mm
|
tools/perf/python/twatch.py
|
7370
|
1334
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
gpl-2.0
|
swayf/ajenti
|
plugins/filesystems/backend.py
|
17
|
1910
|
import re
import os
from ajenti.api import *
from ajenti.com import *
from ajenti.utils import *
class Entry:
def __init__(self):
self.src = ''
self.dst = ''
self.options = ''
self.fs_type = ''
self.dump_p = 0
self.fsck_p = 0
def read():
ss = ConfManager.get().load('filesystems', '/etc/fstab').split('\n')
r = []
for s in ss:
if s != '' and s[0] != '#':
try:
s = s.split()
e = Entry()
try:
e.src = s[0]
e.dst = s[1]
e.fs_type = s[2]
e.options = s[3]
e.dump_p = int(s[4])
e.fsck_p = int(s[5])
except:
pass
r.append(e)
except:
pass
return r
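# Added note; not in the upstream ajenti source. read() above maps one fstab
# record per Entry: a hypothetical line such as
#   UUID=abcd-1234  /home  ext4  defaults  0  2
# yields src='UUID=abcd-1234', dst='/home', fs_type='ext4', options='defaults',
# dump_p=0, fsck_p=2, and save() below writes the same six fields back out,
# tab-separated.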
def save(ee):
d = ''
for e in ee:
d += '%s\t%s\t%s\t%s\t%i\t%i\n' % (e.src, e.dst, e.fs_type, e.options, e.dump_p, e.fsck_p)
ConfManager.get().save('filesystems', '/etc/fstab', d)
ConfManager.get().commit('filesystems')
def list_disks():
r = []
for s in os.listdir('/dev'):
if re.match('sd.$|hd.$|scd.$|fd.$|ad.+$', s):
r.append('/dev/' + s)
return sorted(r)
def list_partitions():
r = []
for s in os.listdir('/dev'):
if re.match('sd..$|hd..$|scd.$|fd.$', s):
r.append('/dev/' + s)
return sorted(r)
def get_disk_vendor(d):
return ' '.join(shell('hdparm -I ' + d + ' | grep Model').split()[3:])
def get_partition_uuid_by_name(p):
return shell('blkid -o value -s UUID ' + p).split('\n')[0]
def get_partition_name_by_uuid(u):
return shell('blkid -U ' + u)
class FSConfigurable (Plugin):
implements(IConfigurable)
name = 'Filesystems'
id = 'filesystems'
def list_files(self):
return ['/etc/fstab']
|
lgpl-3.0
|
lenovo-a3-dev/kernel_lenovo_a3
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
CouchPotato/CouchPotatoServer
|
libs/guessit/language.py
|
94
|
14207
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.textutils import find_words
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language', 'guess_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
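# Added illustrative record; not taken from the bundled file itself. A typical
# line looks like
#   fre|fra|fr|French|français
# i.e. bibliographic alpha-3, terminologic alpha-3, alpha-2, English name and
# French name, which is the field order assumed by the indexing below.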
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb': ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'br'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
return language.lower() in lng_all_names
def is_language(language):
return is_iso_language(language) or language in lng_exceptions
def lang_set(languages, strict=False):
"""Return a set of guessit.Language created from their given string
representation.
if strict is True, then this will raise an exception if any language
could not be identified.
"""
return set(Language(l, strict=strict) for l in languages)
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
def __init__(self, language, country=None, strict=False, scheme=None):
language = u(language.strip().lower())
with_country = (Language._with_country_regexp.match(language) or
Language._with_country_regexp2.match(language))
if with_country:
self.lang = Language(with_country.group(1)).lang
self.country = Country(with_country.group(2))
return
self.lang = None
self.country = Country(country) if country else None
# first look for scheme specific languages
if scheme == 'opensubtitles':
if language == 'br':
self.lang = 'bre'
return
elif language == 'se':
self.lang = 'sme'
return
elif scheme is not None:
log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
# look for ISO language codes
if len(language) == 2:
self.lang = lng2_to_lng3.get(language)
elif len(language) == 3:
self.lang = (language
if language in lng3
else lng3term_to_lng3.get(language))
else:
self.lang = (lng_en_name_to_lng3.get(language) or
lng_fr_name_to_lng3.get(language))
# general language exceptions
if self.lang is None and language in lng_exceptions:
lang, country = lng_exceptions[language]
self.lang = Language(lang).alpha3
self.country = Country(country) if country else None
msg = 'The given string "%s" could not be identified as a language' % language
if self.lang is None and strict:
raise ValueError(msg)
if self.lang is None:
log.debug(msg)
self.lang = 'und'
@property
def alpha2(self):
return lng3_to_lng2[self.lang]
@property
def alpha3(self):
return self.lang
@property
def alpha3term(self):
return lng3_to_lng3term[self.lang]
@property
def english_name(self):
return lng3_to_lng_en_name[self.lang]
@property
def french_name(self):
return lng3_to_lng_fr_name[self.lang]
@property
def opensubtitles(self):
if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
return 'pob'
elif self.lang in ['gre', 'srp']:
return self.alpha3term
return self.alpha3
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
return self.alpha2
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
return self.lang == other.lang
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.lang != 'und'
def __unicode__(self):
if self.country:
return '%s(%s)' % (self.english_name, self.country.alpha2)
else:
return self.english_name
def __repr__(self):
if self.country:
return 'Language(%s, country=%s)' % (self.english_name, self.country)
else:
return 'Language(%s)' % self.english_name
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None, skip=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
you can specify a list of allowed languages using the lang_filter argument,
as in lang_filter = [ 'fr', 'eng', 'spanish' ]
>>> search_language('movie [en].avi')
(Language(English), (7, 9), 0.8)
>>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
(None, None, None)
"""
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
# middle of a string (where they most likely carry their common meaning)
lng_common_words = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
# french words
'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se',
# spanish words
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben', 'da', 'lt'
])
sep = r'[](){} \._-+'
if lang_filter:
lang_filter = lang_set(lang_filter)
slow = ' %s ' % string.lower()
confidence = 1.0 # for all of them
for lang in set(find_words(slow)) & lng_all_names:
if lang in lng_common_words:
continue
pos = slow.find(lang)
if pos != -1:
end = pos + len(lang)
# skip if the span is in the skip list
while skip and (pos - 1, end - 1) in skip:
pos = slow.find(lang, end)
if pos == -1:
continue
end = pos + len(lang)
if pos == -1:
continue
# make sure our word is always surrounded by separators
if slow[pos - 1] not in sep or slow[end] not in sep:
continue
language = Language(slow[pos:end])
if lang_filter and language not in lang_filter:
continue
# only allow those languages that have a 2-letter code, those that
# don't are too esoteric and probably false matches
if language.lang not in lng3_to_lng2:
continue
# confidence depends on lng2, lng3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words and lower their confidence accordingly
confidence = 0.3 # going with the low-confidence route here
return language, (pos - 1, end - 1), confidence
return None, None, None
def guess_language(text):
"""Guess the language in which a body of text is written.
This uses the external guess-language python module, and will fail and return
Language(Undetermined) if it is not installed.
"""
try:
from guess_language import guessLanguage
return Language(guessLanguage(text))
except ImportError:
log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
log.error('Please install it from PyPI, by doing eg: pip install guess-language')
return UNDETERMINED
|
gpl-3.0
|
icereval/osf.io
|
scripts/create_fakes.py
|
2
|
19939
|
# -*- coding: utf-8 -*-
"""Fake data generator.
To use:
1. Install fake-factory.
pip install fake-factory
2. Create your OSF user account
3. Run the script, passing in your username (email).
::
python -m scripts.create_fakes --user [email protected]
This will create 3 fake public projects, each with 3 fake contributors (with
you as the creator).
To create a project with a complex component structure, pass in a list representing the depth you would
like each component to contain.
Examples:
python -m scripts.create_fakes -u fred@cos --components '[1, 1, 1, 1]' --nprojects 1
...will create a project with 4 components.
python -m scripts.create_fakes -u fred@cos --components '4' --nprojects 1
...will create a project with a series of components, 4 levels deep.
python -m scripts.create_fakes -u fred@cos --components '[1, [1, 1]]' --nprojects 1
...will create a project with two top level components, and one with a depth of 2 components.
python -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True
...will create 3 preprints with the default provider osf
python -m scripts.create_fakes -u fred@cos --nprojects 3 --preprint True --preprintprovider osf,test_provider
...will create 3 preprints with the providers osf and test_provider
"""
from __future__ import print_function, absolute_import
import ast
import sys
import mock
import argparse
import logging
import django
import pytz
from faker import Factory
from faker.providers import BaseProvider
django.setup()
from framework.auth import Auth
from osf_tests.factories import UserFactory, ProjectFactory, NodeFactory, RegistrationFactory, PreprintFactory, PreprintProviderFactory, fake_email
from osf import models
from website.app import init_app
class Sciencer(BaseProvider):
# Science term Faker Provider created by @csheldonhess
# https://github.com/csheldonhess/FakeConsumer/blob/master/faker/providers/science.py
word_list = ('abiosis', 'abrade', 'absorption', 'acceleration', 'accumulation',
'acid', 'acidic', 'activist', 'adaptation', 'agonistic', 'agrarian', 'airborne',
'alchemist', 'alignment', 'allele', 'alluvial', 'alveoli', 'ambiparous',
'amphibian', 'amplitude', 'analysis', 'ancestor', 'anodize', 'anomaly',
'anther', 'antigen', 'apiary', 'apparatus', 'application', 'approximation',
'aquatic', 'aquifer', 'arboreal', 'archaeology', 'artery', 'assessment',
'asteroid', 'atmosphere', 'atomic', 'atrophy', 'attenuate', 'aven', 'aviary',
'axis', 'bacteria', 'balance', 'bases', 'biome', 'biosphere', 'black hole',
'blight', 'buoyancy', 'calcium', 'canopy', 'capacity', 'capillary', 'carapace',
'carcinogen', 'catalyst', 'cauldron', 'celestial', 'cells', 'centigrade',
'centimeter', 'centrifugal', 'chemical reaction', 'chemicals', 'chemistry',
'chlorophyll', 'choked', 'chromosome', 'chronic', 'churn', 'classification',
'climate', 'cloud', 'comet', 'composition', 'compound', 'compression',
'condensation', 'conditions', 'conduction', 'conductivity', 'conservation',
'constant', 'constellation', 'continental', 'convection', 'convention', 'cool',
'core', 'cosmic', 'crater', 'creature', 'crepuscular', 'crystals', 'cycle', 'cytoplasm',
'dampness', 'data', 'decay', 'decibel', 'deciduous', 'defoliate', 'density',
'denude', 'dependency', 'deposits', 'depth', 'desiccant', 'detritus',
'development', 'digestible', 'diluted', 'direction', 'disappearance', 'discovery',
'dislodge', 'displace', 'dissection', 'dissolution', 'dissolve', 'distance',
'diurnal', 'diverse', 'doldrums', 'dynamics', 'earthquake', 'eclipse', 'ecology',
'ecosystem', 'electricity', 'elements', 'elevation', 'embryo', 'endangered',
'endocrine', 'energy', 'entropy', 'environment', 'enzyme', 'epidermis', 'epoch',
'equilibrium', 'equine', 'erosion', 'essential', 'estuary', 'ethical', 'evaporation',
'event', 'evidence', 'evolution', 'examination', 'existence', 'expansion',
'experiment', 'exploration ', 'extinction', 'extreme', 'facet', 'fault', 'fauna',
'feldspar', 'fermenting', 'fission', 'fissure', 'flora', 'flourish', 'flowstone',
'foliage', 'food chain', 'forage', 'force', 'forecast', 'forensics', 'formations',
'fossil fuel', 'frequency', 'friction', 'fungi', 'fusion', 'galaxy', 'gastric',
'geo-science', 'geothermal', 'germination', 'gestation', 'global', 'gravitation',
'green', 'greenhouse effect', 'grotto', 'groundwater', 'habitat', 'heat', 'heavens',
'hemisphere', 'hemoglobin', 'herpetologist', 'hormones', 'host', 'humidity', 'hyaline',
'hydrogen', 'hydrology', 'hypothesis', 'ichthyology', 'illumination', 'imagination',
'impact of', 'impulse', 'incandescent', 'indigenous', 'inertia', 'inevitable', 'inherit',
'inquiry', 'insoluble', 'instinct', 'instruments', 'integrity', 'intelligence',
'interacts with', 'interdependence', 'interplanetary', 'invertebrate', 'investigation',
'invisible', 'ions', 'irradiate', 'isobar', 'isotope', 'joule', 'jungle', 'jurassic',
'jutting', 'kilometer', 'kinetics', 'kingdom', 'knot', 'laser', 'latitude', 'lava',
'lethal', 'life', 'lift', 'light', 'limestone', 'lipid', 'lithosphere', 'load',
'lodestone', 'luminous', 'luster', 'magma', 'magnet', 'magnetism', 'mangrove', 'mantle',
'marine', 'marsh', 'mass', 'matter', 'measurements', 'mechanical', 'meiosis', 'meridian',
'metamorphosis', 'meteor', 'microbes', 'microcosm', 'migration', 'millennia', 'minerals',
'modulate', 'moisture', 'molecule', 'molten', 'monograph', 'monolith', 'motion',
'movement', 'mutant', 'mutation', 'mysterious', 'natural', 'navigable', 'navigation',
'negligence', 'nervous system', 'nesting', 'neutrons', 'niche', 'nocturnal',
'nuclear energy', 'numerous', 'nurture', 'obsidian', 'ocean', 'oceanography', 'omnivorous',
'oolites (cave pearls)', 'opaque', 'orbit', 'organ', 'organism', 'ornithology',
'osmosis', 'oxygen', 'paleontology', 'parallax', 'particle', 'penumbra',
'percolate', 'permafrost', 'permutation', 'petrify', 'petrograph', 'phenomena',
'physical property', 'planetary', 'plasma', 'polar', 'pole', 'pollination',
'polymer', 'population', 'precipitation', 'predator', 'prehensile', 'preservation',
'preserve', 'pressure', 'primate', 'pristine', 'probe', 'process', 'propagation',
'properties', 'protected', 'proton', 'pulley', 'qualitative data', 'quantum', 'quark',
'quarry', 'radiation', 'radioactivity', 'rain forest', 'ratio', 'reaction', 'reagent',
'realm', 'redwoods', 'reeds', 'reflection', 'refraction', 'relationships between', 'reptile',
'research', 'resistance', 'resonate', 'rookery', 'rubble', 'runoff', 'salinity', 'sandbar',
'satellite', 'saturation', 'scientific investigation', 'scientist\'s', 'sea floor', 'season',
'sedentary', 'sediment', 'sedimentary', 'seepage', 'seismic', 'sensors', 'shard',
'similarity', 'solar', 'soluble', 'solvent', 'sonic', 'sound', 'source', 'species',
'spectacular', 'spectrum', 'speed', 'sphere', 'spring', 'stage', 'stalactite',
'stalagmites', 'stimulus', 'substance', 'subterranean', 'sulfuric acid', 'surface',
'survival', 'swamp', 'sylvan', 'symbiosis', 'symbol', 'synergy', 'synthesis', 'taiga',
'taxidermy', 'technology', 'tectonics', 'temperate', 'temperature', 'terrestrial',
'thermals', 'thermometer', 'thrust', 'torque', 'toxin', 'trade winds', 'pterodactyl',
'transformation tremors', 'tropical', 'umbra', 'unbelievable', 'underwater', 'unearth',
'unique', 'unite', 'unity', 'universal', 'unpredictable', 'unusual', 'ursine', 'vacuole',
'valuable', 'vapor', 'variable', 'variety', 'vast', 'velocity', 'ventifact', 'verdant',
'vespiary', 'viable', 'vibration', 'virus', 'viscosity', 'visible', 'vista', 'vital',
'vitreous', 'volt', 'volume', 'vulpine', 'wave', 'wax', 'weather', 'westerlies', 'wetlands',
'whitewater', 'xeriscape', 'xylem', 'yield', 'zero-impact', 'zone', 'zygote', 'achieving',
'acquisition of', 'an alternative', 'analysis of', 'approach toward', 'area', 'aspects of',
'assessment of', 'assuming', 'authority', 'available', 'benefit of', 'circumstantial',
'commentary', 'components', 'concept of', 'consistent', 'corresponding', 'criteria',
'data', 'deduction', 'demonstrating', 'derived', 'distribution', 'dominant', 'elements',
'equation', 'estimate', 'evaluation', 'factors', 'features', 'final', 'function',
'initial', 'instance ', 'interpretation of', 'maintaining ', 'method', 'perceived',
'percent', 'period', 'positive', 'potential', 'previous', 'primary', 'principle',
'procedure', 'process', 'range', 'region', 'relevant', 'required', 'research',
'resources', 'response', 'role', 'section', 'select', 'significant ', 'similar',
'source', 'specific', 'strategies', 'structure', 'theory', 'transfer', 'variables',
'corvidae', 'passerine', 'Pica pica', 'Chinchilla lanigera', 'Nymphicus hollandicus',
'Melopsittacus undulatus', )
def science_word(cls):
"""
:example 'Lorem'
"""
return cls.random_element(cls.word_list)
def science_words(cls, nb=3):
"""
Generate an array of random words
:example array('Lorem', 'ipsum', 'dolor')
:param nb how many words to return
"""
return [cls.science_word() for _ in range(0, nb)]
def science_sentence(cls, nb_words=6, variable_nb_words=True):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words around how many words the sentence should contain
:param variable_nb_words set to false if you want exactly $nbWords returned,
otherwise $nbWords may vary by +/-40% with a minimum of 1
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = cls.randomize_nb_elements(nb_words)
words = cls.science_words(nb_words)
words[0] = words[0].title()
return " ".join(words) + '.'
def science_sentences(cls, nb=3):
"""
Generate an array of sentences
:example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')
:param nb how many sentences to return
:return list
"""
return [cls.science_sentence() for _ in range(0, nb)]
def science_paragraph(cls, nb_sentences=3, variable_nb_sentences=True):
"""
Generate a single paragraph
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param nb_sentences around how many sentences the paragraph should contain
:param variable_nb_sentences set to false if you want exactly $nbSentences returned,
otherwise $nbSentences may vary by +/-40% with a minimum of 1
:return string
"""
if nb_sentences <= 0:
return ''
if variable_nb_sentences:
nb_sentences = cls.randomize_nb_elements(nb_sentences)
return " ".join(cls.science_sentences(nb_sentences))
def science_paragraphs(cls, nb=3):
"""
Generate an array of paragraphs
:example array($paragraph1, $paragraph2, $paragraph3)
:param nb how many paragraphs to return
:return array
"""
return [cls.science_paragraph() for _ in range(0, nb)]
def science_text(cls, max_nb_chars=200):
"""
Generate a text string.
Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.
:example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'
:param max_nb_chars Maximum number of characters the text should contain (minimum 5)
:return string
"""
text = []
if max_nb_chars < 5:
raise ValueError('text() can only generate text of at least 5 characters')
if max_nb_chars < 25:
# join words
while not text:
size = 0
# determine how many words are needed to reach the $max_nb_chars once;
while size < max_nb_chars:
word = (' ' if size else '') + cls.science_word()
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += '.'
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
# determine how many sentences are needed to reach the $max_nb_chars once
while size < max_nb_chars:
sentence = (' ' if size else '') + cls.science_sentence()
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
# determine how many paragraphs are needed to reach the $max_nb_chars once
while size < max_nb_chars:
paragraph = ('\n' if size else '') + cls.science_paragraph()
text.append(paragraph)
size += len(paragraph)
text.pop()
return "".join(text)
logger = logging.getLogger('create_fakes')
SILENT_LOGGERS = [
'factory',
'website.mails',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
fake = Factory.create()
fake.add_provider(Sciencer)
def create_fake_user():
email = fake_email()
name = fake.name()
user = UserFactory(username=email, fullname=name,
is_registered=True, is_claimed=True,
date_registered=fake.date_time(tzinfo=pytz.UTC),
emails=[email]
)
user.set_password('faker123')
user.save()
logger.info('Created user: {0} <{1}>'.format(user.fullname, user.username))
return user
def parse_args():
parser = argparse.ArgumentParser(description='Create fake data.')
parser.add_argument('-u', '--user', dest='user', required=True)
parser.add_argument('--nusers', dest='n_users', type=int, default=3)
parser.add_argument('--nprojects', dest='n_projects', type=int, default=3)
parser.add_argument('-c', '--components', dest='n_components', type=evaluate_argument, default='0')
parser.add_argument('-p', '--privacy', dest="privacy", type=str, default='private', choices=['public', 'private'])
parser.add_argument('-n', '--name', dest='name', type=str, default=None)
parser.add_argument('-t', '--tags', dest='n_tags', type=int, default=5)
parser.add_argument('--presentation', dest='presentation_name', type=str, default=None)
parser.add_argument('-r', '--registration', dest='is_registration', type=bool, default=False)
parser.add_argument('-pre', '--preprint', dest='is_preprint', type=bool, default=False)
parser.add_argument('-preprovider', '--preprintprovider', dest='preprint_provider', type=str, default=None)
return parser.parse_args()
def evaluate_argument(string):
return ast.literal_eval(string)
def create_fake_project(creator, n_users, privacy, n_components, name, n_tags, presentation_name, is_registration, is_preprint, preprint_provider):
auth = Auth(user=creator)
project_title = name if name else fake.science_sentence()
if is_preprint:
provider = None
if preprint_provider:
try:
provider = models.PreprintProvider.objects.get(_id=preprint_provider)
except models.PreprintProvider.DoesNotExist:
pass
if not provider:
provider = PreprintProviderFactory(name=fake.science_word())
privacy = 'public'
mock_change_identifier = mock.patch('website.identifiers.client.EzidClient.update_identifier')
mock_change_identifier.start()
mock_change_identifier_preprints = mock.patch('website.identifiers.client.CrossRefClient.update_identifier')
mock_change_identifier_preprints.start()
project = PreprintFactory(title=project_title, description=fake.science_paragraph(), creator=creator, provider=provider)
node = project.node
elif is_registration:
project = RegistrationFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
else:
project = ProjectFactory(title=project_title, description=fake.science_paragraph(), creator=creator)
node = project
node.set_privacy(privacy)
for _ in range(n_users):
contrib = create_fake_user()
node.add_contributor(contrib, auth=auth)
if isinstance(n_components, int):
for _ in range(n_components):
NodeFactory(parent=node, title=fake.science_sentence(), description=fake.science_paragraph(),
creator=creator)
elif isinstance(n_components, list):
render_generations_from_node_structure_list(node, creator, n_components)
for _ in range(n_tags):
node.add_tag(fake.science_word(), auth=auth)
if presentation_name is not None:
node.add_tag(presentation_name, auth=auth)
node.add_tag('poster', auth=auth)
node.save()
project.save()
logger.info('Created project: {0}'.format(node.title))
return project
def render_generations_from_parent(parent, creator, num_generations):
current_gen = parent
for generation in xrange(0, num_generations):
next_gen = NodeFactory(
parent=current_gen,
creator=creator,
title=fake.science_sentence(),
description=fake.science_paragraph()
)
current_gen = next_gen
return current_gen
def render_generations_from_node_structure_list(parent, creator, node_structure_list):
new_parent = None
for node_number in node_structure_list:
if isinstance(node_number, list):
render_generations_from_node_structure_list(new_parent or parent, creator, node_number)
else:
new_parent = render_generations_from_parent(parent, creator, node_number)
return new_parent
def main():
args = parse_args()
creator = models.OSFUser.objects.get(username=args.user)
for i in range(args.n_projects):
name = args.name + str(i) if args.name else ''
create_fake_project(creator, args.n_users, args.privacy, args.n_components, name, args.n_tags,
args.presentation_name, args.is_registration, args.is_preprint, args.preprint_provider)
print('Created {n} fake projects.'.format(n=args.n_projects))
sys.exit(0)
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
main()
|
apache-2.0
|
polimediaupv/edx-platform
|
lms/djangoapps/instructor/features/data_download.py
|
56
|
3761
|
"""
Define steps for instructor dashboard - data download tab
acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_in, assert_regexp_matches # pylint: disable=no-name-in-module
from terrain.steps import reload_the_page
from django.utils import http
@step(u'I see a table of student profiles')
def find_student_profile_table(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-student-profiles-table')
# Wait for the data table to be populated
world.wait_for(lambda _: world.css_text('#data-student-profiles-table') not in [u'', u'Loading...'])
if world.role == 'instructor':
expected_data = [
world.instructor.username,
world.instructor.email,
world.instructor.profile.name,
world.instructor.profile.gender,
world.instructor.profile.goals
]
elif world.role == 'staff':
expected_data = [
world.staff.username,
world.staff.email,
world.staff.profile.name,
world.staff.profile.gender,
world.staff.profile.goals
]
for datum in expected_data:
assert_in(datum, world.css_text('#data-student-profiles-table'))
@step(u"I do not see a button to 'List enrolled students' profile information'")
def no_student_profile_table(step): # pylint: disable=unused-argument
world.is_css_not_present('input[name="list-profiles"]')
@step(u"I see the grading configuration for the course")
def find_grading_config(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-grade-config-text')
# expected config is the default grading configuration from common/lib/xmodule/xmodule/course_module.py
expected_config = u"""-----------------------------------------------------------------------------
Course grader:
<class 'xmodule.graders.WeightedSubsectionsGrader'>
Graded sections:
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Homework, category=Homework, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Lab, category=Lab, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Midterm Exam, category=Midterm Exam, weight=0.3
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Final Exam, category=Final Exam, weight=0.4
-----------------------------------------------------------------------------
Listing grading context for course {}
graded sections:
[]
all descriptors:
length=0""".format(world.course_key)
assert_in(expected_config, world.css_text('#data-grade-config-text'))
def verify_report_is_generated(report_name_substring):
# Need to reload the page to see the reports table updated
reload_the_page(step)
world.wait_for_visible('#report-downloads-table')
# Find table and assert a .csv file is present
quoted_id = http.urlquote(world.course_key).replace('/', '_')
expected_file_regexp = quoted_id + '_' + report_name_substring + '_\d{4}-\d{2}-\d{2}-\d{4}\.csv'
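# Added note; not in the upstream test. The regexp above matches filenames
# of the form <quoted course id>_<report name>_YYYY-MM-DD-HHMM.csv, e.g. a
# hypothetical 'MITx_6.002x_2014_Spring_grade_report_2014-02-24-1600.csv'.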
assert_regexp_matches(
world.css_html('#report-downloads-table'), expected_file_regexp,
msg="Expected report filename was not found."
)
@step(u"I see a grade report csv file in the reports table")
def find_grade_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('grade_report')
@step(u"I see a student profile csv file in the reports table")
def find_student_profile_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('student_profile_info')
|
agpl-3.0
|
armab/st2
|
st2tests/st2tests/config.py
|
2
|
8930
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg, types
from st2common import log as logging
import st2common.config as common_config
from st2common.constants.sensors import DEFAULT_PARTITION_LOADER
from st2tests.fixturesloader import get_fixtures_base_path
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def parse_args():
_setup_config_opts()
CONF(args=[])
def _setup_config_opts():
cfg.CONF.reset()
try:
_register_config_opts()
except Exception as e:
print(e)
# Some scripts register the options themselves which means registering them again will
# cause a non-fatal exception
return
_override_config_opts()
def _override_config_opts():
_override_db_opts()
_override_common_opts()
def _register_config_opts():
_register_common_opts()
_register_api_opts()
_register_auth_opts()
_register_action_sensor_opts()
_register_ssh_runner_opts()
_register_cloudslang_opts()
_register_scheduler_opts()
_register_exporter_opts()
_register_sensor_container_opts()
def _override_db_opts():
CONF.set_override(name='db_name', override='st2-test', group='database')
def _override_common_opts():
packs_base_path = get_fixtures_base_path()
CONF.set_override(name='base_path', override=packs_base_path, group='system')
CONF.set_override(name='system_packs_base_path', override=packs_base_path, group='content')
CONF.set_override(name='packs_base_paths', override=packs_base_path, group='content')
CONF.set_override(name='api_url', override='http://localhost', group='auth')
CONF.set_override(name='admin_users', override=['admin_user'], group='system')
CONF.set_override(name='mask_secrets', override=True, group='log')
CONF.set_override(name='url', override='zake://', group='coordination')
CONF.set_override(name='lock_timeout', override=1, group='coordination')
def _register_common_opts():
try:
common_config.register_opts(ignore_errors=True)
except:
LOG.exception('Common config registration failed.')
def _register_api_opts():
api_opts = [
cfg.ListOpt('allow_origin', default=['http://localhost:3000', 'http://dev'],
help='List of origins allowed'),
cfg.IntOpt('heartbeat', default=25,
help='Send empty message every N seconds to keep connection open'),
cfg.BoolOpt('mask_secrets', default=True,
help='True to mask secrets in API responses')
]
_register_opts(api_opts, group='api')
# XXX: note : template_path value only works if started from the top-level of the codebase.
# Brittle!
pecan_opts = [
cfg.StrOpt('root',
default='st2api.controllers.root.RootController',
help='Pecan root controller'),
cfg.StrOpt('template_path',
default='%(confdir)s/st2api/st2api/templates'),
cfg.ListOpt('modules', default=['st2api']),
cfg.BoolOpt('debug', default=True),
cfg.BoolOpt('auth_enable', default=True),
cfg.DictOpt('errors', default={404: '/error/404', '__force_dict__': True})
]
_register_opts(pecan_opts, group='api_pecan')
messaging_opts = [
cfg.StrOpt('url', default='amqp://guest:[email protected]:5672//',
help='URL of the messaging server.'),
cfg.ListOpt('cluster_urls', default=[],
help='URL of all the nodes in a messaging service cluster.')
]
_register_opts(messaging_opts, group='messaging')
ssh_runner_opts = [
cfg.StrOpt('remote_dir',
default='/tmp',
help='Location of the script on the remote filesystem.'),
cfg.BoolOpt('allow_partial_failure',
default=False,
help='How partial success of actions run on multiple nodes should be treated.'),
cfg.BoolOpt('use_ssh_config',
default=False,
help='Use the .ssh/config file. Useful to override ports etc.')
]
_register_opts(ssh_runner_opts, group='ssh_runner')
def _register_auth_opts():
auth_opts = [
cfg.StrOpt('host', default='0.0.0.0'),
cfg.IntOpt('port', default=9100),
cfg.BoolOpt('use_ssl', default=False),
cfg.StrOpt('mode', default='proxy'),
cfg.StrOpt('logging', default='conf/logging.conf'),
cfg.IntOpt('token_ttl', default=86400, help='Access token ttl in seconds.'),
cfg.BoolOpt('debug', default=True)
]
_register_opts(auth_opts, group='auth')
def _register_action_sensor_opts():
action_sensor_opts = [
cfg.BoolOpt('enable', default=True,
help='Whether to enable or disable the ability ' +
'to post a trigger on action.'),
cfg.StrOpt('triggers_base_url', default='http://localhost:9101/v1/triggertypes/',
help='URL for action sensor to post TriggerType.'),
cfg.IntOpt('request_timeout', default=1,
help='Timeout value of all httprequests made by action sensor.'),
cfg.IntOpt('max_attempts', default=10,
help='No. of times to retry registration.'),
cfg.IntOpt('retry_wait', default=1,
help='Amount of time to wait prior to retrying a request.')
]
_register_opts(action_sensor_opts, group='action_sensor')
def _register_ssh_runner_opts():
ssh_runner_opts = [
cfg.BoolOpt('use_ssh_config', default=False,
help='Use the .ssh/config file. Useful to override ports etc.'),
cfg.StrOpt('remote_dir',
default='/tmp',
help='Location of the script on the remote filesystem.'),
cfg.BoolOpt('allow_partial_failure',
default=False,
help='How partial success of actions run on multiple nodes ' +
'should be treated.'),
cfg.BoolOpt('use_paramiko_ssh_runner',
default=False,
help='Use Paramiko based SSH runner as the default remote runner. ' +
'EXPERIMENTAL!!! USE AT YOUR OWN RISK.'),
cfg.IntOpt('max_parallel_actions', default=50,
help='Max number of parallel remote SSH actions that should be run. ' +
'Works only with Paramiko SSH runner.'),
]
_register_opts(ssh_runner_opts, group='ssh_runner')
def _register_cloudslang_opts():
cloudslang_opts = [
cfg.StrOpt('home_dir', default='/opt/cslang',
help='CloudSlang home directory.')
]
_register_opts(cloudslang_opts, group='cloudslang')
def _register_scheduler_opts():
scheduler_opts = [
cfg.IntOpt('delayed_execution_recovery', default=600,
help='The time in seconds to wait before recovering delayed action executions.'),
cfg.IntOpt('rescheduling_interval', default=300,
help='The frequency for rescheduling action executions.')
]
_register_opts(scheduler_opts, group='scheduler')
def _register_exporter_opts():
exporter_opts = [
cfg.StrOpt('dump_dir', default='/opt/stackstorm/exports/',
help='Directory to dump data to.')
]
_register_opts(exporter_opts, group='exporter')
def _register_sensor_container_opts():
partition_opts = [
cfg.StrOpt('sensor_node_name', default='sensornode1',
help='name of the sensor node.'),
cfg.Opt('partition_provider', type=types.Dict(value_type=types.String()),
default={'name': DEFAULT_PARTITION_LOADER},
help='Provider of sensor node partition config.')
]
_register_opts(partition_opts, group='sensorcontainer')
sensor_test_opt = cfg.StrOpt('sensor-ref', help='Only run sensor with the provided reference. \
Value is of the form pack.sensor-name.')
_register_cli_opts([sensor_test_opt])
def _register_opts(opts, group=None):
CONF.register_opts(opts, group)
def _register_cli_opts(opts):
cfg.CONF.register_cli_opts(opts)
|
apache-2.0
|
CDSherrill/psi4
|
tests/json/schema-1-orient/input.py
|
17
|
2246
|
#! test QC_JSON Schema mol orientation
import numpy as np
import psi4
import json
import copy
# Generate JSON data
json_data = {
"schema_name": "qc_schema_input",
"schema_version": 1,
"molecule": {
"geometry": [
0.0,
0.0,
0.0,
0.0,
1.732,
0.0,
],
"symbols": ["F", "H"]
},
"driver": "energy",
"model": {
"method": "SCF",
"basis": "cc-pVDZ"
},
"keywords": {
"scf_type": "df"
}
}
noorient_data = copy.deepcopy(json_data)
noorient_data["molecule"]["fix_orientation"] = True
noorient_data["molecule"]["fix_com"] = True
# Write expected output
expected_return_result = -100.0194177509218
linear_dipole = 0.7667930938
json_ret = psi4.json_wrapper.run_json(json_data)
# Orients to Z axis
psi4.compare_integers(True, json_ret["success"], "JSON Success") #TEST
psi4.compare_values(expected_return_result, json_ret["return_result"], 5, "Return Value") #TEST
psi4.compare_values(0.0, json_ret["properties"]["scf_dipole_moment"][0], 3, "DIPOLE X") #TEST
psi4.compare_values(0.0, json_ret["properties"]["scf_dipole_moment"][1], 3, "DIPOLE Y") #TEST
psi4.compare_values(linear_dipole, json_ret["properties"]["scf_dipole_moment"][2], 3, "DIPOLE Z") #TEST
dist = np.linalg.norm(np.array(json_ret["molecule"]["geometry"])[:3] - np.array(np.array(json_ret["molecule"]["geometry"])[3:])) #TEST
psi4.compare_values(1.732, dist, 4, "HF Bond Distance") #TEST
json_ret = psi4.json_wrapper.run_json(noorient_data)
# Orients to Z axis
psi4.compare_integers(True, json_ret["success"], "JSON Success") #TEST
psi4.compare_values(expected_return_result, json_ret["return_result"], 5, "Return Value") #TEST
psi4.compare_values(0.0, json_ret["properties"]["scf_dipole_moment"][0], 3, "DIPOLE X") #TEST
psi4.compare_values(linear_dipole, json_ret["properties"]["scf_dipole_moment"][1], 3, "DIPOLE Y") #TEST
psi4.compare_values(0.0, json_ret["properties"]["scf_dipole_moment"][2], 3, "DIPOLE Z") #TEST
psi4.compare_arrays([0.0, 0.0, 0.0], json_ret["molecule"]["geometry"][:3], 3, "H Position") #TEST
psi4.compare_arrays([0.0, 1.732, 0.0], json_ret["molecule"]["geometry"][3:], 3, "F Position") #TEST
|
lgpl-3.0
|
kyvinh/home-assistant
|
homeassistant/components/camera/amcrest.py
|
10
|
4397
|
"""
This component provides basic support for Amcrest IP cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.amcrest/
"""
import asyncio
import logging
import aiohttp
import voluptuous as vol
import homeassistant.loader as loader
from homeassistant.components.camera import (Camera, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_PORT)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import (
async_get_clientsession, async_aiohttp_proxy_stream)
REQUIREMENTS = ['amcrest==1.1.4']
_LOGGER = logging.getLogger(__name__)
CONF_RESOLUTION = 'resolution'
CONF_STREAM_SOURCE = 'stream_source'
DEFAULT_NAME = 'Amcrest Camera'
DEFAULT_PORT = 80
DEFAULT_RESOLUTION = 'high'
DEFAULT_STREAM_SOURCE = 'mjpeg'
NOTIFICATION_ID = 'amcrest_notification'
NOTIFICATION_TITLE = 'Amcrest Camera Setup'
RESOLUTION_LIST = {
'high': 0,
'low': 1,
}
STREAM_SOURCE_LIST = {
'mjpeg': 0,
'snapshot': 1
}
CONTENT_TYPE_HEADER = 'Content-Type'
TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_RESOLUTION, default=DEFAULT_RESOLUTION):
vol.All(vol.In(RESOLUTION_LIST)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_STREAM_SOURCE, default=DEFAULT_STREAM_SOURCE):
vol.All(vol.In(STREAM_SOURCE_LIST)),
})
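# Added illustrative sketch; not part of the upstream component. A minimal
# configuration.yaml entry matching the schema above, with placeholder host
# and credentials:
#
#   camera:
#     - platform: amcrest
#       host: 192.168.1.100
#       username: admin
#       password: YOUR_PASSWORD
#       resolution: high
#       stream_source: mjpeg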
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up an Amcrest IP Camera."""
from amcrest import AmcrestCamera
camera = AmcrestCamera(
config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_USERNAME), config.get(CONF_PASSWORD)).camera
persistent_notification = loader.get_component('persistent_notification')
try:
camera.current_time
# pylint: disable=broad-except
except Exception as ex:
_LOGGER.error("Unable to connect to Amcrest camera: %s", str(ex))
persistent_notification.create(
hass, 'Error: {}<br />'
'You will need to restart hass after fixing.'
''.format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
add_devices([AmcrestCam(hass, config, camera)])
return True
class AmcrestCam(Camera):
"""An implementation of an Amcrest IP camera."""
def __init__(self, hass, device_info, camera):
"""Initialize an Amcrest camera."""
super(AmcrestCam, self).__init__()
self._camera = camera
self._base_url = self._camera.get_base_url()
self._hass = hass
self._name = device_info.get(CONF_NAME)
self._resolution = RESOLUTION_LIST[device_info.get(CONF_RESOLUTION)]
self._stream_source = STREAM_SOURCE_LIST[
device_info.get(CONF_STREAM_SOURCE)
]
self._token = self._auth = aiohttp.BasicAuth(
device_info.get(CONF_USERNAME),
password=device_info.get(CONF_PASSWORD)
)
def camera_image(self):
"""Return a still image reponse from the camera."""
# Send the request to snap a picture and return raw jpg data
response = self._camera.snapshot(channel=self._resolution)
return response.data
@asyncio.coroutine
def handle_async_mjpeg_stream(self, request):
"""Return an MJPEG stream."""
# The snapshot implementation is handled by the parent class
if self._stream_source == STREAM_SOURCE_LIST['snapshot']:
yield from super().handle_async_mjpeg_stream(request)
return
# Otherwise, stream an MJPEG image stream directly from the camera
websession = async_get_clientsession(self.hass)
streaming_url = '{0}mjpg/video.cgi?channel=0&subtype={1}'.format(
self._base_url, self._resolution)
stream_coro = websession.get(
streaming_url, auth=self._token, timeout=TIMEOUT)
yield from async_aiohttp_proxy_stream(self.hass, request, stream_coro)
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
apache-2.0
|
kronoscode/Booktype
|
lib/booktype/utils/tests/test_log_chapter_history.py
|
7
|
1743
|
# -*- coding: utf-8 -*-
# NOTE: for now this is intended to test all methods in
# log.py in the old booki app. In the near future we should move log.py
# into the new booktype app.
from django.test import TestCase
from booktype.tests.factory_models import BookFactory, BookVersionFactory, ChapterFactory, UserFactory
from booki.utils.log import logChapterHistory
from booki.editor.models import ChapterHistory
class LogChapterHistoryTestCase(TestCase):
"""
Tests all methods in log.py module
"""
def setUp(self):
super(LogChapterHistoryTestCase, self).setUp()
self.book = BookFactory()
self.book_version = BookVersionFactory(book=self.book)
self.chapter = ChapterFactory(book=self.book)
self.user = UserFactory()
def test_log_chapter_history(self):
chapter_content = 'Test of log chapter history'
history = logChapterHistory(
chapter=self.chapter,
user=self.user,
revision=1,
content=chapter_content
)
# check returned object
self.assertTrue(
isinstance(history, ChapterHistory),
"Returned object should be instance of ChapterHistory"
)
# check some values as well
self.assertEqual(history.chapter, self.chapter)
self.assertEqual(history.content, chapter_content)
self.assertEqual(history.revision, 1)
# test with some bad values
none_history = logChapterHistory(
chapter=self.chapter,
user=''
)
# check returned values for bad params
self.assertEqual(
none_history,
None,
"It should return None in case of bad parameters"
)
|
agpl-3.0
|
Perferom/android_external_chromium_org
|
chrome/common/extensions/docs/server2/test_object_store_test.py
|
153
|
1495
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from test_object_store import TestObjectStore
import unittest
class TestObjectStoreTest(unittest.TestCase):
def testEmpty(self):
store = TestObjectStore('namespace')
self.assertEqual(None, store.Get('hi').Get())
self.assertEqual({}, store.GetMulti(['hi', 'lo']).Get())
def testNonEmpty(self):
store = TestObjectStore('namespace')
store.Set('hi', 'bye')
self.assertEqual('bye', store.Get('hi').Get())
self.assertEqual({'hi': 'bye'}, store.GetMulti(['hi', 'lo']).Get())
store.Set('hi', 'blah')
self.assertEqual('blah', store.Get('hi').Get())
self.assertEqual({'hi': 'blah'}, store.GetMulti(['hi', 'lo']).Get())
store.Del('hi')
self.assertEqual(None, store.Get('hi').Get())
self.assertEqual({}, store.GetMulti(['hi', 'lo']).Get())
def testCheckAndReset(self):
store = TestObjectStore('namespace')
store.Set('x', 'y')
self.assertTrue(*store.CheckAndReset(set_count=1))
store.Set('x', 'y')
store.Set('x', 'y')
self.assertTrue(*store.CheckAndReset(set_count=2))
store.Set('x', 'y')
store.Set('x', 'y')
store.Get('x').Get()
store.Get('x').Get()
store.Get('x').Get()
store.Del('x')
self.assertTrue(*store.CheckAndReset(get_count=3, set_count=2, del_count=1))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
LumaPictures/rez
|
src/rez/bind/python.py
|
3
|
3490
|
"""
Binds a python executable as a rez package.
"""
from __future__ import absolute_import
from rez.bind._utils import check_version, find_exe, extract_version, \
make_dirs, log, run_python_command
from rez.package_maker__ import make_package
from rez.system import system
from rez.utils.lint_helper import env
from rez.utils.platform_ import platform_
from rez.vendor.version.version import Version
import shutil
import os.path
def setup_parser(parser):
parser.add_argument("--exe", type=str, metavar="PATH",
help="bind an interpreter other than the current "
"python interpreter")
def commands():
env.PATH.append('{this.root}/bin')
def post_commands():
# these are the builtin modules for this python executable. If we don't
# include these, some python behavior can be incorrect.
import os, os.path
path = os.path.join(this.root, "python")
for dirname in os.listdir(path):
path_ = os.path.join(path, dirname)
env.PYTHONPATH.append(path_)
def bind(path, version_range=None, opts=None, parser=None):
# find executable, determine version
exepath = find_exe("python", opts.exe)
code = "import sys; print '.'.join(str(x) for x in sys.version_info)"
version = extract_version(exepath, ["-c", code])
check_version(version, version_range)
log("binding python: %s" % exepath)
# find builtin modules
builtin_paths = {}
entries = [("lib", "os"),
("extra", "setuptools")]
for dirname, module_name in entries:
success, out, err = run_python_command([
"import %s" % module_name,
"print %s.__file__" % module_name])
if success:
pypath = os.path.dirname(out)
if os.path.basename(pypath) == module_name:
pypath = os.path.dirname(pypath)
if pypath not in builtin_paths.values():
builtin_paths[dirname] = pypath
# make the package
#
def make_root(variant, root):
binpath = make_dirs(root, "bin")
link = os.path.join(binpath, "python")
platform_.symlink(exepath, link)
if builtin_paths:
pypath = make_dirs(root, "python")
for dirname, srcpath in builtin_paths.iteritems():
destpath = os.path.join(pypath, dirname)
log("Copying builtins from %s to %s..." % (srcpath, destpath))
shutil.copytree(srcpath, destpath)
with make_package("python", path, make_root=make_root) as pkg:
pkg.version = version
pkg.tools = ["python"]
pkg.commands = commands
pkg.variants = [system.variant]
if builtin_paths:
pkg.post_commands = post_commands
return pkg.installed_variants
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
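# Usage sketch (added for illustration; the exact CLI wrapper is an assumption,
# only setup_parser()/bind() above come from this file):
#
#   rez-bind python                              # bind the interpreter running rez
#   rez-bind python --exe=/usr/bin/python2.7     # bind a specific interpreter
#
# Either way bind() above symlinks the chosen executable into the package's
# bin/ directory and copies the builtin module locations it discovered.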
|
lgpl-3.0
|
Abi1ity/uniclust2.0
|
flask/lib/python2.7/site-packages/django/conf/locale/cy/formats.py
|
160
|
1822
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
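# Illustrative sketch (not part of the original file): Django's localization
# machinery picks these constants up automatically, and the *_INPUT_FORMATS
# tuples can also be handed to form fields directly, e.g.
#
#   from django import forms
#   field = forms.DateField(input_formats=DATE_INPUT_FORMATS)
#   field.clean('25/10/2006')   # -> datetime.date(2006, 10, 25)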
|
bsd-3-clause
|
elenanst/HPOlib
|
HPOlib/wrapping.py
|
2
|
24657
|
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import imp
import logging
import psutil
import os
from Queue import Queue, Empty
import signal
import shlex
import shutil
import subprocess
import sys
from threading import Thread
import time
import warnings
import HPOlib
import HPOlib.check_before_start as check_before_start
import HPOlib.wrapping_util as wrapping_util
import HPOlib.dispatcher.runsolver_wrapper as runsolver_wrapper
# The logging server set up in main() needs these names; DEFAULT_TCP_LOGGING_PORT
# is the stdlib default, and the logging_server module path is assumed to live
# next to runsolver_wrapper in HPOlib.dispatcher.
from logging.handlers import DEFAULT_TCP_LOGGING_PORT
import HPOlib.dispatcher.logging_server as logging_server
# Import experiment only after the check for numpy succeeded
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
INFODEVEL = """
##############################################################################
# You are using the DEVELOPMENT version. This means we might change things  #
# on a daily basis, commit untested code and remove or add features without #
# announcements. We do not intend to break any functionality, but cannot #
# guarantee to not do it. #
##############################################################################
"""
IS_DEVELOPMENT = True
hpolib_logger = logging.getLogger("HPOlib")
logger = logging.getLogger("HPOlib.wrapping")
def calculate_wrapping_overhead(trials):
wrapping_time = 0
for times in zip(trials.cv_starttime, trials.cv_endtime):
wrapping_time += times[1] - times[0]
# We need to import numpy again
import numpy as np
benchmark_time = 0
for t in trials.trials:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
benchmark_time += np.nansum(t['instance_durations'])
wrapping_time = wrapping_time - benchmark_time
return wrapping_time
def calculate_optimizer_time(trials):
optimizer_time = []
time_idx = 0
optimizer_time.append(trials.cv_starttime[0] - trials.starttime[time_idx])
for i in range(len(trials.cv_starttime[1:])):
if trials.cv_starttime[i + 1] > trials.endtime[time_idx]:
optimizer_time.append(trials.endtime[time_idx] -
trials.cv_endtime[i])
time_idx += 1
optimizer_time.append(trials.cv_starttime[i + 1] -
trials.starttime[time_idx])
else:
optimizer_time.append(trials.cv_starttime[i + 1] -
trials.cv_endtime[i])
optimizer_time.append(trials.endtime[time_idx] - trials.cv_endtime[-1])
trials.optimizer_time = optimizer_time
# We need to import numpy again
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.nansum(optimizer_time)
def use_arg_parser():
"""Parse all options which can be handled by the wrapping script.
Unknown arguments are ignored and returned as a list. It is useful to
check this list in your program to handle typos etc.
Returns:
a tuple. The first element is an argparse.Namespace object,
the second a list with all unknown arguments.
"""
description = "Perform an experiment with HPOlib. " \
"Call this script from experiment directory (containing 'config.cfg')"
epilog = "Your are using HPOlib " + HPOlib.__version__
prog = "path/from/Experiment/to/HPOlib/wrapping.py"
parser = ArgumentParser(description=description, prog=prog, epilog=epilog)
parser.add_argument("-o", "--optimizer", action="store", type=str,
dest="optimizer",
help="Specify the optimizer name.", required=True)
parser.add_argument("-p", "--print", action="store_true",
dest="printcmd", default=False,
help="If set print the command instead of executing it")
parser.add_argument("-s", "--seed", action="store", type=int,
dest="seed", default=1,
help="Set the seed of the optimizer")
parser.add_argument("-t", "--title", action="store", type=str,
dest="title", default=None,
help="A title for the experiment")
parser.add_argument("--cwd", action="store", type=str, dest="working_dir",
default=None, help="Change the working directory to "
"<working_directory> prior to running the experiment")
parser.add_argument("-r", "--restore", action="store", type=str,
dest="restore", default=None,
help="Restore the state from a given directory")
group = parser.add_mutually_exclusive_group()
group.add_argument("-q", "--silent", action="store_true",
dest="silent", default=False,
help="Don't print anything during optimization")
group.add_argument("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Print stderr/stdout for optimizer")
args, unknown = parser.parse_known_args()
return args, unknown
def main():
"""Start an optimization of the HPOlib. For documentation see the
comments inside this function and the general HPOlib documentation."""
args, unknown_arguments = use_arg_parser()
if args.working_dir:
experiment_dir = args.working_dir
elif args.restore:
args.restore = os.path.abspath(args.restore) + "/"
experiment_dir = args.restore
else:
experiment_dir = os.getcwd()
formatter = logging.Formatter('[%(levelname)s] [%(asctime)s:%(name)s] %('
'message)s', datefmt='%H:%M:%S')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
hpolib_logger.addHandler(handler)
hpolib_logger.setLevel(1)
# First of all print the infodevel
if IS_DEVELOPMENT:
logger.critical(INFODEVEL)
args, unknown_arguments = use_arg_parser()
# Convert the path to the optimizer to be an absolute path, which is
# necessary later when we change the working directory
optimizer = args.optimizer
print("opti:", optimizer)
if not os.path.isabs(optimizer):
relative_path = optimizer
optimizer = os.path.abspath(optimizer)
logger.info("Converting relative optimizer path %s to absolute "
"optimizer path %s.", relative_path, optimizer)
os.chdir(experiment_dir)
experiment_dir = os.getcwd()
check_before_start.check_first(experiment_dir)
# Now we can safely import non standard things
import numpy as np
import HPOlib.Experiment as Experiment # Wants numpy and scipy
# Check how many optimizer versions are present and if all dependencies
# are installed also dynamically load optimizer obj
optimizer_version, opt_obj = check_before_start.check_optimizer(optimizer)
logger.warning("You called -o %s, I am using optimizer defined in "
"%sDefault.cfg", optimizer, optimizer_version)
optimizer = os.path.basename(optimizer_version)
config = wrapping_util.get_configuration(experiment_dir,
optimizer_version, unknown_arguments, opt_obj)
# DO NOT LOG UNTIL HERE UNLESS SOMETHING DRAMATIC HAS HAPPENED!!!
loglevel = config.getint("HPOLIB", "HPOlib_loglevel")
hpolib_logger.setLevel(loglevel)
if args.silent:
hpolib_logger.setLevel(60)
if args.verbose:
hpolib_logger.setLevel(10)
    # Saving the config file is done further down, as soon as we get
# hold of the new optimizer directory
# wrapping_dir = os.path.dirname(os.path.realpath(__file__))
# Load optimizer
try:
optimizer_dir = os.path.dirname(os.path.realpath(optimizer_version))
optimizer_module = imp.load_source(optimizer_dir, optimizer_version + ".py")
except (ImportError, IOError):
logger.critical("Optimizer module %s not found", optimizer)
import traceback
logger.critical(traceback.format_exc())
sys.exit(1)
    # So the optimizer module can access the seed from the config
config.set("HPOLIB", "seed", str(args.seed))
experiment_directory_prefix = config.get("HPOLIB", "experiment_directory_prefix")
optimizer_call, optimizer_dir_in_experiment = \
opt_obj.main(config=config, options=args,
experiment_dir=experiment_dir)
# experiment_directory_prefix=experiment_directory_prefix)
cmd = optimizer_call
# Start the server for logging from subprocesses here, because its port must
# be written to the config file.
logging_host = config.get("HPOLIB", "logging_host")
if logging_host:
logging_receiver_thread = None
default_logging_port = DEFAULT_TCP_LOGGING_PORT
for logging_port in range(default_logging_port, 65535):
try:
logging_receiver = logging_server.LoggingReceiver(
host=logging_host, port=logging_port,
handler=logging_server.LogRecordStreamHandler)
logging_receiver_thread = Thread(target=logging_receiver.serve_forever)
logging_receiver_thread.daemon = True
logger.info('%s started at %s' % (
logging_receiver.__class__.__name__,
logging_receiver.server_address))
logging_receiver_thread.start()
break
# TODO I did not find any useful documentation about which Exceptions
# I should catch here...
except Exception as e:
logger.debug(e)
logger.debug(e.message)
if logging_receiver_thread is None:
logger.critical("Could not create the logging server. Going to shut "
"down.")
sys.exit(1)
config.set("HPOLIB", "logging_port", str(logging_port))
with open(os.path.join(optimizer_dir_in_experiment, "config.cfg"), "w") as f:
config.set("HPOLIB", "is_not_original_config_file", "True")
wrapping_util.save_config_to_file(f, config, write_nones=True)
# initialize/reload pickle file
if args.restore:
try:
os.remove(os.path.join(optimizer_dir_in_experiment, optimizer + ".pkl.lock"))
except OSError:
pass
folds = config.getint('HPOLIB', 'number_cv_folds')
trials = Experiment.Experiment(expt_dir=optimizer_dir_in_experiment,
expt_name=experiment_directory_prefix + optimizer,
folds=folds,
max_wallclock_time=config.get('HPOLIB',
'cpu_limit'),
title=args.title)
trials.optimizer = optimizer_version
optimizer_output_file = os.path.join(optimizer_dir_in_experiment, optimizer + wrapping_util.get_time_string() +
"_" + str(args.seed) + ".out")
if args.restore:
# noinspection PyBroadException
try:
restored_runs = optimizer_module.restore(config=config,
optimizer_dir=optimizer_dir_in_experiment,
cmd=cmd)
except:
logger.critical("Could not restore runs for %s", args.restore)
import traceback
logger.critical(traceback.format_exc())
sys.exit(1)
logger.info("Restored %d runs", restored_runs)
trials.remove_all_but_first_runs(restored_runs)
fh = open(optimizer_output_file, "a")
fh.write("#" * 80 + "\n" + "Restart! Restored %d runs.\n" % restored_runs)
fh.close()
if len(trials.endtime) < len(trials.starttime):
trials.endtime.append(trials.cv_endtime[-1])
trials.starttime.append(time.time())
else:
trials.starttime.append(time.time())
# noinspection PyProtectedMember
trials._save_jobs()
del trials
sys.stdout.flush()
# Run call
if args.printcmd:
logger.info(cmd)
return 0
else:
# Create a second formatter and handler to customize the optimizer
# output
optimization_formatter = logging.Formatter(
'[%(levelname)s] [%(asctime)s:%(optimizer)s] %(message)s',
datefmt='%H:%M:%S')
optimization_handler = logging.StreamHandler(sys.stdout)
optimization_handler.setFormatter(optimization_formatter)
optimization_logger = logging.getLogger(optimizer)
optimization_logger.addHandler(optimization_handler)
optimizer_loglevel = config.getint("HPOLIB", "optimizer_loglevel")
optimization_logger.setLevel(optimizer_loglevel)
# Use a flag which is set to true as soon as all children are
# supposed to be killed
exit_ = wrapping_util.Exit()
signal.signal(signal.SIGTERM, exit_.signal_callback)
signal.signal(signal.SIGABRT, exit_.signal_callback)
signal.signal(signal.SIGINT, exit_.signal_callback)
signal.signal(signal.SIGHUP, exit_.signal_callback)
# Change into the current experiment directory
# Some optimizer might expect this
dir_before_exp = os.getcwd()
temporary_output_dir = config.get("HPOLIB", "temporary_output_directory")
if temporary_output_dir:
last_part = os.path.split(optimizer_dir_in_experiment)[1]
temporary_output_dir = os.path.join(temporary_output_dir, last_part)
            # Replace any occurrence of the path in the command
cmd = cmd.replace(optimizer_dir_in_experiment, temporary_output_dir)
optimizer_output_file = optimizer_output_file.replace(optimizer_dir_in_experiment, temporary_output_dir)
shutil.copytree(optimizer_dir_in_experiment, temporary_output_dir)
# shutil.rmtree does not work properly with NFS
# https://github.com/hashdist/hashdist/issues/113
# Idea from https://github.com/ahmadia/hashdist/
for rmtree_iter in range(5):
try:
shutil.rmtree(optimizer_dir_in_experiment)
break
except OSError, e:
time.sleep(rmtree_iter)
optimizer_dir_in_experiment = temporary_output_dir
# call target_function.setup()
fn_setup = config.get("HPOLIB", "function_setup")
if fn_setup:
# if temporary_output_dir:
# logger.critical("The options 'temporary_output_directory' "
# "and 'function_setup' cannot be used "
# "together.")
# sys.exit(1)
fn_setup_output = os.path.join(optimizer_dir_in_experiment,
"function_setup.out")
runsolver_cmd = runsolver_wrapper._make_runsolver_command(
config, fn_setup_output)
setup_cmd = runsolver_cmd + " " + fn_setup
# runsolver_output = subprocess.STDOUT
runsolver_output = open("/dev/null")
runsolver_wrapper._run_command_with_shell(setup_cmd,
runsolver_output)
os.chdir(optimizer_dir_in_experiment)
logger.info(cmd)
output_file = optimizer_output_file
fh = open(output_file, "a")
cmd = shlex.split(cmd)
print cmd
# See man 7 credentials for the meaning of a process group id
        # This makes wrapping.py usable with SGE's default behaviour,
# where qdel sends a SIGKILL to a whole process group
# logger.info(os.getpid())
# os.setpgid(os.getpid(), os.getpid()) # same as os.setpgid(0, 0)
# TODO: figure out why shell=True was removed in commit f47ac4bb3ffe7f70b795d50c0828ca7e109d2879
        # maybe it has something to do with the previous behaviour where a
# session id was set...
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
global child_process_pid
child_process_pid = proc.pid
process = psutil.Process(os.getpid())
logger.info("-----------------------RUNNING----------------------------------")
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# How often is the experiment pickle supposed to be opened?
if config.get("HPOLIB", "total_time_limit"):
optimizer_end_time = time.time() + config.getint("HPOLIB", "total_time_limit")
else:
optimizer_end_time = sys.float_info.max
sent_SIGINT = False
sent_SIGINT_time = np.inf
sent_SIGTERM = False
sent_SIGTERM_time = np.inf
sent_SIGKILL = False
sent_SIGKILL_time = np.inf
children_to_kill = list()
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
stderr_queue = Queue()
stdout_queue = Queue()
stderr_thread = Thread(target=enqueue_output, args=(proc.stderr, stderr_queue))
stdout_thread = Thread(target=enqueue_output, args=(proc.stdout, stdout_queue))
stderr_thread.daemon = True
stdout_thread.daemon = True
stderr_thread.start()
stdout_thread.start()
if not (args.verbose or args.silent):
logger.info('Optimizer runs with PID: %d', proc.pid)
logger.info('We start in directory %s', os.getcwd())
while True:
# this implements the total runtime limit
if time.time() > optimizer_end_time and not sent_SIGINT:
logger.info("Reached total_time_limit, going to shutdown.")
exit_.true()
# necessary, otherwise HPOlib-run takes 100% of one processor
time.sleep(0.25)
try:
while True:
line = stdout_queue.get_nowait()
fh.write(line)
fh.flush()
optimization_logger.info(line.replace("\n", ""),
extra={'optimizer': optimizer})
except Empty:
pass
try:
while True:
line = stderr_queue.get_nowait()
fh.write(line)
fh.flush()
optimization_logger.error(line.replace("\n", ""),
extra={'optimizer': optimizer})
except Empty:
pass
ret = proc.poll()
if ret is not None:
# This does not include wrapping.py
children = process.children()
if len(children) == 0:
break
# TODO: what happens if we have a ret but something is still
# running?
if exit_.get_exit() == True and not sent_SIGINT:
logger.critical("Shutdown procedure: Sending SIGINT")
wrapping_util.kill_processes(signal.SIGINT)
sent_SIGINT_time = time.time()
sent_SIGINT = True
if exit_.get_exit() == True and not sent_SIGTERM and time.time() \
> sent_SIGINT_time + 5:
logger.critical("Shutdown procedure: Sending SIGTERM")
wrapping_util.kill_processes(signal.SIGTERM)
sent_SIGTERM_time = time.time()
sent_SIGTERM = True
if exit_.get_exit() == True and not sent_SIGKILL and time.time() \
> sent_SIGTERM_time + 5:
logger.critical("Shutdown procedure: Sending SIGKILL")
wrapping_util.kill_processes(signal.SIGKILL)
sent_SIGKILL_time = time.time()
sent_SIGKILL = True
logger.info("-----------------------END--------------------------------------")
ret = proc.returncode
logger.info("Finished with return code: %d", ret)
del proc
fh.close()
# Change back into to directory
os.chdir(dir_before_exp)
        # call target_function.teardown()
fn_teardown = config.get("HPOLIB", "function_teardown")
if fn_teardown:
# if temporary_output_dir:
# logger.critical("The options 'temporary_output_directory' "
# "and 'function_teardown' cannot be used "
# "together.")
# sys.exit(1)
fn_teardown_output = os.path.join(optimizer_dir_in_experiment,
"function_teardown.out")
runsolver_cmd = runsolver_wrapper._make_runsolver_command(
config, fn_teardown_output)
teardown_cmd = runsolver_cmd + " " + fn_teardown
# runsolver_output = subprocess.STDOUT
runsolver_output = open("/dev/null")
runsolver_wrapper._run_command_with_shell(teardown_cmd,
runsolver_output)
if temporary_output_dir:
# We cannot be sure that the directory
# optimizer_dir_in_experiment in dir_before_exp got deleted
# properly, therefore we append an underscore to the end of the
# filename
last_part = os.path.split(optimizer_dir_in_experiment)[1]
new_dir = os.path.join(dir_before_exp, last_part)
try:
shutil.copytree(optimizer_dir_in_experiment, new_dir)
except OSError as e:
new_dir += "_"
shutil.copytree(optimizer_dir_in_experiment, new_dir)
# shutil.rmtree does not work properly with NFS
# https://github.com/hashdist/hashdist/issues/113
# Idea from https://github.com/ahmadia/hashdist/
for rmtree_iter in range(5):
try:
shutil.rmtree(optimizer_dir_in_experiment)
break
except OSError, e:
time.sleep(rmtree_iter)
optimizer_dir_in_experiment = new_dir
trials = Experiment.Experiment(optimizer_dir_in_experiment,
experiment_directory_prefix + optimizer)
trials.endtime.append(time.time())
# noinspection PyProtectedMember
trials._save_jobs()
# trials.finish_experiment()
total_time = 0
logger.info("Best result %f", trials.get_best())
logger.info("Durations")
try:
for starttime, endtime in zip(trials.starttime, trials.endtime):
total_time += endtime - starttime
logger.info(" Needed a total of %f seconds", total_time)
logger.info(" The optimizer %s took %10.5f seconds",
optimizer, float(calculate_optimizer_time(trials)))
logger.info(" The overhead of HPOlib is %f seconds",
calculate_wrapping_overhead(trials))
logger.info(" The benchmark itself took %f seconds" % \
trials.total_wallclock_time)
except Exception as e:
logger.error(HPOlib.wrapping_util.format_traceback(sys.exc_info()))
logger.error("Experiment itself went fine, but calculating "
"durations of optimization failed: %s %s",
sys.exc_info()[0], e)
del trials
logger.info("Finished with return code: " + str(ret))
return ret
if __name__ == "__main__":
main()
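# Invocation sketch (optimizer path and title are illustrative; only the flags
# come from use_arg_parser() above). Run from an experiment directory that
# contains a config.cfg:
#
#   python wrapping.py -o /path/to/optimizers/smac -s 23 -t my_experiment
#
# Add -q to silence optimizer output or -v to mirror its stdout/stderr.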
|
gpl-3.0
|
benschhold/android_kernel_oneplus_msm8994_custom
|
scripts/build-all.py
|
704
|
14699
|
#! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import errno  # needed by check_build() below
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def build_threads():
"""Determine the number of build threads requested by the user"""
if all_options.load_average:
return all_options.load_average
return all_options.jobs or 1
failed_targets = []
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
def set_width(self, width):
self.width = width
def __enter__(self):
self.log = open(self.log_name, 'w')
def __exit__(self, type, value, traceback):
self.log.close()
def run(self):
self.status = None
messages = ["Building: " + self.short_name]
def printer(line):
text = "[%-*s] %s" % (self.width, self.short_name, line)
messages.append(text)
self.log.write(text)
self.log.write('\n')
for step in self.steps:
st = step.run(printer)
if st:
self.status = BuildResult(self.short_name, messages)
break
if not self.status:
self.status = BuildResult(None, messages)
class BuildTracker:
"""Manages all of the steps necessary to perform a build. The
build consists of one or more sequences of steps. The different
sequences can be processed independently, while the steps within a
sequence must be done in order."""
def __init__(self):
self.sequence = []
self.lock = threading.Lock()
def add_sequence(self, log_name, short_name, steps):
self.sequence.append(BuildSequence(log_name, short_name, steps))
def longest_name(self):
longest = 0
for seq in self.sequence:
longest = max(longest, len(seq.short_name))
return longest
def __repr__(self):
return "BuildTracker(%s)" % self.sequence
def run_child(self, seq):
seq.set_width(self.longest)
tok = self.build_tokens.get()
with self.lock:
print "Building:", seq.short_name
with seq:
seq.run()
self.results.put(seq.status)
self.build_tokens.put(tok)
def run(self):
self.longest = self.longest_name()
self.results = Queue.Queue()
children = []
errors = []
self.build_tokens = Queue.Queue()
nthreads = build_threads()
print "Building with", nthreads, "threads"
for i in range(nthreads):
self.build_tokens.put(True)
for seq in self.sequence:
child = threading.Thread(target=self.run_child, args=[seq])
children.append(child)
child.start()
for child in children:
stats = self.results.get()
if all_options.verbose:
with self.lock:
for line in stats.messages:
print line
sys.stdout.flush()
if stats.status:
errors.append(stats.status)
for child in children:
child.join()
if errors:
fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
"""A step that just prints a message"""
def __init__(self, message):
self.message = message
def run(self, outp):
outp(self.message)
class MkdirStep:
"""A step that makes a directory"""
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("mkdir %s" % self.direc)
os.mkdir(self.direc)
class RmtreeStep:
def __init__(self, direc):
self.direc = direc
def run(self, outp):
outp("rmtree %s" % self.direc)
shutil.rmtree(self.direc, ignore_errors=True)
class CopyfileStep:
def __init__(self, src, dest):
self.src = src
self.dest = dest
def run(self, outp):
outp("cp %s %s" % (self.src, self.dest))
shutil.copyfile(self.src, self.dest)
class ExecStep:
def __init__(self, cmd, **kwargs):
self.cmd = cmd
self.kwargs = kwargs
def run(self, outp):
outp("exec: %s" % (" ".join(self.cmd),))
with open('/dev/null', 'r') as devnull:
proc = subprocess.Popen(self.cmd, stdin=devnull,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**self.kwargs)
stdout = proc.stdout
while True:
line = stdout.readline()
if not line:
break
line = line.rstrip('\n')
outp(line)
result = proc.wait()
if result != 0:
return ('error', result)
else:
return None
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
self.log_name = "%s/log-%s.log" % (build_dir, self.name)
def build(self):
steps = []
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
steps.append(PrintStep('Building %s in %s log %s' %
(self.name, dest_dir, log_name)))
if not os.path.isdir(dest_dir):
steps.append(MkdirStep(dest_dir))
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env))
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
steps.append(ExecStep(cmd_line + [t], env=self.make_env))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
steps.append(ExecStep(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env))
steps.append(CopyfileStep(savedefconfig, defconfig))
return steps
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
with open(file, 'a') as defconfig:
defconfig.write(str + '\n')
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = []
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'mdm*_defconfig',
r'mpq*_defconfig',
)
arch64_pats = (
r'msm*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
name = os.path.basename(n)[:-10]
names.append(Builder(name, n))
if 'CROSS_COMPILE64' in os.environ:
for p in arch64_pats:
for n in glob.glob('arch/arm64/configs/' + p):
name = os.path.basename(n)[:-10] + "-64"
names.append(Builder(name, n))
return names
def build_many(targets):
print "Building %d target(s)" % len(targets)
# If we are requesting multiple builds, divide down the job number
# to construct the make_command, giving it a floor of 2, so there
# is still some parallelism.
if all_options.jobs and all_options.jobs > 1:
j = max(all_options.jobs / len(targets), 2)
make_command.append("-j" + str(j))
tracker = BuildTracker()
for target in targets:
if all_options.updateconfigs:
update_config(target.defconfig, all_options.updateconfigs)
steps = target.build()
tracker.add_sequence(target.log_name, target.name, steps)
tracker.run()
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs:
print " %s" % target.name
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if args == ['all']:
build_many(configs)
elif args == ['perf']:
targets = []
for t in configs:
if "perf" in t.name:
targets.append(t)
build_many(targets)
elif args == ['noperf']:
targets = []
for t in configs:
if "perf" not in t.name:
targets.append(t)
build_many(targets)
elif len(args) > 0:
all_configs = {}
for t in configs:
all_configs[t.name] = t
targets = []
for t in args:
if t not in all_configs:
parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
targets.append(all_configs[t])
build_many(targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
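# Invocation sketch (toolchain prefix and target name are illustrative; the
# options come from the OptionParser set up in main() above):
#
#   CROSS_COMPILE=arm-eabi- ./build-all.py --list              # show available targets
#   CROSS_COMPILE=arm-eabi- ./build-all.py -j 8 all             # build every defconfig
#   CROSS_COMPILE=arm-eabi- ./build-all.py -j 8 msm8994-perf    # build a single target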
|
gpl-2.0
|
jbassen/edx-platform
|
cms/envs/bok_choy.py
|
11
|
4495
|
"""
Settings for Bok Choy tests that are used when running Studio.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
from the same directory.
"""
import os
from path import path
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = path(__file__).abspath().dirname()
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
######################### Testing overrides ####################################
# Needed for the reset database management command
INSTALLED_APPS += ('django_extensions',)
# Redirect to the test_root folder within the repo
TEST_ROOT = REPO_ROOT / "test_root"
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
DATA_DIR = TEST_ROOT / "data"
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = (
(TEST_ROOT / "staticfiles" / "cms").abspath(),
)
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
FEATURES['ENABLE_MOBILE_REST_API'] = True # Enable video bumper in Studio
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Enable video bumper in Studio settings
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_PROCTORED_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
FEATURES['ENABLE_COURSEWARE_INDEX'] = True
FEATURES['ENABLE_LIBRARY_INDEX'] = True
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
|
agpl-3.0
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-rmarkdown/package.py
|
5
|
1715
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRmarkdown(RPackage):
"""Convert R Markdown documents into a variety of formats."""
homepage = "http://rmarkdown.rstudio.com/"
url = "https://cloud.r-project.org/src/contrib/rmarkdown_1.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rmarkdown"
version('1.14', sha256='f636b1048c5be56e06aa0b2b4342ad5c8192734f1e9b27468fef62be672edc61')
version('1.13', sha256='96fb6b08d27bbb8054145e0a55721f905341941d4f6691480a2a234e2d5a63ef')
version('1.7', sha256='c3191db65b9ad41b6dbb77aff53487701032d306e92b208ef7515b747931fe63')
version('1.0', sha256='ff1ecb74ebc444b9b0b7b547adc512daefe1ee08d06bc0e3ee4eb68e58d2ef30')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-base64enc', type=('build', 'run'))
depends_on('r-jsonlite', type=('build', 'run'))
depends_on('r-rprojroot', when='@1.3:1.7', type=('build', 'run'))
depends_on('r-mime', when='@1.8:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.6:', type=('build', 'run'))
depends_on('r-catools', when='@:1.7', type=('build', 'run'))
depends_on('[email protected]:', when='@1.10:', type=('build', 'run'))
depends_on('r-xfun', when='@1.13:', type=('build', 'run'))
depends_on('[email protected]:')
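# Usage sketch (standard Spack workflow, not specific to this file):
#
#   spack info r-rmarkdown        # show the versions/dependencies declared above
#   spack install r-rmarkdown     # build the newest listed version by default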
|
lgpl-2.1
|
theonion/django-bulbs
|
tests/instant_articles/test_facebook_api.py
|
1
|
6087
|
import requests_mock
from django.utils import timezone
from django.test.utils import override_settings
from bulbs.content.models import FeatureType, Content
from bulbs.utils.test import BaseIndexableTestCase
from bulbs.utils.test.mock_vault import mock_vault
from example.testcontent.models import TestContentObjThree
class FacebookAPITestCase(BaseIndexableTestCase):
def setUp(self):
super(FacebookAPITestCase, self).setUp()
self.ft = FeatureType.objects.create(name="NIP", instant_article=True)
@override_settings(
FACEBOOK_POST_TO_IA=True,
BETTY_FIXED_URL='http://i.onionstatic.com/onion')
@mock_vault({'facebook/onion_token': {'authtoken': 'TOKEN'}})
def test_publish(self):
with requests_mock.mock() as mocker:
            # post to instant article endpoint
mocker.post(
"https://graph.facebook.com/v2.6/123456/instant_articles",
status_code=200,
json={
"id": 456
})
# get status of instant article
mocker.get(
"https://graph.facebook.com/v2.6/456?access_token=TOKEN",
status_code=200,
json={
"id": 9876,
"status": "SUCCESS"
})
# create content
content = TestContentObjThree.objects.create(
body="<p>This is the body</p>",
feature_type=self.ft)
# get article ID
mocker.get(
"https://graph.facebook.com/v2.6?id=http://www.theonion.com{0}&fields=instant_article&access_token=TOKEN".format(
content.get_absolute_url()),
status_code=200,
json={
"instant_article": {
"id": "420"
},
"id": "http://www.theonion.com/article/blaze-it-420"
}
)
content.published = timezone.now()
content.save()
Content.search_objects.refresh()
c = Content.objects.get(id=content.id)
# check that the ia_id is set & call count is correct
self.assertEqual(c.instant_article_id, 420)
self.assertEqual(mocker.call_count, 3)
@override_settings(
FACEBOOK_POST_TO_IA=True,
BETTY_FIXED_URL='http://i.onionstatic.com/onion')
@mock_vault({'facebook/onion_token': {'authtoken': 'TOKEN'}})
def test_unpublish(self):
with requests_mock.mock() as mocker:
mocker.post(
"https://graph.facebook.com/v2.6/123456/instant_articles",
status_code=201,
json={
"id": 456
})
mocker.get(
"https://graph.facebook.com/v2.6/456?access_token=TOKEN",
status_code=200,
json={
"id": 9876,
"status": "SUCCESS"
})
mocker.delete(
"https://graph.facebook.com/v2.6/420?access_token=TOKEN",
status_code=204,
json={
"success": True
})
content = TestContentObjThree.objects.create(
body="<p>This is the body</p>",
feature_type=self.ft)
mocker.get(
"https://graph.facebook.com/v2.6?id=http://www.theonion.com{0}&fields=instant_article&access_token=TOKEN".format(
content.get_absolute_url()),
status_code=200,
json={
"instant_article": {
"id": "420"
},
"id": "http://www.theonion.com/article/blaze-it-420"
}
)
content.published = timezone.now()
content.save()
Content.search_objects.refresh()
c = Content.objects.get(id=content.id)
self.assertEqual(c.instant_article_id, 420)
self.assertEqual(mocker.call_count, 3)
# unpublish article and check that delete is called
c.published = timezone.now() + timezone.timedelta(days=1)
c.save()
self.assertEqual(mocker.call_count, 4)
@override_settings(
FACEBOOK_POST_TO_IA=True,
BETTY_FIXED_URL='http://i.onionstatic.com/onion')
@mock_vault({'facebook/onion_token': {'authtoken': 'TOKEN'}})
def test_delete(self):
with requests_mock.mock() as mocker:
mocker.post(
"https://graph.facebook.com/v2.6/123456/instant_articles",
status_code=201,
json={
"id": 456
})
mocker.get(
"https://graph.facebook.com/v2.6/456?access_token=TOKEN",
status_code=200,
json={
"id": 9876,
"status": "SUCCESS"
})
mocker.delete(
"https://graph.facebook.com/v2.6/420?access_token=TOKEN",
status_code=204,
json={
"success": True
})
content = TestContentObjThree.objects.create(
body="<p>This is the body</p>",
feature_type=self.ft)
mocker.get(
"https://graph.facebook.com/v2.6?id=http://www.theonion.com{0}&fields=instant_article&access_token=TOKEN".format(
content.get_absolute_url()),
status_code=200,
json={
"instant_article": {
"id": "420"
},
"id": "http://www.theonion.com/article/blaze-it-420"
}
)
content.published = timezone.now()
content.save()
self.assertEqual(mocker.call_count, 3)
content.delete()
self.assertEqual(mocker.call_count, 4)
|
mit
|
waytai/django
|
tests/m2m_through_regress/models.py
|
273
|
2771
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Forward declared intermediate model
@python_2_unicode_compatible
class Membership(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
# using custom id column to test ticket #11107
@python_2_unicode_compatible
class UserMembership(models.Model):
id = models.AutoField(db_column='usermembership_id', primary_key=True)
user = models.ForeignKey(User, models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
def __str__(self):
return "%s is a user and member of %s" % (self.user.username, self.group.name)
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=128)
# Membership object defined as a class
members = models.ManyToManyField(Person, through=Membership)
user_members = models.ManyToManyField(User, through='UserMembership')
def __str__(self):
return self.name
# A set of models that use a non-abstract inherited model as the 'through' model.
class A(models.Model):
a_text = models.CharField(max_length=20)
class ThroughBase(models.Model):
a = models.ForeignKey(A, models.CASCADE)
b = models.ForeignKey('B', models.CASCADE)
class Through(ThroughBase):
extra = models.CharField(max_length=20)
class B(models.Model):
b_text = models.CharField(max_length=20)
a_list = models.ManyToManyField(A, through=Through)
# Using to_field on the through model
@python_2_unicode_compatible
class Car(models.Model):
make = models.CharField(max_length=20, unique=True, null=True)
drivers = models.ManyToManyField('Driver', through='CarDriver')
def __str__(self):
return "%s" % self.make
@python_2_unicode_compatible
class Driver(models.Model):
name = models.CharField(max_length=20, unique=True, null=True)
def __str__(self):
return "%s" % self.name
class Meta:
ordering = ('name',)
@python_2_unicode_compatible
class CarDriver(models.Model):
car = models.ForeignKey('Car', models.CASCADE, to_field='make')
driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')
def __str__(self):
return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
|
bsd-3-clause
|
VitalLabs/gcloud-python
|
gcloud/bigtable/happybase/pool.py
|
5
|
5918
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase pool module."""
import contextlib
import threading
import six
from gcloud.bigtable.happybase.connection import Connection
from gcloud.bigtable.happybase.connection import _get_instance
_MIN_POOL_SIZE = 1
"""Minimum allowable size of a connection pool."""
class NoConnectionsAvailable(RuntimeError):
"""Exception raised when no connections are available.
This happens if a timeout was specified when obtaining a connection,
and no connection became available within the specified timeout.
"""
class ConnectionPool(object):
"""Thread-safe connection pool.
.. note::
All keyword arguments are passed unmodified to the
:class:`Connection <.happybase.connection.Connection>` constructor
**except** for ``autoconnect``. This is because the ``open`` /
``closed`` status of a connection is managed by the pool. In addition,
if ``instance`` is not passed, the default / inferred instance is
determined by the pool and then passed to each
:class:`Connection <.happybase.connection.Connection>` that is created.
:type size: int
:param size: The maximum number of concurrently open connections.
:type kwargs: dict
:param kwargs: Keyword arguments passed to
:class:`Connection <.happybase.Connection>`
constructor.
:raises: :class:`TypeError <exceptions.TypeError>` if ``size``
             is not an integer.
:class:`ValueError <exceptions.ValueError>` if ``size``
is not positive.
"""
def __init__(self, size, **kwargs):
if not isinstance(size, six.integer_types):
raise TypeError('Pool size arg must be an integer')
if size < _MIN_POOL_SIZE:
raise ValueError('Pool size must be positive')
self._lock = threading.Lock()
self._queue = six.moves.queue.LifoQueue(maxsize=size)
self._thread_connections = threading.local()
connection_kwargs = kwargs
connection_kwargs['autoconnect'] = False
if 'instance' not in connection_kwargs:
connection_kwargs['instance'] = _get_instance(
timeout=kwargs.get('timeout'))
for _ in six.moves.range(size):
connection = Connection(**connection_kwargs)
self._queue.put(connection)
def _acquire_connection(self, timeout=None):
"""Acquire a connection from the pool.
:type timeout: int
:param timeout: (Optional) Time (in seconds) to wait for a connection
to open.
:rtype: :class:`Connection <.happybase.Connection>`
:returns: An active connection from the queue stored on the pool.
:raises: :class:`NoConnectionsAvailable` if ``Queue.get`` fails
before the ``timeout`` (only if a timeout is specified).
"""
try:
return self._queue.get(block=True, timeout=timeout)
except six.moves.queue.Empty:
raise NoConnectionsAvailable('No connection available from pool '
'within specified timeout')
@contextlib.contextmanager
def connection(self, timeout=None):
"""Obtain a connection from the pool.
Must be used as a context manager, for example::
with pool.connection() as connection:
pass # do something with the connection
If ``timeout`` is omitted, this method waits forever for a connection
to become available from the local queue.
:type timeout: int
:param timeout: (Optional) Time (in seconds) to wait for a connection
to open.
:rtype: :class:`Connection <.happybase.connection.Connection>`
:returns: An active connection from the pool.
:raises: :class:`NoConnectionsAvailable` if no connection can be
retrieved from the pool before the ``timeout`` (only if
a timeout is specified).
"""
connection = getattr(self._thread_connections, 'current', None)
retrieved_new_cnxn = False
if connection is None:
# In this case we need to actually grab a connection from the
# pool. After retrieval, the connection is stored on a thread
# local so that nested connection requests from the same
# thread can re-use the same connection instance.
#
# NOTE: This code acquires a lock before assigning to the
# thread local; see
# ('https://emptysqua.re/blog/'
# 'another-thing-about-pythons-threadlocals/')
retrieved_new_cnxn = True
connection = self._acquire_connection(timeout)
with self._lock:
self._thread_connections.current = connection
# This is a no-op for connections that have already been opened
# since they just call Client.start().
connection.open()
yield connection
# Remove thread local reference after the outermost 'with' block
# ends. Afterwards the thread no longer owns the connection.
if retrieved_new_cnxn:
del self._thread_connections.current
self._queue.put(connection)
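# Usage sketch (instance and table names are illustrative; mirrors the docstring
# of ConnectionPool.connection() above):
#
#   pool = ConnectionPool(size=4, instance=instance)
#   with pool.connection(timeout=5) as connection:
#       table = connection.table('my-table')
#       row = table.row(b'row-key')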
|
apache-2.0
|
saurabh6790/medsynaptic-app
|
stock/report/supplier_wise_sales_analytics/supplier_wise_sales_analytics.py
|
30
|
3487
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt
def execute(filters=None):
columns = get_columns(filters)
consumed_details = get_consumed_details(filters)
supplier_details = get_suppliers_details(filters)
material_transfer_vouchers = get_material_transfer_vouchers()
data = []
for item_code, suppliers in supplier_details.items():
consumed_qty = consumed_amount = delivered_qty = delivered_amount = 0.0
total_qty = total_amount = 0.0
if consumed_details.get(item_code):
for cd in consumed_details.get(item_code):
if (cd.voucher_no not in material_transfer_vouchers):
if cd.voucher_type=="Delivery Note":
delivered_qty += abs(flt(cd.actual_qty))
delivered_amount += abs(flt(cd.stock_value_difference))
elif cd.voucher_type!="Delivery Note":
consumed_qty += abs(flt(cd.actual_qty))
consumed_amount += abs(flt(cd.stock_value_difference))
if consumed_qty or consumed_amount or delivered_qty or delivered_amount:
total_qty += delivered_qty + consumed_qty
total_amount += delivered_amount + consumed_amount
row = [cd.item_code, cd.item_name, cd.description, cd.stock_uom, \
consumed_qty, consumed_amount, delivered_qty, delivered_amount, \
total_qty, total_amount, list(set(suppliers))]
data.append(row)
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = ["Item:Link/Item:100"] + ["Item Name::100"] + \
["Description::150"] + ["UOM:Link/UOM:90"] + \
["Consumed Qty:Float:110"] + ["Consumed Amount:Currency:130"] + \
["Delivered Qty:Float:110"] + ["Delivered Amount:Currency:130"] + \
["Total Qty:Float:110"] + ["Total Amount:Currency:130"] + \
["Supplier(s)::250"]
return columns
def get_conditions(filters):
conditions = ""
values = []
if filters.get('from_date') and filters.get('to_date'):
conditions = "and sle.posting_date>=%s and sle.posting_date<=%s"
values = [filters.get('from_date'), filters.get('to_date')]
return conditions, values
def get_consumed_details(filters):
conditions, values = get_conditions(filters)
consumed_details = {}
for d in webnotes.conn.sql("""select sle.item_code, i.item_name, i.description,
i.stock_uom, sle.actual_qty, sle.stock_value_difference,
sle.voucher_no, sle.voucher_type
from `tabStock Ledger Entry` sle, `tabItem` i
where sle.item_code=i.name and sle.actual_qty < 0 %s""" % conditions, values, as_dict=1):
consumed_details.setdefault(d.item_code, []).append(d)
return consumed_details
def get_suppliers_details(filters):
item_supplier_map = {}
supplier = filters.get('supplier')
for d in webnotes.conn.sql("""select pr.supplier, pri.item_code from
`tabPurchase Receipt` pr, `tabPurchase Receipt Item` pri
where pr.name=pri.parent and pr.docstatus=1 and
pri.item_code=(select name from `tabItem` where
ifnull(is_stock_item, 'Yes')='Yes' and name=pri.item_code)""", as_dict=1):
item_supplier_map.setdefault(d.item_code, []).append(d.supplier)
if supplier:
for item_code, suppliers in item_supplier_map.items():
if supplier not in suppliers:
del item_supplier_map[item_code]
return item_supplier_map
def get_material_transfer_vouchers():
return webnotes.conn.sql_list("""select name from `tabStock Entry` where
purpose='Material Transfer' and docstatus=1""")
|
agpl-3.0
|
neilLasrado/erpnext
|
erpnext/config/accounts.py
|
1
|
13984
|
from __future__ import unicode_literals
from frappe import _
import frappe
def get_data():
config = [
{
"label": _("Accounts Receivable"),
"items": [
{
"type": "doctype",
"name": "Sales Invoice",
"description": _("Bills raised to Customers."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Payment Entry",
"description": _("Bank/Cash transactions against party or for internal transfer")
},
{
"type": "doctype",
"name": "Payment Request",
"description": _("Payment Request"),
},
{
"type": "report",
"name": "Accounts Receivable",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Receivable Summary",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Sales Register",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Item-wise Sales Register",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Ordered Items To Be Billed",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Delivered Items To Be Billed",
"is_query_report": True,
"doctype": "Sales Invoice"
},
]
},
{
"label": _("Accounts Payable"),
"items": [
{
"type": "doctype",
"name": "Purchase Invoice",
"description": _("Bills raised by Suppliers."),
"onboard": 1
},
{
"type": "doctype",
"name": "Supplier",
"description": _("Supplier database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Payment Entry",
"description": _("Bank/Cash transactions against party or for internal transfer")
},
{
"type": "report",
"name": "Accounts Payable",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Accounts Payable Summary",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Purchase Register",
"doctype": "Purchase Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Item-wise Purchase Register",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Purchase Order Items To Be Billed",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
{
"type": "report",
"name": "Received Items To Be Billed",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
]
},
{
"label": _("Accounting Masters"),
"items": [
{
"type": "doctype",
"name": "Company",
"description": _("Company (not Customer or Supplier) master."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Account",
"icon": "fa fa-sitemap",
"label": _("Chart of Accounts"),
"route": "#Tree/Account",
"description": _("Tree of financial accounts."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Accounts Settings",
},
{
"type": "doctype",
"name": "Fiscal Year",
"description": _("Financial / accounting year.")
},
{
"type": "doctype",
"name": "Accounting Dimension",
},
{
"type": "doctype",
"name": "Finance Book",
},
{
"type": "doctype",
"name": "Accounting Period",
},
{
"type": "doctype",
"name": "Payment Term",
"description": _("Payment Terms based on conditions")
},
]
},
{
"label": _("Banking and Payments"),
"items": [
{
"type": "doctype",
"label": _("Match Payments with Invoices"),
"name": "Payment Reconciliation",
"description": _("Match non-linked Invoices and Payments.")
},
{
"type": "doctype",
"label": _("Invoice Discounting"),
"name": "Invoice Discounting",
},
{
"type": "doctype",
"label": _("Update Bank Clearance Dates"),
"name": "Bank Clearance",
"description": _("Update bank payment dates with journals.")
},
{
"type": "doctype",
"label": _("Bank Transaction"),
"name": "Bank Transaction",
"doctype": "Bank Transaction"
},
{
"type": "report",
"name": "Bank Reconciliation Statement",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Bank Clearance Summary",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "doctype",
"name": "Bank Guarantee"
},
{
"type": "doctype",
"name": "Cheque Print Template",
"description": _("Setup cheque dimensions for printing")
},
]
},
{
"label": _("General Ledger"),
"items": [
{
"type": "doctype",
"name": "Journal Entry",
"description": _("Accounting journal entries.")
},
{
"type": "report",
"name": "General Ledger",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Customer Ledger Summary",
"doctype": "Sales Invoice",
"is_query_report": True,
},
{
"type": "report",
"name": "Supplier Ledger Summary",
"doctype": "Sales Invoice",
"is_query_report": True,
}
]
},
{
"label": _("Taxes"),
"items": [
{
"type": "doctype",
"name": "Sales Taxes and Charges Template",
"description": _("Tax template for selling transactions.")
},
{
"type": "doctype",
"name": "Purchase Taxes and Charges Template",
"description": _("Tax template for buying transactions.")
},
{
"type": "doctype",
"name": "Item Tax Template",
"description": _("Tax template for item tax rates.")
},
{
"type": "doctype",
"name": "Tax Category",
"description": _("Tax Category for overriding tax rates.")
},
{
"type": "doctype",
"name": "Tax Rule",
"description": _("Tax Rule for transactions.")
},
{
"type": "doctype",
"name": "Tax Withholding Category",
"description": _("Tax Withholding rates to be applied on transactions.")
},
]
},
{
"label": _("Cost Center and Budgeting"),
"items": [
{
"type": "doctype",
"name": "Cost Center",
"icon": "fa fa-sitemap",
"label": _("Chart of Cost Centers"),
"route": "#Tree/Cost Center",
"description": _("Tree of financial Cost Centers."),
},
{
"type": "doctype",
"name": "Budget",
"description": _("Define budget for a financial year.")
},
{
"type": "doctype",
"name": "Accounting Dimension",
},
{
"type": "report",
"name": "Budget Variance Report",
"is_query_report": True,
"doctype": "Cost Center"
},
{
"type": "doctype",
"name": "Monthly Distribution",
"description": _("Seasonality for setting budgets, targets etc.")
},
]
},
{
"label": _("Financial Statements"),
"items": [
{
"type": "report",
"name": "Trial Balance",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Profit and Loss Statement",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Balance Sheet",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Cash Flow",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Cost Center Wise Financial Statements",
"doctype": "GL Entry",
"is_query_report": True
},
{
"type": "report",
"name": "Consolidated Financial Statement",
"doctype": "GL Entry",
"is_query_report": True
},
]
},
{
"label": _("Opening and Closing"),
"items": [
{
"type": "doctype",
"name": "Opening Invoice Creation Tool",
},
{
"type": "doctype",
"name": "Chart of Accounts Importer",
},
{
"type": "doctype",
"name": "Period Closing Voucher",
"description": _("Close Balance Sheet and book Profit or Loss.")
},
]
},
{
"label": _("Multi Currency"),
"items": [
{
"type": "doctype",
"name": "Currency",
"description": _("Enable / disable currencies.")
},
{
"type": "doctype",
"name": "Currency Exchange",
"description": _("Currency exchange rate master.")
},
{
"type": "doctype",
"name": "Exchange Rate Revaluation",
"description": _("Exchange Rate Revaluation master.")
},
]
},
{
"label": _("Loyalty Program"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Loyalty Program",
"label": _("Loyalty Program"),
"description": _("To make Customer based incentive schemes.")
},
{
"type": "doctype",
"name": "Loyalty Point Entry",
"label": _("Loyalty Point Entry"),
"description": _("To view logs of Loyalty Points assigned to a Customer.")
}
]
},
{
"label": _("Settings"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Payment Gateway Account",
"description": _("Setup Gateway accounts.")
},
{
"type": "doctype",
"name": "Terms and Conditions",
"label": _("Terms and Conditions Template"),
"description": _("Template of terms or contract.")
},
{
"type": "doctype",
"name": "Mode of Payment",
"description": _("e.g. Bank, Cash, Credit Card")
},
]
},
{
"label": _("Subscription Management"),
"items": [
{
"type": "doctype",
"name": "Subscriber",
},
{
"type": "doctype",
"name": "Subscription Plan",
},
{
"type": "doctype",
"name": "Subscription"
},
{
"type": "doctype",
"name": "Subscription Settings"
}
]
},
{
"label": _("Bank Statement"),
"items": [
{
"type": "doctype",
"label": _("Bank"),
"name": "Bank",
},
{
"type": "doctype",
"label": _("Bank Account"),
"name": "Bank Account",
},
{
"type": "doctype",
"name": "Bank Statement Transaction Entry",
},
{
"type": "doctype",
"label": _("Bank Statement Settings"),
"name": "Bank Statement Settings",
},
]
},
{
"label": _("Profitability"),
"items": [
{
"type": "report",
"name": "Gross Profit",
"doctype": "Sales Invoice",
"is_query_report": True
},
{
"type": "report",
"name": "Profitability Analysis",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Sales Invoice Trends",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Purchase Invoice Trends",
"is_query_report": True,
"doctype": "Purchase Invoice"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"name": "Trial Balance for Party",
"doctype": "GL Entry",
"is_query_report": True,
},
{
"type": "report",
"name": "Payment Period Based On Invoice Date",
"is_query_report": True,
"doctype": "Journal Entry"
},
{
"type": "report",
"name": "Sales Partners Commission",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"is_query_report": True,
"name": "Customer Credit Balance",
"doctype": "Customer"
},
{
"type": "report",
"is_query_report": True,
"name": "Sales Payment Summary",
"doctype": "Sales Invoice"
},
{
"type": "report",
"is_query_report": True,
"name": "Address And Contacts",
"doctype": "Address"
}
]
},
{
"label": _("Share Management"),
"icon": "fa fa-microchip ",
"items": [
{
"type": "doctype",
"name": "Shareholder",
"description": _("List of available Shareholders with folio numbers")
},
{
"type": "doctype",
"name": "Share Transfer",
"description": _("List of all share transactions"),
},
{
"type": "report",
"name": "Share Ledger",
"doctype": "Share Transfer",
"is_query_report": True
},
{
"type": "report",
"name": "Share Balance",
"doctype": "Share Transfer",
"is_query_report": True
}
]
},
]
gst = {
"label": _("Goods and Services Tax (GST India)"),
"items": [
{
"type": "doctype",
"name": "GST Settings",
},
{
"type": "doctype",
"name": "GST HSN Code",
},
{
"type": "report",
"name": "GSTR-1",
"is_query_report": True
},
{
"type": "report",
"name": "GSTR-2",
"is_query_report": True
},
{
"type": "doctype",
"name": "GSTR 3B Report",
},
{
"type": "report",
"name": "GST Sales Register",
"is_query_report": True
},
{
"type": "report",
"name": "GST Purchase Register",
"is_query_report": True
},
{
"type": "report",
"name": "GST Itemised Sales Register",
"is_query_report": True
},
{
"type": "report",
"name": "GST Itemised Purchase Register",
"is_query_report": True
},
{
"type": "doctype",
"name": "C-Form",
"description": _("C-Form records"),
"country": "India"
},
]
}
countries = frappe.get_all("Company", fields="country")
countries = [country["country"] for country in countries]
if "India" in countries:
config.insert(9, gst)
domains = frappe.get_active_domains()
return config
|
gpl-3.0
|
nameoffnv/pywinauto
|
sandbox/playing_RepeatedHotkeyFixing.py
|
16
|
7751
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
#import set
from Test_RepeatedHotkey import GetHotkey
controls = (
"&Hello",
"&Exit",
"&OK",
"&Python",
"&Ion Trail",
"&Galaxy Quest",
"&Xenon",
"&Sierra",
"&Zope",
"&Sizzling",
"Here &and Now",
"&Later maybe",
"&Scram",
"Wo&w",
"What is T&HAT",
"&Mercury",
"&Venus",
"&Earth",
"M&ercury",
"Ven&us",
"Ea&rth",
"&Mercury",
"&OK",
"&Python",
"&Ion Trail",
"&Galaxy Quest",
)
charIndex = {}
hotkeyCtrls = {}
allChars = set()
for ctrl in controls:
# try to build up information on the dialog first
# e.g. if it's possible to fix at all
# any controls that have a unique character
hotkeyCtrls.setdefault(GetHotkey(ctrl)[1].lower(), []).append(ctrl)
allChars = allChars.union(set(ctrl.lower()))
for c in ctrl:
if c in ' _&': continue
charIndex.setdefault(c.lower(), []).append(ctrl)
allChars.difference_update(" _&")
freeChars = allChars.difference(hotkeyCtrls.keys())
print freeChars
for c in hotkeyCtrls:
print c
for ctrl in hotkeyCtrls[c]:
print "\t", ctrl
if len(allChars) < len(controls):
print "impossible to fix completely because there are more hotkeys then individual characters in the dialog"
print "the following characters are free:"
#for c in freeChars:
# print "\t%s"% c
# for ctrl in charIndex[c]:
# print "\t\t%s" % ctrl
usedChars = hotkeyCtrls.keys()
changesMade = 1
while changesMade:
changesMade = 0
for char, ctrls in charIndex.items():
# if there is only one control that has this character
if len (ctrls) == 1:
# get the control
ctrl = ctrls[0]
# find the hotkey for that control
ctrlHotkey = GetHotkey(ctrl)[1].lower()
print ctrlHotkey, `ctrl`
# remove the control from the list
hotkeyCtrls[ctrlHotkey].remove(ctrl)
            # if there are now no controls for that hotkey
# remove it
if len(hotkeyCtrls[ctrlHotkey]) == 0:
del(hotkeyCtrls[ctrlHotkey])
# add the new changed one to the hotkeys
hotkeyCtrls.setdefault(char, []).append(ctrl)
changesMade = 1
else:
for hotkey, ctrls in hotkeyCtrls.items():
if len(ctrls) > 1:
for ctrl in ctrls:
ctrlChars = set(ctrl.lower()).difference(" &_")
if freeChars.intersection(ctrlChars):
print "="*100
for c in hotkeyCtrls:
print c
for ctrl in hotkeyCtrls[c]:
print "\t", ctrl
#for x in charIndex:
# print x, charIndex[x]
for hotkey, ctrls in hotkeyCtrls.items():
if len(ctrls) > 1:
print "***** BUG *****"
print "\t", hotkey, ctrls
# find the chars free for each control
ctrlFree = []
for ctrl in ctrls:
ctrlFree.append("".join(set(ctrl.lower()).intersection(freeChars)))
# count the controls with no hotkey free
countNotFree = len([c for c in ctrlFree if not c])
if countNotFree > 1:
print "Cannot be fixed without possibly changing other controls also"
for i, free in enumerate(ctrlFree):
if not free:
print "Must leave '%s' alone" %ctrls[i]
import sys
sys.exit()
#================================================
#================================================
#================================================
#================================================
#================================================
# return the controls that have characters in common with
# the control
def GetCtrlsWithSameChars(ctrl, controls):
ourChars = set(ctrl.lower())
ourChars.difference_update("& _")
toRet = []
for control in controls:
ctrlChars = set(control.lower())
if ourChars.intersection(ctrlChars):
toRet.append(control)
return toRet
def GetFreeCharsForControls(allFree, controls):
ctrlFree = []
allCtrlsFree = []
for ctrl in controls:
curCtrlFree = set(ctrl.lower()).intersection(allFree)
ctrlFree.append("".join(curCtrlFree))
allCtrlsFree.extend(curCtrlFree)
return ctrlFree, "".join(allCtrlsFree)
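# Added worked example (not in the original script): with the free characters
# {'x', 'y'} and two labels that both still contain an available 'x',
#   GetFreeCharsForControls(set('xy'), ['&Exit', '&Xenon'])
# returns (['x', 'x'], 'xx'), i.e. either control could move its hotkey to 'x'.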
charIndex = {}
hotkeyCtrls = {}
for c in controls:
hotkeyCtrls.setdefault(GetHotkey(c)[1].lower(), []).append(c)
for char in c.lower():
charIndex.setdefault(char, []).append(c)
hotkeys = set(hotkeyCtrls.keys())
allKeys = set("".join(controls).lower())
allKeys.difference_update("& _")
freeKeys = allKeys.difference(hotkeys)
print len(controls)
if len(controls) > len(allKeys):
print "**** Oops - more hotkeys than available characters :-( "
for hotkey, ctrls in hotkeyCtrls.items():
if len(ctrls) > 1:
print "**bug**"
# can it be fixed simply by changing one or more of the controls
# to another character within these controls
ctrlsFreeChars, allFree = GetFreeCharsForControls(freeKeys, ctrls)
# find out if we can use this method (0 or 1 controls with no free characters)
noFreeCount = 0
for i, ctrl in enumerate(ctrls):
if not ctrlsFreeChars[i]:
noFreeCount += 1
# ok - so more than one control has no free chars - can't use the
# simple method
if noFreeCount > 1:
print "cant use that method"
continue
if noFreeCount == 0:
extraText = ' or leave the same'
else:
extraText = ''
for i, ctrl in enumerate(ctrls):
if len(ctrlsFreeChars[i]) > 1:
print "Change '%s' to one of (%s)%s"% (ctrl, "".join(ctrlsFreeChars[i]), extraText)
elif len(ctrlsFreeChars[i]) == 1:
print "Change '%s' to %s%s"% (ctrl, "".join(ctrlsFreeChars[i]), extraText)
else:
print "do not change %s" % ctrl
#
# for curCtrl in ctrls:
# # Get the letters that could be used
# ctrlAvl = set(curCtrl.lower()).intersection(freeKeys)
#
# #changesNeeded = ''
#
# # if there are no free letters in that control
# # try and find if any other control could have it's
# # hotkey changed to free up for ourself
# if len(ctrlAvl) == 0:
#
# # get the controls that share some letters
# otherCtrls = GetCtrlsWithSameChars(c, controls)
#
# suggestedChanges = []
# # check if any of the letters in those controls can be freed up
# for otherCtrl in otherCtrls:
# if GetHotkey(otherCtrl)[1].lower() == hotkey:
# freeOther = set(otherCtrl.lower()).intersection(freeKeys)
#
# if freeOther:
# print "To Fix %s Free %s in %s by changing to any of (%s)"%(curCtrl, GetHotkey(otherCtrl)[1], otherCtrl, "".join(freeOther))
#
#
#
## posChange = set(c.lower()).intersection(allKeys)
## changesNeeded = "".join(posChange)
##
## for char in posChange:
## # get the controls that have that character
## otherCtrls = charIndex[char]
##
# else:
# print "To fix %s change %s to any of (%s)"% (curCtrl, c, "".join(ctrlAvl))
#
#
#print "bug", ctrls, "".join(freeKeys)
|
lgpl-2.1
|
wholeGenomeSequencingAnalysisPipeline/BacPipe
|
cutadapt-1.12/tests/testmodifiers.py
|
1
|
1619
|
# coding: utf-8
from __future__ import print_function, division, absolute_import
from cutadapt.seqio import Sequence
from cutadapt.modifiers import (UnconditionalCutter, NEndTrimmer, QualityTrimmer,
Shortener)
def test_unconditional_cutter():
uc = UnconditionalCutter(length=5)
s = 'abcdefg'
assert UnconditionalCutter(length=2)(s) == 'cdefg'
assert UnconditionalCutter(length=-2)(s) == 'abcde'
assert UnconditionalCutter(length=100)(s) == ''
assert UnconditionalCutter(length=-100)(s) == ''
def test_nend_trimmer():
trimmer = NEndTrimmer()
seqs = ['NNNNAAACCTTGGNNN', 'NNNNAAACNNNCTTGGNNN', 'NNNNNN']
trims = ['AAACCTTGG', 'AAACNNNCTTGG', '']
for seq, trimmed in zip(seqs, trims):
_seq = Sequence('read1', seq, qualities='#'*len(seq))
_trimmed = Sequence('read1', trimmed, qualities='#'*len(trimmed))
assert trimmer(_seq) == _trimmed
def test_quality_trimmer():
read = Sequence('read1', 'ACGTTTACGTA', '##456789###')
qt = QualityTrimmer(10, 10, 33)
assert qt(read) == Sequence('read1', 'GTTTAC', '456789')
qt = QualityTrimmer(0, 10, 33)
assert qt(read) == Sequence('read1', 'ACGTTTAC', '##456789')
qt = QualityTrimmer(10, 0, 33)
assert qt(read) == Sequence('read1', 'GTTTACGTA', '456789###')
def test_shortener():
read = Sequence('read1', 'ACGTTTACGTA', '##456789###')
shortener = Shortener(0)
assert shortener(read) == Sequence('read1', '', '')
shortener = Shortener(1)
assert shortener(read) == Sequence('read1', 'A', '#')
shortener = Shortener(5)
assert shortener(read) == Sequence('read1', 'ACGTT', '##456')
shortener = Shortener(100)
assert shortener(read) == read
|
gpl-3.0
|
vFense/vFenseAgent-nix
|
agent/deps/rpm6-32/Python-2.7.5/lib/python2.7/idlelib/configHandler.py
|
51
|
29367
|
"""Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
from idlelib import macosxSupport
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
"""
A ConfigParser specialised for idle configuration file handling
"""
def __init__(self, cfgFile, cfgDefaults=None):
"""
cfgFile - string, fully specified configuration file name
"""
self.file=cfgFile
ConfigParser.__init__(self,defaults=cfgDefaults)
def Get(self, section, option, type=None, default=None, raw=False):
"""
Get an option value for given section/option or return default.
If type is specified, return as type.
"""
if not self.has_option(section, option):
return default
if type=='bool':
return self.getboolean(section, option)
elif type=='int':
return self.getint(section, option)
else:
return self.get(section, option, raw=raw)
def GetOptionList(self,section):
"""
Get an option list for given section
"""
if self.has_section(section):
return self.options(section)
else: #return a default value
return []
def Load(self):
"""
Load the configuration file from disk
"""
self.read(self.file)
class IdleUserConfParser(IdleConfParser):
"""
IdleConfigParser specialised for user configuration handling.
"""
def AddSection(self,section):
"""
if section doesn't exist, add it
"""
if not self.has_section(section):
self.add_section(section)
def RemoveEmptySections(self):
"""
remove any sections that have no options
"""
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section)
def IsEmpty(self):
"""
Remove empty sections and then return 1 if parser has no sections
left, else return 0.
"""
self.RemoveEmptySections()
if self.sections():
return 0
else:
return 1
def RemoveOption(self,section,option):
"""
If section/option exists, remove it.
Returns 1 if option was removed, 0 otherwise.
"""
if self.has_section(section):
return self.remove_option(section,option)
def SetOption(self,section,option,value):
"""
Sets option to value, adding section if required.
Returns 1 if option was added or changed, otherwise 0.
"""
if self.has_option(section,option):
if self.get(section,option)==value:
return 0
else:
self.set(section,option,value)
return 1
else:
if not self.has_section(section):
self.add_section(section)
self.set(section,option,value)
return 1
def RemoveFile(self):
"""
Removes the user config file from disk if it exists.
"""
if os.path.exists(self.file):
os.remove(self.file)
def Save(self):
"""Update user configuration file.
Remove empty sections. If resulting config isn't empty, write the file
to disk. If config is empty, remove the file from disk if it exists.
"""
if not self.IsEmpty():
fname = self.file
try:
cfgFile = open(fname, 'w')
except IOError:
os.unlink(fname)
cfgFile = open(fname, 'w')
self.write(cfgFile)
else:
self.RemoveFile()
class IdleConf:
"""
holds config parsers for all idle config files:
default config files
(idle install dir)/config-main.def
(idle install dir)/config-extensions.def
(idle install dir)/config-highlight.def
(idle install dir)/config-keys.def
user config files
(user home dir)/.idlerc/config-main.cfg
(user home dir)/.idlerc/config-extensions.cfg
(user home dir)/.idlerc/config-highlight.cfg
(user home dir)/.idlerc/config-keys.cfg
"""
def __init__(self):
self.defaultCfg={}
self.userCfg={}
self.cfg={}
self.CreateConfigHandlers()
self.LoadCfgFiles()
#self.LoadCfg()
def CreateConfigHandlers(self):
"""
set up a dictionary of config parsers for default and user
configurations respectively
"""
#build idle install path
if __name__ != '__main__': # we were imported
idleDir=os.path.dirname(__file__)
else: # we were exec'ed (for testing only)
idleDir=os.path.abspath(sys.path[0])
userDir=self.GetUserCfgDir()
configTypes=('main','extensions','highlight','keys')
defCfgFiles={}
usrCfgFiles={}
for cfgType in configTypes: #build config file names
defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
for cfgType in configTypes: #create config parsers
self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])
def GetUserCfgDir(self):
"""
Creates (if required) and returns a filesystem directory for storing
user config files.
"""
cfgDir = '.idlerc'
userDir = os.path.expanduser('~')
if userDir != '~': # expanduser() found user home dir
if not os.path.exists(userDir):
warn = ('\n Warning: os.path.expanduser("~") points to\n '+
userDir+',\n but the path does not exist.\n')
try:
sys.stderr.write(warn)
except IOError:
pass
userDir = '~'
if userDir == "~": # still no path to home!
# traditionally IDLE has defaulted to os.getcwd(), is this adequate?
userDir = os.getcwd()
userDir = os.path.join(userDir, cfgDir)
if not os.path.exists(userDir):
try:
os.mkdir(userDir)
except (OSError, IOError):
warn = ('\n Warning: unable to create user config directory\n'+
userDir+'\n Check path and permissions.\n Exiting!\n\n')
sys.stderr.write(warn)
raise SystemExit
return userDir
def GetOption(self, configType, section, option, default=None, type=None,
warn_on_default=True, raw=False):
"""
Get an option value for given config type and given general
configuration section/option or return a default. If type is specified,
return as type. Firstly the user configuration is checked, with a
fallback to the default configuration, and a final 'catch all'
fallback to a useable passed-in default if the option isn't present in
either the user or the default configuration.
configType must be one of ('main','extensions','highlight','keys')
If a default is returned, and warn_on_default is True, a warning is
printed to stderr.
"""
try:
if self.userCfg[configType].has_option(section,option):
return self.userCfg[configType].Get(section, option,
type=type, raw=raw)
except ValueError:
warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
' invalid %r value for configuration option %r\n'
' from section %r: %r\n' %
(type, option, section,
self.userCfg[configType].Get(section, option,
raw=raw)))
try:
sys.stderr.write(warning)
except IOError:
pass
try:
if self.defaultCfg[configType].has_option(section,option):
return self.defaultCfg[configType].Get(section, option,
type=type, raw=raw)
except ValueError:
pass
#returning default, print warning
if warn_on_default:
warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
' problem retrieving configuration option %r\n'
' from section %r.\n'
' returning default value: %r\n' %
(option, section, default))
try:
sys.stderr.write(warning)
except IOError:
pass
return default
def SetOption(self, configType, section, option, value):
"""In user's config file, set section's option to value.
"""
self.userCfg[configType].SetOption(section, option, value)
def GetSectionList(self, configSet, configType):
"""
Get a list of sections from either the user or default config for
the given config type.
configSet must be either 'user' or 'default'
configType must be one of ('main','extensions','highlight','keys')
"""
if not (configType in ('main','extensions','highlight','keys')):
raise InvalidConfigType, 'Invalid configType specified'
if configSet == 'user':
cfgParser=self.userCfg[configType]
elif configSet == 'default':
cfgParser=self.defaultCfg[configType]
else:
raise InvalidConfigSet, 'Invalid configSet specified'
return cfgParser.sections()
def GetHighlight(self, theme, element, fgBg=None):
"""
return individual highlighting theme elements.
fgBg - string ('fg'or'bg') or None, if None return a dictionary
containing fg and bg colours (appropriate for passing to Tkinter in,
e.g., a tag_config call), otherwise fg or bg colour only as specified.
"""
if self.defaultCfg['highlight'].has_section(theme):
themeDict=self.GetThemeDict('default',theme)
else:
themeDict=self.GetThemeDict('user',theme)
fore=themeDict[element+'-foreground']
if element=='cursor': #there is no config value for cursor bg
back=themeDict['normal-background']
else:
back=themeDict[element+'-background']
highlight={"foreground": fore,"background": back}
if not fgBg: #return dict of both colours
return highlight
else: #return specified colour only
if fgBg == 'fg':
return highlight["foreground"]
if fgBg == 'bg':
return highlight["background"]
else:
raise InvalidFgBg, 'Invalid fgBg specified'
def GetThemeDict(self,type,themeName):
"""
type - string, 'default' or 'user' theme type
themeName - string, theme name
Returns a dictionary which holds {option:value} for each element
in the specified theme. Values are loaded over a set of ultimate last
fallback defaults to guarantee that all theme elements are present in
a newly created theme.
"""
if type == 'user':
cfgParser=self.userCfg['highlight']
elif type == 'default':
cfgParser=self.defaultCfg['highlight']
else:
raise InvalidTheme, 'Invalid theme type specified'
        #foreground and background values are provided for each theme element
#(apart from cursor) even though all these values are not yet used
#by idle, to allow for their use in the future. Default values are
#generally black and white.
theme={ 'normal-foreground':'#000000',
'normal-background':'#ffffff',
'keyword-foreground':'#000000',
'keyword-background':'#ffffff',
'builtin-foreground':'#000000',
'builtin-background':'#ffffff',
'comment-foreground':'#000000',
'comment-background':'#ffffff',
'string-foreground':'#000000',
'string-background':'#ffffff',
'definition-foreground':'#000000',
'definition-background':'#ffffff',
'hilite-foreground':'#000000',
'hilite-background':'gray',
'break-foreground':'#ffffff',
'break-background':'#000000',
'hit-foreground':'#ffffff',
'hit-background':'#000000',
'error-foreground':'#ffffff',
'error-background':'#000000',
#cursor (only foreground can be set)
'cursor-foreground':'#000000',
#shell window
'stdout-foreground':'#000000',
'stdout-background':'#ffffff',
'stderr-foreground':'#000000',
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff' }
for element in theme.keys():
if not cfgParser.has_option(themeName,element):
#we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'
' -\n problem retrieving theme element %r'
'\n from theme %r.\n'
' returning default value: %r\n' %
(element, themeName, theme[element]))
try:
sys.stderr.write(warning)
except IOError:
pass
colour=cfgParser.Get(themeName,element,default=theme[element])
theme[element]=colour
return theme
def CurrentTheme(self):
"""
Returns the name of the currently active theme
"""
return self.GetOption('main','Theme','name',default='')
def CurrentKeys(self):
"""
Returns the name of the currently active key set
"""
return self.GetOption('main','Keys','name',default='')
def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
"""
Gets a list of all idle extensions declared in the config files.
active_only - boolean, if true only return active (enabled) extensions
"""
extns=self.RemoveKeyBindNames(
self.GetSectionList('default','extensions'))
userExtns=self.RemoveKeyBindNames(
self.GetSectionList('user','extensions'))
for extn in userExtns:
if extn not in extns: #user has added own extension
extns.append(extn)
if active_only:
activeExtns=[]
for extn in extns:
if self.GetOption('extensions', extn, 'enable', default=True,
type='bool'):
#the extension is enabled
if editor_only or shell_only:
if editor_only:
option = "enable_editor"
else:
option = "enable_shell"
if self.GetOption('extensions', extn,option,
default=True, type='bool',
warn_on_default=False):
activeExtns.append(extn)
else:
activeExtns.append(extn)
return activeExtns
else:
return extns
def RemoveKeyBindNames(self,extnNameList):
#get rid of keybinding section names
names=extnNameList
kbNameIndicies=[]
for name in names:
if name.endswith(('_bindings', '_cfgBindings')):
kbNameIndicies.append(names.index(name))
kbNameIndicies.sort()
kbNameIndicies.reverse()
for index in kbNameIndicies: #delete each keybinding section name
del(names[index])
return names
def GetExtnNameForEvent(self,virtualEvent):
"""
Returns the name of the extension that virtualEvent is bound in, or
None if not bound in any extension.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
extName=None
vEvent='<<'+virtualEvent+'>>'
for extn in self.GetExtensions(active_only=0):
for event in self.GetExtensionKeys(extn).keys():
if event == vEvent:
extName=extn
return extName
def GetExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension,as they exist in the dictionary returned by GetCurrentKeySet;
that is, where previously used bindings are disabled.
"""
keysName=extensionName+'_cfgBindings'
activeKeys=self.GetCurrentKeySet()
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
event='<<'+eventName+'>>'
binding=activeKeys[event]
extKeys[event]=binding
return extKeys
def __GetRawExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension, as defined in the configuration files, or an empty dictionary
if no bindings are found
"""
keysName=extensionName+'_cfgBindings'
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
binding=self.GetOption('extensions',keysName,
eventName,default='').split()
event='<<'+eventName+'>>'
extKeys[event]=binding
return extKeys
def GetExtensionBindings(self,extensionName):
"""
Returns a dictionary of all the event bindings for a particular
extension. The configurable keybindings are returned as they exist in
the dictionary returned by GetCurrentKeySet; that is, where re-used
keybindings are disabled.
"""
bindsName=extensionName+'_bindings'
extBinds=self.GetExtensionKeys(extensionName)
#add the non-configurable bindings
if self.defaultCfg['extensions'].has_section(bindsName):
eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
for eventName in eventNames:
binding=self.GetOption('extensions',bindsName,
eventName,default='').split()
event='<<'+eventName+'>>'
extBinds[event]=binding
return extBinds
def GetKeyBinding(self, keySetName, eventStr):
"""
returns the keybinding for a specific event.
keySetName - string, name of key binding set
eventStr - string, the virtual event we want the binding for,
represented as a string, eg. '<<event>>'
"""
eventName=eventStr[2:-2] #trim off the angle brackets
binding=self.GetOption('keys',keySetName,eventName,default='').split()
return binding
def GetCurrentKeySet(self):
result = self.GetKeySet(self.CurrentKeys())
if macosxSupport.runningAsOSXApp():
            # We're using AquaTk, replace all keybindings that use the
# Alt key by ones that use the Option key because the former
# don't work reliably.
for k, v in result.items():
v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
if v != v2:
result[k] = v2
return result
def GetKeySet(self,keySetName):
"""
Returns a dictionary of: all requested core keybindings, plus the
keybindings for all currently active extensions. If a binding defined
in an extension is already in use, that binding is disabled.
"""
keySet=self.GetCoreKeys(keySetName)
activeExtns=self.GetExtensions(active_only=1)
for extn in activeExtns:
extKeys=self.__GetRawExtensionKeys(extn)
if extKeys: #the extension defines keybindings
for event in extKeys.keys():
if extKeys[event] in keySet.values():
#the binding is already in use
extKeys[event]='' #disable this binding
keySet[event]=extKeys[event] #add binding
return keySet
def IsCoreBinding(self,virtualEvent):
"""
returns true if the virtual event is bound in the core idle keybindings.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys()
def GetCoreKeys(self, keySetName=None):
"""
returns the requested set of core keybindings, with fallbacks if
required.
Keybindings loaded from the config file(s) are loaded _over_ these
defaults, so if there is a problem getting any core binding there will
be an 'ultimate last resort fallback' to the CUA-ish bindings
defined here.
"""
keyBindings={
'<<copy>>': ['<Control-c>', '<Control-C>'],
'<<cut>>': ['<Control-x>', '<Control-X>'],
'<<paste>>': ['<Control-v>', '<Control-V>'],
'<<beginning-of-line>>': ['<Control-a>', '<Home>'],
'<<center-insert>>': ['<Control-l>'],
'<<close-all-windows>>': ['<Control-q>'],
'<<close-window>>': ['<Alt-F4>'],
'<<do-nothing>>': ['<Control-x>'],
'<<end-of-file>>': ['<Control-d>'],
'<<python-docs>>': ['<F1>'],
'<<python-context-help>>': ['<Shift-F1>'],
'<<history-next>>': ['<Alt-n>'],
'<<history-previous>>': ['<Alt-p>'],
'<<interrupt-execution>>': ['<Control-c>'],
'<<view-restart>>': ['<F6>'],
'<<restart-shell>>': ['<Control-F6>'],
'<<open-class-browser>>': ['<Alt-c>'],
'<<open-module>>': ['<Alt-m>'],
'<<open-new-window>>': ['<Control-n>'],
'<<open-window-from-file>>': ['<Control-o>'],
'<<plain-newline-and-indent>>': ['<Control-j>'],
'<<print-window>>': ['<Control-p>'],
'<<redo>>': ['<Control-y>'],
'<<remove-selection>>': ['<Escape>'],
'<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
'<<save-window-as-file>>': ['<Alt-s>'],
'<<save-window>>': ['<Control-s>'],
'<<select-all>>': ['<Alt-a>'],
'<<toggle-auto-coloring>>': ['<Control-slash>'],
'<<undo>>': ['<Control-z>'],
'<<find-again>>': ['<Control-g>', '<F3>'],
'<<find-in-files>>': ['<Alt-F3>'],
'<<find-selection>>': ['<Control-F3>'],
'<<find>>': ['<Control-f>'],
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
'<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
'<<comment-region>>': ['<Alt-Key-3>'],
'<<uncomment-region>>': ['<Alt-Key-4>'],
'<<tabify-region>>': ['<Alt-Key-5>'],
'<<untabify-region>>': ['<Alt-Key-6>'],
'<<toggle-tabs>>': ['<Alt-Key-t>'],
'<<change-indentwidth>>': ['<Alt-Key-u>'],
'<<del-word-left>>': ['<Control-Key-BackSpace>'],
'<<del-word-right>>': ['<Control-Key-Delete>']
}
if keySetName:
for event in keyBindings.keys():
binding=self.GetKeyBinding(keySetName,event)
if binding:
keyBindings[event]=binding
else: #we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
' -\n problem retrieving key binding for event %r'
'\n from key set %r.\n'
' returning default value: %r\n' %
(event, keySetName, keyBindings[event]))
try:
sys.stderr.write(warning)
except IOError:
pass
return keyBindings
def GetExtraHelpSourceList(self,configSet):
"""Fetch list of extra help sources from a given configSet.
Valid configSets are 'user' or 'default'. Return a list of tuples of
the form (menu_item , path_to_help_file , option), or return the empty
list. 'option' is the sequence number of the help resource. 'option'
values determine the position of the menu items on the Help menu,
therefore the returned list must be sorted by 'option'.
"""
helpSources=[]
if configSet=='user':
cfgParser=self.userCfg['main']
elif configSet=='default':
cfgParser=self.defaultCfg['main']
else:
raise InvalidConfigSet, 'Invalid configSet specified'
options=cfgParser.GetOptionList('HelpFiles')
for option in options:
value=cfgParser.Get('HelpFiles',option,default=';')
if value.find(';')==-1: #malformed config entry with no ';'
menuItem='' #make these empty
helpPath='' #so value won't be added to list
else: #config entry contains ';' as expected
value=string.split(value,';')
menuItem=value[0].strip()
helpPath=value[1].strip()
if menuItem and helpPath: #neither are empty strings
helpSources.append( (menuItem,helpPath,option) )
helpSources.sort(key=lambda x: int(x[2]))
return helpSources
def GetAllExtraHelpSourcesList(self):
"""
Returns a list of tuples containing the details of all additional help
sources configured, or an empty list if there are none. Tuples are of
the format returned by GetExtraHelpSourceList.
"""
allHelpSources=( self.GetExtraHelpSourceList('default')+
self.GetExtraHelpSourceList('user') )
return allHelpSources
def LoadCfgFiles(self):
"""
load all configuration files.
"""
for key in self.defaultCfg.keys():
self.defaultCfg[key].Load()
self.userCfg[key].Load() #same keys
def SaveUserCfgFiles(self):
"""
write all loaded user configuration files back to disk
"""
for key in self.userCfg.keys():
self.userCfg[key].Save()
idleConf=IdleConf()
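# Added usage illustration (not part of the original module): GetOption()
# looks in the user config first, then the default config, and finally falls
# back to the passed-in default (printing a warning unless
# warn_on_default=False), e.g.
#   idleConf.GetOption('main', 'EditorWindow', 'width', default=80, type='int')
# GetHighlight() returns a {'foreground': ..., 'background': ...} pair for a
# theme element unless 'fg' or 'bg' is requested explicitly, e.g.
#   idleConf.GetHighlight(idleConf.CurrentTheme(), 'normal')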
### module test
if __name__ == '__main__':
def dumpCfg(cfg):
print '\n',cfg,'\n'
for key in cfg.keys():
sections=cfg[key].sections()
print key
print sections
for section in sections:
options=cfg[key].options(section)
print section
print options
for option in options:
print option, '=', cfg[key].Get(section,option)
dumpCfg(idleConf.defaultCfg)
dumpCfg(idleConf.userCfg)
print idleConf.userCfg['main'].Get('Theme','name')
#print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
|
lgpl-3.0
|
takeshineshiro/cinder
|
cinder/tests/unit/fake_utils.py
|
23
|
2907
|
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in cinder.utils."""
import re
from eventlet import greenthread
import six
from cinder import utils
_fake_execute_repliers = []
_fake_execute_log = []
def fake_execute_get_log():
return _fake_execute_log
def fake_execute_clear_log():
global _fake_execute_log
_fake_execute_log = []
def fake_execute_set_repliers(repliers):
"""Allows the client to configure replies to commands."""
global _fake_execute_repliers
_fake_execute_repliers = repliers
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
"""A reply handler for commands that haven't been added to the reply list.
Returns empty strings for stdout and stderr.
"""
return '', ''
def fake_execute(*cmd_parts, **kwargs):
"""This function stubs out execute.
    It optionally executes a preconfigured function to return expected data.
"""
global _fake_execute_repliers
process_input = kwargs.get('process_input', None)
check_exit_code = kwargs.get('check_exit_code', 0)
delay_on_retry = kwargs.get('delay_on_retry', True)
attempts = kwargs.get('attempts', 1)
run_as_root = kwargs.get('run_as_root', False)
cmd_str = ' '.join(str(part) for part in cmd_parts)
_fake_execute_log.append(cmd_str)
reply_handler = fake_execute_default_reply_handler
for fake_replier in _fake_execute_repliers:
if re.match(fake_replier[0], cmd_str):
reply_handler = fake_replier[1]
break
if isinstance(reply_handler, six.string_types):
# If the reply handler is a string, return it as stdout
reply = reply_handler, ''
else:
# Alternative is a function, so call it
reply = reply_handler(cmd_parts,
process_input=process_input,
delay_on_retry=delay_on_retry,
attempts=attempts,
run_as_root=run_as_root,
check_exit_code=check_exit_code)
# Replicate the sleep call in the real function
greenthread.sleep(0)
return reply
def stub_out_utils_execute(stubs):
fake_execute_set_repliers([])
fake_execute_clear_log()
stubs.Set(utils, 'execute', fake_execute)
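# Hedged usage sketch (added; not part of the original stubs): each replier is
# a (regex, reply) pair matched against the joined command string. A plain
# string reply is returned as stdout; a callable receives the command parts
# plus keyword arguments and must return an (stdout, stderr) tuple, e.g.
#   fake_execute_set_repliers([
#       (r'^vgs\b', 'fake volume-group listing\n'),
#       (r'^lvcreate\b', fake_execute_default_reply_handler),
#   ])
#   stub_out_utils_execute(stubs)   # 'stubs' is supplied by the test fixture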
|
apache-2.0
|
mfraezz/osf.io
|
addons/dataverse/migrations/0001_initial.py
|
28
|
1807
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-23 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.base
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NodeSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('dataverse_alias', models.TextField(blank=True, null=True)),
('dataverse', models.TextField(blank=True, null=True)),
('dataset_doi', models.TextField(blank=True, null=True)),
('_dataset_id', models.TextField(blank=True, null=True)),
('dataset', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict)),
],
options={
'abstract': False,
},
),
]
|
apache-2.0
|
hunter007/django
|
django/conf/global_settings.py
|
46
|
22617
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = []
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
# 'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
]
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ['/home/html', '/var/www']
ALLOWED_INCLUDE_ROOTS = []
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$),
# re.compile(r'^/robots.txt$),
# re.compile(r'^/phpmyadmin/),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which uploaded files streamed to disk will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
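# Hypothetical example (not a default value): if the proxy in front of Django
# sets X-Forwarded-Proto for TLS-terminated requests, the commonly documented
# setting is:
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')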
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
]
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
|
bsd-3-clause
|
mclaughlin6464/pylearn2
|
pylearn2/scripts/datasets/step_through_norb_foveated.py
|
44
|
1160
|
from __future__ import print_function
__author__ = "Ian Goodfellow"
"""
A script for sequentially stepping through FoveatedNORB, viewing each image
and its label.
"""
import numpy as np
from theano.compat.six.moves import input
from pylearn2.datasets.norb_small import FoveatedNORB
from pylearn2.gui.patch_viewer import PatchViewer
from pylearn2.utils import get_choice
print('Use test set?')
choices = {'y': 'test', 'n': 'train'}
which_set = choices[get_choice(choices)]
dataset = FoveatedNORB(which_set=which_set, center=True)
topo = dataset.get_topological_view()
b, r, c, ch = topo.shape
assert ch == 2
pv = PatchViewer((1, 2), (r, c), is_color=False)
i = 0
while True:
patch = topo[i, :, :, :]
patch = patch / np.abs(patch).max()
pv.add_patch(patch[:, :, 1], rescale=False)
pv.add_patch(patch[:, :, 0], rescale=False)
pv.show()
print(dataset.y[i])
choices = {'g': 'goto image', 'q': 'quit'}
if i + 1 < b:
choices['n'] = 'next image'
choice = get_choice(choices)
if choice == 'q':
quit()
if choice == 'n':
i += 1
if choice == 'g':
i = int(input('index: '))
|
bsd-3-clause
|
mafrosis/python-lifx-sdk
|
doc/source/conf.py
|
2
|
9408
|
# -*- coding: utf-8 -*-
#
# lifx-sdk documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 4 22:06:59 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lifx-sdk'
copyright = u'2015, Daniel Hall'
author = u'Daniel Hall'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'lifx-sdkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lifx-sdk.tex', u'lifx-sdk Documentation',
u'Daniel Hall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lifx-sdk', u'lifx-sdk Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lifx-sdk', u'lifx-sdk Documentation',
author, 'lifx-sdk', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
mit
|
caglar/Arcade-Universe
|
arcade_universe/data_generator.py
|
1
|
2656
|
from gen import *
from pdataset import *
import numpy as np
import argparse
from arcade_universe.fg import Foreground, FGTextureType
import time
import copy
class PentominoGenerator(object):
def __init__(self,
batch_size,
use_patch_centers=False,
seed=1321, patch_size=(8, 8),
enable_perlin=False,
center_objects=False,
task=4,
upper_bound=10000000):
self.batch_size = batch_size
self.use_patch_centers = use_patch_centers
self.upper_bound = upper_bound
self.pix_per_patch = np.prod(patch_size)
self.n_examples = 0
# PENTOMINO
self.pentomino_gen = lambda w, h: TwoGroups("pentl/pentn/pentp/pentf/penty/pentj/pentn2/pentq/pentf2/penty2",
seed,
w,
h,
use_patch_centers=use_patch_centers,
n1=1,
n2=2,
rot=True,
texture=None,
scale=True,
center_objects=center_objects,
patch_size=patch_size,
task=task)
pentomino = SpritePlacer(self.pentomino_gen(64, 64),
collision_check=True,
enable_perlin=enable_perlin)
self.pentomino_data_gen = pentomino
def __iter__(self):
return copy.copy(self)
def next(self):
np_data = np.array(np.zeros(self.pix_per_patch**2))
#Target variables
np_targets = np.asarray(np.zeros(1), dtype="int8")
n_count = 0
for data in self.pentomino_data_gen:
if self.n_examples < self.upper_bound:
if n_count < self.batch_size:
np_data = np.vstack((np_data, data[0]))
np_targets = np.vstack((np_targets, data[1]))
if n_count == 0:
np_data = np_data[1:]
np_targets = np_targets[1:]
                    n_count += 1
                    self.n_examples += 1
else:
#Target variables
batched_data = [np_data, np_targets]
return batched_data
else:
raise StopIteration()
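# --- Usage sketch -----------------------------------------------------------
# Not part of the original module; a minimal, hypothetical illustration of the
# batching interface, assuming the imports at the top of this file resolve.
# batch_size and patch_size are arbitrary demo values; next() returns
# [inputs, targets], where inputs has pix_per_patch**2 columns per example and
# targets is an int8 column vector.
if __name__ == "__main__":
    gen = PentominoGenerator(batch_size=32, patch_size=(8, 8), task=4)
    inputs, targets = gen.next()
    print(inputs.shape)   # e.g. (32, 4096) for 8x8 patches on a 64x64 grid
    print(targets.shape)  # e.g. (32, 1)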
|
bsd-3-clause
|
iModels/mbuild
|
mbuild/bond_graph.py
|
2
|
6551
|
"""Bond graph for mBuild Compounds.
NOTE: The functions 'connected_components' and '_bfs' have been obtained (with
modifications) from the Networkx Python package, which is distributed under the
following BSD license:
Copyright (C) 2004-2016, NetworkX Developers
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NetworkX Developers nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from collections import defaultdict
from mbuild.utils.orderedset import OrderedSet
class BondGraph(object):
"""A graph-like object used to store and manipulate bonding information.
`BondGraph` is designed to mimic the API and partial functionality of
NetworkX's `Graph` data structure.
"""
def __init__(self):
self._adj = defaultdict(OrderedSet)
def add_node(self, node):
"""Add a node to the bond graph."""
if not self.has_node(node):
self._adj[node] = OrderedSet()
def remove_node(self, node):
"""Remove a node from the bond graph."""
adj = self._adj
for other_node in self.nodes():
if node in adj[other_node]:
self.remove_edge(node, other_node)
def has_node(self, node):
"""Determine whether the graph contains a node."""
return node in self._adj
def nodes(self):
"""Return all nodes of the bond graph."""
return [node for node in self._adj]
def nodes_iter(self):
"""Iterate through the nodes."""
for node in self._adj:
yield node
def number_of_nodes(self):
"""Get the number of nodes in the graph."""
return sum(1 for _ in self.nodes_iter())
def add_edge(self, node1, node2):
"""Add an edge to the bond graph."""
self._adj[node1].add(node2)
self._adj[node2].add(node1)
def remove_edge(self, node1, node2):
"""Remove an edge from the bond graph."""
adj = self._adj
if self.has_node(node1) and self.has_node(node2):
adj[node1].remove(node2)
adj[node2].remove(node1)
if not adj[node1]:
del adj[node1]
if not adj[node2]:
del adj[node2]
else:
raise ValueError(
"There is no edge between {} and {}".format(node1, node2)
)
def has_edge(self, node1, node2):
"""Determine whether the graph contains an edge."""
if self.has_node(node1):
return node2 in self._adj[node1]
def edges(self):
"""Return all edges in the bond graph."""
edges = OrderedSet()
for node, neighbors in self._adj.items():
for neighbor in neighbors:
bond = (
(node, neighbor)
if self.nodes().index(node) > self.nodes().index(neighbor)
else (neighbor, node)
)
edges.add(bond)
return list(edges)
def edges_iter(self):
"""Iterate through the edges in the bond graph."""
for edge in self.edges():
yield edge
def number_of_edges(self):
"""Get the number of edges in the graph."""
return sum(1 for _ in self.edges())
def neighbors(self, node):
"""Get all neighbors of the given node."""
if self.has_node(node):
return [neighbor for neighbor in self._adj[node]]
else:
return []
def neighbors_iter(self, node):
"""Iterate through the neighbors of the given node."""
if self.has_node(node):
return (neighbor for neighbor in self._adj[node])
else:
return iter(())
def compose(self, graph):
"""Compose this graph with the given graph."""
adj = self._adj
for node, neighbors in graph._adj.items():
if self.has_node(node):
                # merge neighbors for nodes already present in this graph
                for neighbor in neighbors:
                    adj[node].add(neighbor)
elif neighbors:
adj[node] = neighbors
def subgraph(self, nodes):
"""Return a subgraph view of the subgraph induced on given nodes."""
new_graph = BondGraph()
nodes = list(nodes)
adj = self._adj
for node in nodes:
if node not in adj:
continue
for neighbor in adj[node]:
if neighbor in nodes:
new_graph.add_edge(node, neighbor)
return new_graph
def connected_components(self):
"""Generate connected components."""
seen = set()
components = []
for v in self.nodes():
if v not in seen:
c = set(self._bfs(v))
components.append(list(c))
seen.update(c)
return components
def _bfs(self, source):
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(self.neighbors(v))
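# --- Usage sketch -----------------------------------------------------------
# Not part of the original module; a short, hypothetical example of the
# NetworkX-like API described in the class docstring, using integers as nodes.
if __name__ == "__main__":
    graph = BondGraph()
    graph.add_edge(0, 1)   # adding an edge implicitly adds both nodes
    graph.add_edge(1, 2)
    graph.add_node(3)      # an isolated node
    print(graph.number_of_nodes())       # 4
    print(graph.number_of_edges())       # 2
    print(graph.neighbors(1))            # [0, 2]
    print(graph.connected_components())  # e.g. [[0, 1, 2], [3]]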
|
mit
|
bcornwellmott/erpnext
|
erpnext/crm/report/campaign_efficiency/campaign_efficiency.py
|
32
|
2829
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = [], []
columns=get_columns()
data=get_lead_data(filters, "Campaign Name")
return columns, data
def get_columns():
return [
_("Campaign Name") + ":data:130",
_("Lead Count") + ":Int:80",
_("Opp Count") + ":Int:80",
_("Quot Count") + ":Int:80",
_("Order Count") + ":Int:100",
_("Order Value") + ":Float:100",
_("Opp/Lead %") + ":Float:100",
_("Quot/Lead %") + ":Float:100",
_("Order/Quot %") + ":Float:100"
]
def get_lead_data(filters, based_on):
based_on_field = frappe.scrub(based_on)
conditions = get_filter_conditions(filters)
lead_details = frappe.db.sql("""
select {based_on_field}, name
from `tabLead`
where {based_on_field} is not null and {based_on_field} != '' {conditions}
""".format(based_on_field=based_on_field, conditions=conditions), filters, as_dict=1)
lead_map = frappe._dict()
for d in lead_details:
lead_map.setdefault(d.get(based_on_field), []).append(d.name)
data = []
for based_on_value, leads in lead_map.items():
row = {
based_on: based_on_value,
"Lead Count": len(leads)
}
row["Quot Count"]= get_lead_quotation_count(leads)
row["Opp Count"] = get_lead_opp_count(leads)
row["Order Count"] = get_quotation_ordered_count(leads)
row["Order Value"] = get_order_amount(leads)
row["Opp/Lead %"] = row["Opp Count"] / row["Lead Count"] * 100
row["Quot/Lead %"] = row["Quot Count"] / row["Lead Count"] * 100
row["Order/Quot %"] = row["Order Count"] / (row["Quot Count"] or 1) * 100
data.append(row)
return data
def get_filter_conditions(filters):
conditions=""
if filters.from_date:
conditions += " and date(creation) >= %(from_date)s"
if filters.to_date:
conditions += " and date(creation) <= %(to_date)s"
return conditions
def get_lead_quotation_count(leads):
return frappe.db.sql("""select count(name) from `tabQuotation`
where lead in (%s)""" % ', '.join(["%s"]*len(leads)), tuple(leads))[0][0]
def get_lead_opp_count(leads):
return frappe.db.sql("""select count(name) from `tabOpportunity`
where lead in (%s)""" % ', '.join(["%s"]*len(leads)), tuple(leads))[0][0]
def get_quotation_ordered_count(leads):
return frappe.db.sql("""select count(name)
from `tabQuotation` where status = 'Ordered'
and lead in (%s)""" % ', '.join(["%s"]*len(leads)), tuple(leads))[0][0]
def get_order_amount(leads):
return frappe.db.sql("""select sum(base_net_amount)
from `tabSales Order Item`
where prevdoc_docname in (
select name from `tabQuotation` where status = 'Ordered'
and lead in (%s)
)""" % ', '.join(["%s"]*len(leads)), tuple(leads))[0][0]
|
gpl-3.0
|
simvisage/oricreate
|
docs/howtos/ex08_rigid_facets/sim031miura_ori_psi_cntl.py
|
1
|
2750
|
r'''
Fold the Miura ori crease pattern using psi control
---------------------------------------------------
'''
import numpy as np
from oricreate.api import \
SimulationTask, SimulationConfig, \
FTV, FTA
from oricreate.gu import \
GuConstantLength, GuDofConstraints, GuPsiConstraints, fix
def create_cp_factory():
# begin
from oricreate.api import MiuraOriCPFactory
cp_factory = MiuraOriCPFactory(L_x=30,
L_y=21,
n_x=2,
n_y=2,
d_0=3.0,
d_1=-3.0)
# end
return cp_factory
if __name__ == '__main__':
cpf = create_cp_factory()
cp = cpf.formed_object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cp.plot_mpl(ax, facets=True)
plt.tight_layout()
plt.show()
    # Link the crease pattern factory with the constraint clients
gu_constant_length = GuConstantLength()
dof_constraints = fix(cpf.N_grid[0, 1], [1]) \
+ fix(cpf.N_grid[1, 1], [0, 1, 2]) \
+ fix(cpf.N_grid[1, (0, -1)], [2])
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
psi_max = np.pi / 4.0
diag_psi_constraints = [([(i, 1.0)], 0) for i in cpf.L_d_grid.flatten()]
gu_psi_constraints = \
GuPsiConstraints(forming_task=cpf,
psi_constraints=diag_psi_constraints +
[([(cpf.L_h_grid[1, 1], 1.0)],
lambda t: -psi_max * t),
])
sim_config = SimulationConfig(goal_function_type='none',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints,
'psi': gu_psi_constraints},
acc=1e-5, MAX_ITER=10)
sim_task = SimulationTask(previous_task=cpf,
config=sim_config,
n_steps=5)
cp = sim_task.formed_object
cp.u[cpf.N_grid[(0, -1), 1], 2] = -1.0
sim_task.u_1
ftv = FTV()
#ftv.add(sim_task.sim_history.viz3d['node_numbers'], order=5)
ftv.add(sim_task.sim_history.viz3d['cp'])
ftv.add(gu_dof_constraints.viz3d['default'])
fta = FTA(ftv=ftv)
fta.init_view(a=200, e=35, d=50, f=(0, 0, 0), r=0)
fta.add_cam_move(a=200, e=34, n=5, d=50, r=0,
duration=10,
# vot_fn=lambda cmt: np.linspace(0, 1, 4),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.configure_traits()
|
gpl-3.0
|
CAChemE/stochastic-optimization
|
ConventionalDistillationColumn/hyInterface.py
|
1
|
4985
|
# -*- coding: utf-8 -*-
import os
import win32com.client as win32
"""
# -------------------------------------------------------------------------
# SIMULATION-BASED OPTIMIZATION OF A SINGLE CONVENTIONAL DISTILLATION
# COLUMN USING THE PARTICLE SWARM OPTIMIZATION ALGORITHM
#--------------------------------------------------------------------------
# Juan Javaloyes Antón. Sep 2016
#--------------------------------------------------------------------------
# # 03 # Aspen Hysys Python Interface - Conventional Distillation Column Test
#--------------------------------------------------------------------------
"""
# >>>>>>>>>>>>>>>[ Aspen Hysys - Python Interface ]>>>>>>>>>>>>>>> > User inputs
# Aspen Hysys file name
#hy_filename = 'Test_Column.hsc'
#hy_best_model_filename = 'Best_Column.hsc'
#hy_visible = 1
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> >End inputs
def hy_Dist_Col_Object(Problem, *varargin):
hy_filename = Problem.hy_filename
hy_best_model_filename = Problem.hy_best_model_filename
hy_visible = Problem.hy_visible
# 01 Full path to Aspen Hysys File & Best Solution Hysys File
hyFilePath = os.path.abspath(hy_filename)
hy_beswt_solution_FilePath = os.path.abspath(hy_best_model_filename)
# 02 Initialize Aspen Hysys application
print(' # Connecting to the Aspen Hysys App ... ')
HyApp = win32.Dispatch('HYSYS.Application')
# 03 Open Aspen Hysys File
# HyCase = HyApp.SimulationCases.Open(hyFilePath)
HyCase = HyApp.ActiveDocument
# 04 Aspen Hysys Environment Visible
HyCase.Visible = hy_visible
# 05 Aspen Hysys File Name
HySysFile = HyCase.Title.Value
print(' ')
print('HySys File: ---------- ', HySysFile)
# 06 Aspen Hysys Fluid Package Name
package_name = HyCase.Flowsheet.FluidPackage.PropertyPackageName
print('HySys Fluid Package: --- ', package_name)
print(' ')
### Access to Main Aspen Hysys Objects #########################################
# -----------------------------------------------------------------------------
# 07 Main Aspen Hysys Document Objects
HySolver = HyCase.Solver # Access to Hysys Solver
# HyFlowsheet = HyCase.Flowsheet # Access to main Flowsheet
HyOperations = HyCase.Flowsheet.Operations # Access to the Unit Operations
HyMaterialStream = HyCase.Flowsheet.MaterialStreams # Access to the material streams
HyEnergyStream = HyCase.Flowsheet.EnergyStreams # Access to the energy streams
# 08 Access to Distillation Column Environment
# Interfacing with the Aspen Hysys Objects needed to compute the Total
# Annual Cost of the Conventional Distillation Column #
# 08.1 Access to Hysys Distillation Column and Column Flowsheet
Column_Name = HyOperations.Names[0]
class DistColumn:
pass
DistColumn.Column = HyOperations.Item(Column_Name)
DistColumn.ColumnFlowsheet = DistColumn.Column.ColumnFlowsheet
# 08.1.1 Access to Column objects
DistColumn.Specifications = DistColumn.ColumnFlowsheet.Specifications # RR/BR/....
DistColumn.Operations = DistColumn.ColumnFlowsheet.Operations # Main TS/Reboiler/Condenser
    DistColumn.FeedStreams = DistColumn.ColumnFlowsheet.FeedStreams # Access to Feed Streams (material and energy) for the Column Environment (Main TS, Reboiler and Condenser)
# 08.1.1.1 Access to Main TS of the distillation column (Column Environment)
DistColumn.Main_TS = DistColumn.ColumnFlowsheet.Operations.Item('Main TS') # Access to Main TS in Column Environment
# 08.1.1.2 Access to Feed stream object of the Main Tray Section
DistColumn.FeedMainTS = DistColumn.FeedStreams.Item('Feed')
# 08.2. Material Streams
class MaterialStream:
pass
MaterialStream.Distillate = HyMaterialStream.Item('Distillate')
MaterialStream.Bottoms = HyMaterialStream.Item('Bottoms')
# 08.3. Energy Streams
class EnergyStream:
pass
EnergyStream.Qreb = HyEnergyStream.Item('Qreb')
EnergyStream.Qcond = HyEnergyStream.Item('Qcond')
# 09 ...: HYSYS OBJECTS :...
class HyObject:
pass
HyObject.HyApp = HyApp
HyObject.HyCase = HyCase
HyObject.DistColumn = DistColumn
HyObject.MaterialStream = MaterialStream
HyObject.EnergyStream = EnergyStream
HyObject.HySolver = HySolver
HyObject.CDC_model_Root = hy_beswt_solution_FilePath # Full Path to Best Solution Aspen Hysys file
HyObject.folder_paht = hy_beswt_solution_FilePath[0:-len(hy_best_model_filename)] # Folder Path
print( '# Aspen Hysys - Python Interface has been Established....')
return(HyObject)
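# --- Usage sketch -----------------------------------------------------------
# Hypothetical, minimal example; not part of the original interface module.
# The Problem object only needs the three attributes read at the top of
# hy_Dist_Col_Object(), and an Aspen Hysys case must already be open because
# the function attaches to the application's ActiveDocument.
if __name__ == "__main__":
    class Problem:
        hy_filename = 'Test_Column.hsc'
        hy_best_model_filename = 'Best_Column.hsc'
        hy_visible = 1
    hy = hy_Dist_Col_Object(Problem)
    # The wired-up objects are then available for the optimization loop, e.g.:
    # hy.DistColumn.FeedMainTS, hy.MaterialStream.Distillate, hy.EnergyStream.Qreb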
|
bsd-3-clause
|
jrutila/cmsplugin-filer
|
cmsplugin_filer_link/models.py
|
17
|
1199
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from cms.models import CMSPlugin, Page
from cms.models.fields import PageField
from filer.fields.file import FilerFileField
DEFULT_LINK_STYLES = (
(" ", "Default"),
)
LINK_STYLES = getattr(settings, "FILER_LINK_STYLES", DEFULT_LINK_STYLES)
class FilerLinkPlugin(CMSPlugin):
name = models.CharField(_('name'), max_length=255)
url = models.CharField(_("url"), blank=True, null=True, max_length=255)
page_link = PageField(verbose_name=_("page"), blank=True, null=True,
help_text=_("A link to a page has priority over urls."))
mailto = models.EmailField(_("mailto"), blank=True, null=True,
help_text=_("An email address has priority over both pages and urls"))
link_style = models.CharField(_("link style"), max_length=255,
choices=LINK_STYLES, default=" ")
new_window = models.BooleanField(_("new window?"), default=False,
help_text=_("Do you want this link to open a new window?"))
file = FilerFileField(blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
|
bsd-3-clause
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Pillow-2.3.0-py2.7-linux-x86_64.egg/PIL/GbrImagePlugin.py
|
18
|
1579
|
#
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile, _binary
i32 = _binary.i32be
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError("not a GIMP brush")
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError("not a GIMP brush")
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.frombytes(self.data)
self.data = b""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
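# Hypothetical usage sketch (not part of the plugin): once this module has been
# imported, the register_open/register_extension calls above let the normal PIL
# entry point handle GIMP brushes. "brush.gbr" is a placeholder path.
#
#   from PIL import Image, GbrImagePlugin  # import registers the plugin
#   im = Image.open("brush.gbr")
#   print(im.format, im.mode, im.size, im.info.get("comment"))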
|
apache-2.0
|
dash-dash/pyzmq
|
zmq/eventloop/minitornado/platform/auto.py
|
50
|
1424
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function, with_statement
import os
if os.name == 'nt':
from .common import Waker
from .windows import set_close_exec
else:
from .posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
except ImportError:
pass
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
|
bsd-3-clause
|
harveybia/face-hack
|
venv/face/lib/python2.7/site-packages/PIL/ImageOps.py
|
51
|
13895
|
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL._util import isStringType
import operator
import functools
#
# helpers
def _border(border):
if isinstance(border, tuple):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if isStringType(color):
from PIL import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError("not supported for this image mode")
#
# actions
def autocontrast(image, cutoff=0, ignore=None):
"""
Maximize (normalize) image contrast. This function calculates a
histogram of the input image, removes **cutoff** percent of the
lightest and darkest pixels from the histogram, and remaps the image
so that the darkest pixel becomes black (0), and the lightest
becomes white (255).
:param image: The image to process.
:param cutoff: How many percent to cut off from the histogram.
:param ignore: The background pixel value (use None for no background).
:return: An image.
"""
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff // 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] -= cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff // 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] -= cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(list(range(256)))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
def colorize(image, black, white):
"""
Colorize grayscale image. The **black** and **white**
arguments should be RGB tuples; this function calculates a color
wedge mapping all black pixels in the source image to the first
color, and all white pixels to the second color.
:param image: The image to colorize.
:param black: The color to use for black input pixels.
:param white: The color to use for white input pixels.
:return: An image.
"""
assert image.mode == "L"
black = _color(black, "RGB")
white = _color(white, "RGB")
red = []
green = []
blue = []
for i in range(256):
red.append(black[0]+i*(white[0]-black[0])//255)
green.append(black[1]+i*(white[1]-black[1])//255)
blue.append(black[2]+i*(white[2]-black[2])//255)
image = image.convert("RGB")
return _lut(image, red + green + blue)
def crop(image, border=0):
"""
Remove border from image. The same amount of pixels are removed
from all four sides. This function works on all image modes.
.. seealso:: :py:meth:`~PIL.Image.Image.crop`
:param image: The image to crop.
:param border: The number of pixels to remove.
:return: An image.
"""
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)
def deform(image, deformer, resample=Image.BILINEAR):
"""
Deform the image.
:param image: The image to deform.
:param deformer: A deformer object. Any object that implements a
**getmesh** method can be used.
:param resample: What resampling filter to use.
:return: An image.
"""
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
def equalize(image, mask=None):
"""
Equalize the image histogram. This function applies a non-linear
mapping to the input image, in order to create a uniform
distribution of grayscale values in the output image.
:param image: The image to equalize.
:param mask: An optional mask. If given, only the pixels selected by
the mask are included in the analysis.
:return: An image.
"""
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = [_f for _f in h[b:b+256] if _f]
if len(histo) <= 1:
lut.extend(list(range(256)))
else:
step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
if not step:
lut.extend(list(range(256)))
else:
n = step // 2
for i in range(256):
lut.append(n // step)
n = n + h[i+b]
return _lut(image, lut)
def expand(image, border=0, fill=0):
"""
Add border to the image
:param image: The image to expand.
:param border: Border width, in pixels.
:param fill: Pixel fill value (a color value). Default is 0 (black).
:return: An image.
"""
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
Returns a sized and cropped version of the image, cropped to the
requested aspect ratio and size.
This function was contributed by Kevin Cazabon.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.NEAREST`.
    :param bleed: Remove a border around the outside of the image (from all
                  four edges). The value is a decimal percentage (use 0.01 for
one percent). The default value is 0 (no border).
:param centering: Control the cropping position. Use (0.5, 0.5) for
center cropping (e.g. if cropping the width, take 50% off
of the left side, and therefore 50% off the right side).
(0.0, 0.0) will crop from the top left corner (i.e. if
cropping the width, take all of the crop off of the right
side, and if cropping the height, take all of it off the
bottom). (1.0, 0.0) will crop from the bottom left
corner, etc. (i.e. if cropping the width, take all of the
crop off the left side, and if cropping the height take
none from the top, and therefore all off the bottom).
:return: An image.
"""
# by Kevin Cazabon, Feb 17/2000
# [email protected]
# http://www.cazabon.com
# ensure inputs are valid
if not isinstance(centering, list):
centering = [centering[0], centering[1]]
if centering[0] > 1.0 or centering[0] < 0.0:
centering[0] = 0.50
if centering[1] > 1.0 or centering[1] < 0.0:
centering[1] = 0.50
if bleed > 0.49999 or bleed < 0.0:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleedPixels = (
int((float(bleed) * float(image.size[0])) + 0.5),
int((float(bleed) * float(image.size[1])) + 0.5)
)
liveArea = (0, 0, image.size[0], image.size[1])
if bleed > 0.0:
liveArea = (
bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1,
image.size[1] - bleedPixels[1] - 1
)
liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])
# calculate the aspect ratio of the liveArea
liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])
# calculate the aspect ratio of the output image
aspectRatio = float(size[0]) / float(size[1])
# figure out if the sides or top/bottom will be cropped off
if liveAreaAspectRatio >= aspectRatio:
# liveArea is wider than what's needed, crop the sides
cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)
cropHeight = liveSize[1]
else:
# liveArea is taller than what's needed, crop the top and bottom
cropWidth = liveSize[0]
cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)
# make the crop
leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))
if leftSide < 0:
leftSide = 0
topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))
if topSide < 0:
topSide = 0
out = image.crop(
(leftSide, topSide, leftSide + cropWidth, topSide + cropHeight)
)
# resize the image and return it
return out.resize(size, method)
def flip(image):
"""
Flip the image vertically (top to bottom).
:param image: The image to flip.
:return: An image.
"""
return image.transpose(Image.FLIP_TOP_BOTTOM)
def grayscale(image):
"""
Convert the image to grayscale.
:param image: The image to convert.
:return: An image.
"""
return image.convert("L")
def invert(image):
"""
Invert (negate) the image.
:param image: The image to invert.
:return: An image.
"""
lut = []
for i in range(256):
lut.append(255-i)
return _lut(image, lut)
def mirror(image):
"""
Flip image horizontally (left to right).
:param image: The image to mirror.
:return: An image.
"""
return image.transpose(Image.FLIP_LEFT_RIGHT)
def posterize(image, bits):
"""
Reduce the number of bits for each color channel.
:param image: The image to posterize.
:param bits: The number of bits to keep for each channel (1-8).
:return: An image.
"""
lut = []
mask = ~(2**(8-bits)-1)
for i in range(256):
lut.append(i & mask)
return _lut(image, lut)
def solarize(image, threshold=128):
"""
Invert all pixel values above a threshold.
:param image: The image to solarize.
:param threshold: All pixels above this greyscale level are inverted.
:return: An image.
"""
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255-i)
return _lut(image, lut)
# --------------------------------------------------------------------
# PIL USM components, from Kevin Cazabon.
def gaussian_blur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
if radius is None:
radius = 5.0
im.load()
return im.im.gaussian_blur(radius)
gblur = gaussian_blur
def unsharp_mask(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
if radius is None:
radius = 5.0
if percent is None:
percent = 150
if threshold is None:
threshold = 3
im.load()
return im.im.unsharp_mask(radius, percent, threshold)
usm = unsharp_mask
def box_blur(image, radius):
"""
Blur the image by setting each pixel to the average value of the pixels
in a square box extending radius pixels in each direction.
Supports float radius of arbitrary size. Uses an optimized implementation
which runs in linear time relative to the size of the image
for any radius value.
:param image: The image to blur.
:param radius: Size of the box in one direction. Radius 0 does not blur,
returns an identical image. Radius 1 takes 1 pixel
in each direction, i.e. 9 pixels in total.
:return: An image.
"""
image.load()
return image._new(image.im.box_blur(radius))
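# Hypothetical usage sketch (not part of the module): each helper above takes a
# PIL Image and returns a new one, so the operations compose naturally.
# "photo.jpg" is a placeholder path.
#
#   from PIL import Image, ImageOps
#   im = Image.open("photo.jpg")
#   im = ImageOps.grayscale(im)               # convert to mode "L"
#   im = ImageOps.autocontrast(im, cutoff=2)  # clip 2% from each histogram end
#   im = ImageOps.expand(im, border=10, fill=255)
#   im.save("photo_processed.jpg")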
|
mit
|
yaojingwu1992/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_display_units06.py
|
8
|
1414
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_display_units06.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [93548544, 93550464]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.set_y_axis({'display_units': 'millions', 'display_units_visible': 0})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
|
juanantoniofm12/toonai
|
Zappa/zappa/middleware.py
|
4
|
2374
|
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
"""
Permute all casings of a given string.
    A pretty algorithm, via @Amber
http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
"""
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
class ZappaWSGIMiddleware(object):
"""
Middleware functions necessary for a Zappa deployment.
    Most hacks have now been removed except for Set-Cookie permutation.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
"""
We must case-mangle the Set-Cookie header name or AWS will use only a
single one of these headers.
"""
def encode_response(status, headers, exc_info=None):
"""
Create an APIGW-acceptable version of our cookies.
We have to use a bizarre hack that turns multiple Set-Cookie headers into
their case-permuted format, e.g.:
Set-cookie:
sEt-cookie:
seT-cookie:
To get around an API Gateway limitation.
This is weird, but better than our previous hack of creating a Base58-encoded
supercookie.
"""
# All the non-cookie headers should be sent unharmed.
new_headers = [(header[0], header[1]) for header in headers if
header[0] != 'Set-Cookie']
cookie_headers = [x for x in headers if x[0] == "Set-Cookie"]
for header, new_name in zip(cookie_headers,
all_casings("Set-Cookie")):
new_headers.append((new_name, header[1]))
return start_response(status, new_headers, exc_info)
# Call the application with our modifier
response = self.application(environ, encode_response)
# Return the response as a WSGI-safe iterator
return ClosingIterator(response)
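# --- Illustrative sketch (not part of Zappa): wrapping a trivial WSGI app to
# show how duplicate Set-Cookie headers are re-emitted under case-permuted
# names so API Gateway keeps all of them. All names below are made up.
if __name__ == '__main__':
    def demo_app(environ, start_response):
        headers = [('Content-Type', 'text/plain'),
                   ('Set-Cookie', 'a=1'),
                   ('Set-Cookie', 'b=2')]
        start_response('200 OK', headers)
        return [b'ok']

    def record_start_response(status, headers, exc_info=None):
        # Expect one 'set-cookie' and one 'Set-cookie' entry in here.
        print(headers)

    wrapped = ZappaWSGIMiddleware(demo_app)
    list(wrapped({}, record_start_response))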
|
mit
|
wanggang3333/scikit-learn
|
examples/model_selection/plot_validation_curve.py
|
229
|
1823
|
"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
timvideos/flumotion
|
flumotion/component/common/fgdp/fgdp.py
|
3
|
10973
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import gobject
import gst
from twisted.internet import reactor
from flumotion.component.common.fgdp import protocol as fgdp
GDP_TYPE_PRODUCER = "producer-type"
GDP_TYPE_CONSUMER = "consumer-type"
class FDHandler(object):
"""
Base class for elements handling file descriptors
@type fdelement: L{gst.Element}
"""
def __init__(self, fdelement):
self.fdelement = fdelement
### FDHandler interface for subclasses
def connectFd(self, fd):
'''
Connects a file descriptor to the gstreamer element that will be
writing to it or reading from it
@type fd: int
'''
raise NotImplementedError("subclass must implement connectFd")
def disconnectFd(self, fd):
'''
Disconnects a file descriptor from the gstreamer element that is
writing to it or reading from it
@type fd: int
'''
raise NotImplementedError("subclass must implement disconnectFd")
class FDSrc(FDHandler):
"""
File descriptors handler based on fdsrc
@type protocol: L{flumotion.common.gdp_protocol.FGDPBaseProtocol}
"""
logCategory = 'gdp-producer'
protocol = None
_handler_id = None
def __init__(self, fdelement):
FDHandler.__init__(self, fdelement)
def _check_eos(self, pad, event):
if event.type == gst.EVENT_EOS:
# An EOS is triggered after a disconnection, when the read on the
# socket returns 0 bytes. Remove the handler and close the connection
pad.remove_event_probe(self._handler_id)
if self.protocol is not None:
reactor.callFromThread(self.protocol.loseConnection)
return False
return True
def connectFd(self, fd):
# Unlock the state of the element, which should be already in the READY
# state. Add the fd and an event probe to detect disconnections
self.fdelement.set_locked_state(False)
self.fdelement.set_property('fd', fd)
srcpad = self.fdelement.get_pad("src")
self._handler_id = srcpad.add_event_probe(self._check_eos)
self.fdelement.set_state(gst.STATE_PLAYING)
def disconnectFd(self, _):
# Set back the element to the READY state, in which a fd can be
# added/changed and lock the state of the element
self.fdelement.set_state(gst.STATE_READY)
self.fdelement.get_state(0)
self.fdelement.set_locked_state(True)
class MultiFDSink(FDHandler):
"""
File descriptors handler based on multifdsink
@type protocol: L{flumotion.common.gdp_protocol.FGDPBaseProtocol}
"""
logCategory = 'gdp-consumer'
protocol = None
_activeFD = None
_handler_id = None
def __init__(self, fdelement):
FDHandler.__init__(self, fdelement)
def _on_client_removed(self, a, fd):
if self.protocol is not None:
reactor.callFromThread(self.protocol.loseConnection)
self.fdelement.handler_disconnect(self._handler_id)
def connectFd(self, fd):
self.fdelement.emit('add', fd)
self._activeFD = fd
self._handler_id = self.fdelement.connect('client-fd-removed',
self._on_client_removed)
def disconnectFd(self, fd):
if self._activeFD == fd:
self.fdelement.emit('remove', fd)
self._activeFD = None
class _ProtocolMixin(object):
"""
Provides an abstraction for the start and stop of a client or server using
the FGDP protocol, which depends on the 'mode' selected amongst 'push' or
'pull'
@type mode: str
@type host: str
@type port: int
"""
mode = ''
host = None
port = None
_listener = None
_connector = None
def start(self):
"""
Starts a server/client using the FGDP protocol when the element
is ready.
"""
if self.mode == 'push':
self._start_push()
else:
self._start_pull()
def stop(self):
"""
Stops the server/client using the FGDP protocol.
"""
if self._listener is not None:
self._listener.stopListening()
if self._connector is not None:
self._connector.disconnect()
def _start_push(self):
self.info("Starting fgdp client")
factory = fgdp.FGDPClientFactory(self)
self._connector = reactor.connectTCP(self.host, self.port, factory)
def _start_pull(self):
self.info("Starting fgdp server")
factory = fgdp.FGDPServerFactory(self)
self._listener = reactor.listenTCP(self.port, factory)
class FGDPBase(gst.Bin, _ProtocolMixin):
"""
Base class for gstreamer elements using the FGDP protocol
"""
mode = 'pull'
host = 'localhost'
port = 15000
username = 'user'
password = 'test'
maxDelay = 5
version = '0.1'
__gproperties__ = {
'mode': (gobject.TYPE_STRING, 'mode',
"Connection mode: 'pull' or 'push'",
'pull', gobject.PARAM_READWRITE),
'host': (gobject.TYPE_STRING, 'host',
'Name of the host to connect (in push mode)',
'localhost', gobject.PARAM_READWRITE),
'port': (gobject.TYPE_INT, 'port',
'Connection port',
1, 64000, 15000, gobject.PARAM_READWRITE),
'username': (gobject.TYPE_STRING, 'user name',
'Username for the authentication',
'user', gobject.PARAM_READWRITE),
'password': (gobject.TYPE_STRING, 'password',
'Password for the authentication',
'test', gobject.PARAM_READWRITE),
'version': (gobject.TYPE_STRING, 'version',
'Protocol version',
'0.1', gobject.PARAM_READWRITE),
'max-reconnection-delay': (gobject.TYPE_FLOAT,
'maximum delay between reconnections in seconds',
'Maximum delay between reconnections in seconds (for push mode)',
1, 100, 5, gobject.PARAM_READWRITE)}
__gsignals__ = {"connected": (gobject.SIGNAL_RUN_LAST,\
gobject.TYPE_NONE, []),
"disconnected": (gobject.SIGNAL_RUN_LAST,\
gobject.TYPE_NONE,
(gobject.TYPE_STRING, ))}
def _handle_error(self, message):
err = gst.GError(gst.RESOURCE_ERROR,
gst.RESOURCE_ERROR_FAILED, message)
m = gst.message_new_error(self, err, message)
self.post_message(m)
self.error(message)
def do_change_state(self, transition):
if transition == gst.STATE_CHANGE_READY_TO_PAUSED:
try:
self.prepare()
self.start()
except Exception, e:
self._handle_error(str(e))
self.stop()
return gst.STATE_CHANGE_FAILURE
elif transition == gst.STATE_CHANGE_PAUSED_TO_READY:
self.stop()
return gst.Bin.do_change_state(self, transition)
def do_set_property(self, prop, value):
if prop.name in ['mode', 'host', 'username', 'password', 'port',
'version']:
setattr(self, prop.name, value)
elif prop.name == 'max-reconnection-delay':
self.maxDelay = float(value)
else:
raise AttributeError('unknown property %s' % prop.name)
def do_get_property(self, prop):
if prop.name in ['mode', 'host', 'username', 'password', 'port',
'version']:
return getattr(self, prop.name)
if prop.name == 'max-reconnection-delay':
return self.maxDelay
raise AttributeError('unknown property %s' % prop.name)
def prepare(self):
"""
Should be implemented by subclasses that need to do something
before starting the server/client
"""
pass
class FGDPSink(FGDPBase, MultiFDSink):
'''
GStreamer sink element using the FGDP protocol
'''
mode = 'push'
__gstdetails__ = ('FGDPsink', 'Sink',
'Flumotion GStreamer data protocol sink',
'Flumotion DevTeam')
def __init__(self):
FGDPBase.__init__(self)
# Create elements
gdppay = gst.element_factory_make('gdppay')
self.fdelement = gst.element_factory_make('multifdsink')
# Set default properties
self.fdelement.set_property('sync', False)
self.fdelement.set_property('unit-type', 2)
self.fdelement.set_property('units-max', 1 * gst.SECOND)
self.fdelement.set_property('units-soft-max', 700 * gst.MSECOND)
self.fdelement.set_property('recover-policy', 1)
# Create fd handler proxy
MultiFDSink.__init__(self, self.fdelement)
# Add elements to the bin and link them
self.add(gdppay, self.fdelement)
gdppay.link(self.fdelement)
# Create sink pads
self._sink_pad = gst.GhostPad('sink', gdppay.get_pad('sink'))
self.add_pad(self._sink_pad)
class FGDPSrc(FGDPBase, FDSrc):
'''
GStreamer source element using the FGDP protocol
'''
mode = 'pull'
__gstdetails__ = ('FGDPsrc', 'Source',
'Flumotion GStreamer data protocol source',
'Flumotion DevTeam')
def __init__(self):
FGDPBase.__init__(self)
# Create elements
self.fdelement = gst.element_factory_make('fdsrc')
gdpdepay = gst.element_factory_make('gdpdepay')
# Add elements to the bin and link them
self.add(self.fdelement, gdpdepay)
self.fdelement.link(gdpdepay)
# Create fd handler proxy
FDSrc.__init__(self, self.fdelement)
# Create sink pads
self._src_pad = gst.GhostPad('src', gdpdepay.get_pad('src'))
self.add_pad(self._src_pad)
def prepare(self):
# Lock the state until we get the first connection and we can pass it
# a valid fd, otherwise it will be using stdin.
self.fdelement.set_locked_state(True)
gobject.type_register(FGDPSink)
gst.element_register(FGDPSink, "fgdpsink", gst.RANK_MARGINAL)
gobject.type_register(FGDPSrc)
gst.element_register(FGDPSrc, "fgdpsrc", gst.RANK_MARGINAL)
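# --- Illustrative usage sketch (not part of Flumotion). Assumes the legacy
# gst 0.10 Python bindings and that this module has already been imported,
# so the 'fgdpsink' element is registered. Host and port are placeholders.
def _sketch_make_push_sink(host='receiver.example.com', port=15000):
    sink = gst.element_factory_make('fgdpsink')
    sink.set_property('mode', 'push')   # connect out to a listening fgdpsrc
    sink.set_property('host', host)
    sink.set_property('port', port)
    return sink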
|
lgpl-2.1
|
iambibhas/django
|
django/db/migrations/operations/models.py
|
2
|
18399
|
from __future__ import unicode_literals
from django.db import models
from django.db.models.options import normalize_together
from django.db.migrations.state import ModelState
from django.db.migrations.operations.base import Operation
from django.utils import six
class CreateModel(Operation):
"""
Create a model's table.
"""
serialization_expand_args = ['fields', 'options', 'managers']
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
def deconstruct(self):
kwargs = {
'name': self.name,
'fields': self.fields,
}
if self.options:
kwargs['options'] = self.options
if self.bases and self.bases != (models.Model,):
kwargs['bases'] = self.bases
if self.managers and self.managers != [('objects', models.Manager())]:
kwargs['managers'] = self.managers
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.add_model(ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.rel:
if isinstance(field.rel.to, six.string_types):
strings_to_check.append(field.rel.to.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
def deconstruct(self):
kwargs = {
'name': self.name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def deconstruct(self):
kwargs = {
'old_name': self.old_name,
'new_name': self.new_name,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
# Get all of the related objects we need to repoint
apps = state.apps
model = apps.get_model(app_label, self.old_name)
model._meta.apps = apps
related_objects = model._meta.get_all_related_objects()
related_m2m_objects = model._meta.get_all_related_many_to_many_objects()
# Rename the model
state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()]
state.models[app_label, self.new_name.lower()].name = self.new_name
state.remove_model(app_label, self.old_name)
# Repoint the FKs and M2Ms pointing to us
for related_object in (related_objects + related_m2m_objects):
# Use the new related key for self referential related objects.
if related_object.model == model:
related_key = (app_label, self.new_name.lower())
else:
related_key = (
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)
new_fields = []
for name, field in state.models[related_key].fields:
if name == related_object.field.name:
field = field.clone()
field.rel.to = "%s.%s" % (app_label, self.new_name)
new_fields.append((name, field))
state.models[related_key].fields = new_fields
state.reload_model(*related_key)
state.reload_model(app_label, self.new_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
# Move the main table
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Alter the fields pointing to us
related_objects = old_model._meta.get_all_related_objects()
related_m2m_objects = old_model._meta.get_all_related_many_to_many_objects()
for related_object in (related_objects + related_m2m_objects):
if related_object.model == old_model:
model = new_model
related_key = (app_label, self.new_name.lower())
else:
model = related_object.model
related_key = (
related_object.model._meta.app_label,
related_object.model._meta.object_name.lower(),
)
to_field = to_state.apps.get_model(
*related_key
)._meta.get_field_by_name(related_object.field.name)[0]
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
def deconstruct(self):
kwargs = {
'name': self.name,
'table': self.table,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
state.reload_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
if new_field.rel.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.rel.through,
old_field.rel.through._meta.db_table,
new_field.rel.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
def deconstruct(self):
kwargs = {
'name': self.name,
'unique_together': self.unique_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.unique_together
state.reload_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
self.name = name
index_together = normalize_together(index_together)
self.index_together = set(tuple(cons) for cons in index_together)
def deconstruct(self):
kwargs = {
'name': self.name,
'index_together': self.index_together,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options[self.option_name] = self.index_together
state.reload_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(Operation):
"""
Represents a change with the order_with_respect_to option.
"""
def __init__(self, name, order_with_respect_to):
self.name = name
self.order_with_respect_to = order_with_respect_to
def deconstruct(self):
kwargs = {
'name': self.name,
'order_with_respect_to': self.order_with_respect_to,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options['order_with_respect_to'] = self.order_with_respect_to
state.reload_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name("_order")[0])
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:
field = to_model._meta.get_field_by_name("_order")[0]
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(Operation):
"""
Sets new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.name = name
self.options = options
def deconstruct(self):
kwargs = {
'name': self.name,
'options': self.options,
}
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options = dict(model_state.options)
model_state.options.update(self.options)
for key in self.ALTER_OPTION_KEYS:
if key not in self.options and key in model_state.options:
del model_state.options[key]
state.reload_model(app_label, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(Operation):
"""
Alters the model's managers
"""
serialization_expand_args = ['managers']
def __init__(self, name, managers):
self.name = name
self.managers = managers
def deconstruct(self):
return (
self.__class__.__name__,
[self.name, self.managers],
{}
)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.managers = list(self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Change managers on %s" % (self.name, )
|
bsd-3-clause
|
PulsePod/evepod
|
lib/python2.7/site-packages/werkzeug/testsuite/http.py
|
89
|
18911
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.http
~~~~~~~~~~~~~~~~~~~~~~~
HTTP parsing utilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from datetime import datetime
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug._compat import itervalues, wsgi_encoding_dance
from werkzeug import http, datastructures
from werkzeug.test import create_environ
class HTTPUtilityTestCase(WerkzeugTestCase):
def test_accept(self):
a = http.parse_accept_header('en-us,ru;q=0.5')
self.assert_equal(list(itervalues(a)), ['en-us', 'ru'])
self.assert_equal(a.best, 'en-us')
self.assert_equal(a.find('ru'), 1)
self.assert_raises(ValueError, a.index, 'de')
self.assert_equal(a.to_header(), 'en-us,ru;q=0.5')
def test_mime_accept(self):
a = http.parse_accept_header('text/xml,application/xml,'
'application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png,*/*;q=0.5',
datastructures.MIMEAccept)
self.assert_raises(ValueError, lambda: a['missing'])
self.assert_equal(a['image/png'], 1)
self.assert_equal(a['text/plain'], 0.8)
self.assert_equal(a['foo/bar'], 0.5)
self.assert_equal(a[a.find('foo/bar')], ('*/*', 0.5))
def test_accept_matches(self):
a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png', datastructures.MIMEAccept)
self.assert_equal(a.best_match(['text/html', 'application/xhtml+xml']),
'application/xhtml+xml')
self.assert_equal(a.best_match(['text/html']), 'text/html')
self.assert_true(a.best_match(['foo/bar']) is None)
self.assert_equal(a.best_match(['foo/bar', 'bar/foo'],
default='foo/bar'), 'foo/bar')
self.assert_equal(a.best_match(['application/xml', 'text/xml']), 'application/xml')
def test_charset_accept(self):
a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7',
datastructures.CharsetAccept)
self.assert_equal(a['iso-8859-1'], a['iso8859-1'])
self.assert_equal(a['iso-8859-1'], 1)
self.assert_equal(a['UTF8'], 0.7)
self.assert_equal(a['ebcdic'], 0.7)
def test_language_accept(self):
a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5',
datastructures.LanguageAccept)
self.assert_equal(a.best, 'de-AT')
self.assert_true('de_AT' in a)
self.assert_true('en' in a)
self.assert_equal(a['de-at'], 1)
self.assert_equal(a['en'], 0.5)
def test_set_header(self):
hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
self.assert_true('blah baz' in hs)
self.assert_true('foobar' not in hs)
self.assert_true('foo' in hs)
self.assert_equal(list(hs), ['foo', 'Bar', 'Blah baz', 'Hehe'])
hs.add('Foo')
self.assert_equal(hs.to_header(), 'foo, Bar, "Blah baz", Hehe')
def test_list_header(self):
hl = http.parse_list_header('foo baz, blah')
self.assert_equal(hl, ['foo baz', 'blah'])
def test_dict_header(self):
d = http.parse_dict_header('foo="bar baz", blah=42')
self.assert_equal(d, {'foo': 'bar baz', 'blah': '42'})
def test_cache_control_header(self):
cc = http.parse_cache_control_header('max-age=0, no-cache')
assert cc.max_age == 0
assert cc.no_cache
cc = http.parse_cache_control_header('private, community="UCI"', None,
datastructures.ResponseCacheControl)
assert cc.private
assert cc['community'] == 'UCI'
c = datastructures.ResponseCacheControl()
assert c.no_cache is None
assert c.private is None
c.no_cache = True
assert c.no_cache == '*'
c.private = True
assert c.private == '*'
del c.private
assert c.private is None
assert c.to_header() == 'no-cache'
def test_authorization_header(self):
a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
assert a.type == 'basic'
assert a.username == 'Aladdin'
assert a.password == 'open sesame'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="[email protected]",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
qop=auth,
nc=00000001,
cnonce="0a4f113b",
response="6629fae49393a05397450978507c4ef1",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == '[email protected]'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert 'auth' in a.qop
assert a.nc == '00000001'
assert a.cnonce == '0a4f113b'
assert a.response == '6629fae49393a05397450978507c4ef1'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
a = http.parse_authorization_header('''Digest username="Mufasa",
realm="[email protected]",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
response="e257afa1414a3340d93d30955171dd0e",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert a.type == 'digest'
assert a.username == 'Mufasa'
assert a.realm == '[email protected]'
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert a.uri == '/dir/index.html'
assert a.response == 'e257afa1414a3340d93d30955171dd0e'
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
assert http.parse_authorization_header('') is None
assert http.parse_authorization_header(None) is None
assert http.parse_authorization_header('foo') is None
def test_www_authenticate_header(self):
wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
assert wa.type == 'basic'
assert wa.realm == 'WallyWorld'
wa.realm = 'Foo Bar'
assert wa.to_header() == 'Basic realm="Foo Bar"'
wa = http.parse_www_authenticate_header('''Digest
realm="[email protected]",
qop="auth,auth-int",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
assert wa.type == 'digest'
assert wa.realm == '[email protected]'
assert 'auth' in wa.qop
assert 'auth-int' in wa.qop
assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
wa = http.parse_www_authenticate_header('broken')
assert wa.type == 'broken'
assert not http.parse_www_authenticate_header('').type
assert not http.parse_www_authenticate_header('')
def test_etags(self):
assert http.quote_etag('foo') == '"foo"'
assert http.quote_etag('foo', True) == 'w/"foo"'
assert http.unquote_etag('"foo"') == ('foo', False)
assert http.unquote_etag('w/"foo"') == ('foo', True)
es = http.parse_etags('"foo", "bar", w/"baz", blar')
assert sorted(es) == ['bar', 'blar', 'foo']
assert 'foo' in es
assert 'baz' not in es
assert es.contains_weak('baz')
assert 'blar' in es
assert es.contains_raw('w/"baz"')
assert es.contains_raw('"foo"')
assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'w/"baz"']
def test_etags_nonzero(self):
etags = http.parse_etags('w/"foo"')
self.assert_true(bool(etags))
self.assert_true(etags.contains_raw('w/"foo"'))
def test_parse_date(self):
assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT ') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date(' Sun Nov 6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37)
assert http.parse_date('foo') is None
def test_parse_date_overflows(self):
assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37)
assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0)
assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None
def test_remove_entity_headers(self):
now = http.http_date()
headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')]
headers2 = datastructures.Headers(headers1)
http.remove_entity_headers(headers1)
assert headers1 == [('Date', now)]
http.remove_entity_headers(headers2)
self.assert_equal(headers2, datastructures.Headers([(u'Date', now)]))
def test_remove_hop_by_hop_headers(self):
headers1 = [('Connection', 'closed'), ('Foo', 'bar'),
('Keep-Alive', 'wtf')]
headers2 = datastructures.Headers(headers1)
http.remove_hop_by_hop_headers(headers1)
assert headers1 == [('Foo', 'bar')]
http.remove_hop_by_hop_headers(headers2)
assert headers2 == datastructures.Headers([('Foo', 'bar')])
def test_parse_options_header(self):
assert http.parse_options_header(r'something; foo="other\"thing"') == \
('something', {'foo': 'other"thing'})
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == \
('something', {'foo': 'other"thing', 'meh': '42'})
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42; bleh') == \
('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None})
assert http.parse_options_header('something; foo="other;thing"; meh=42; bleh') == \
('something', {'foo': 'other;thing', 'meh': '42', 'bleh': None})
assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == \
('something', {'foo': 'otherthing', 'meh': None, 'bleh': None})
def test_dump_options_header(self):
assert http.dump_options_header('foo', {'bar': 42}) == \
'foo; bar=42'
assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) in \
('foo; bar=42; fizz', 'foo; fizz; bar=42')
def test_dump_header(self):
assert http.dump_header([1, 2, 3]) == '1, 2, 3'
assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"'
assert http.dump_header({'foo': 'bar'}) == 'foo=bar'
def test_is_resource_modified(self):
env = create_environ()
# ignore POST
env['REQUEST_METHOD'] = 'POST'
assert not http.is_resource_modified(env, etag='testing')
env['REQUEST_METHOD'] = 'GET'
# etagify from data
self.assert_raises(TypeError, http.is_resource_modified, env,
data='42', etag='23')
env['HTTP_IF_NONE_MATCH'] = http.generate_etag(b'awesome')
assert not http.is_resource_modified(env, data=b'awesome')
env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30))
assert not http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 12, 00))
assert http.is_resource_modified(env,
last_modified=datetime(2008, 1, 1, 13, 00))
def test_date_formatting(self):
assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT'
assert http.http_date(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'
assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT'
def test_cookies(self):
self.assert_strict_equal(
dict(http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd'
'c762809248d4beed; a=42; b="\\\";"')),
{
'CP': u'null*',
'PHPSESSID': u'0a539d42abc001cdc762809248d4beed',
'a': u'42',
'dismiss-top': u'6',
'b': u'\";'
}
)
self.assert_strict_equal(
set(http.dump_cookie('foo', 'bar baz blub', 360, httponly=True,
sync_expires=False).split(u'; ')),
set([u'HttpOnly', u'Max-Age=360', u'Path=/', u'foo="bar baz blub"'])
)
self.assert_strict_equal(dict(http.parse_cookie('fo234{=bar; blub=Blah')),
{'fo234{': u'bar', 'blub': u'Blah'})
def test_cookie_quoting(self):
val = http.dump_cookie("foo", "?foo")
self.assert_strict_equal(val, 'foo="?foo"; Path=/')
self.assert_strict_equal(dict(http.parse_cookie(val)), {'foo': u'?foo'})
self.assert_strict_equal(dict(http.parse_cookie(r'foo="foo\054bar"')),
{'foo': u'foo,bar'})
def test_cookie_domain_resolving(self):
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
def test_cookie_unicode_dumping(self):
val = http.dump_cookie('foo', u'\N{SNOWMAN}')
h = datastructures.Headers()
h.add('Set-Cookie', val)
self.assert_equal(h['Set-Cookie'], 'foo="\\342\\230\\203"; Path=/')
cookies = http.parse_cookie(h['Set-Cookie'])
self.assert_equal(cookies['foo'], u'\N{SNOWMAN}')
def test_cookie_unicode_keys(self):
# Yes, this is technically against the spec but happens
val = http.dump_cookie(u'fö', u'fö')
self.assert_equal(val, wsgi_encoding_dance(u'fö="f\\303\\266"; Path=/', 'utf-8'))
cookies = http.parse_cookie(val)
self.assert_equal(cookies[u'fö'], u'fö')
def test_cookie_unicode_parsing(self):
# This is actually a correct test. This is what is being submitted
# by Firefox if you set a unicode cookie and we get the cookie sent
# in on Python 3 under PEP 3333.
cookies = http.parse_cookie(u'fö=fö')
self.assert_equal(cookies[u'fö'], u'fö')
def test_cookie_domain_encoding(self):
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
val = http.dump_cookie('foo', 'bar', domain=u'.\N{SNOWMAN}.com')
self.assert_strict_equal(val, 'foo=bar; Domain=.xn--n3h.com; Path=/')
val = http.dump_cookie('foo', 'bar', domain=u'.foo.com')
self.assert_strict_equal(val, 'foo=bar; Domain=.foo.com; Path=/')
class RangeTestCase(WerkzeugTestCase):
def test_if_range_parsing(self):
rv = http.parse_if_range_header('"Test"')
assert rv.etag == 'Test'
assert rv.date is None
assert rv.to_header() == '"Test"'
# weak information is dropped
rv = http.parse_if_range_header('w/"Test"')
assert rv.etag == 'Test'
assert rv.date is None
assert rv.to_header() == '"Test"'
# broken etags are supported too
rv = http.parse_if_range_header('bullshit')
assert rv.etag == 'bullshit'
assert rv.date is None
assert rv.to_header() == '"bullshit"'
rv = http.parse_if_range_header('Thu, 01 Jan 1970 00:00:00 GMT')
assert rv.etag is None
assert rv.date == datetime(1970, 1, 1)
assert rv.to_header() == 'Thu, 01 Jan 1970 00:00:00 GMT'
for x in '', None:
rv = http.parse_if_range_header(x)
assert rv.etag is None
assert rv.date is None
assert rv.to_header() == ''
def test_range_parsing():
rv = http.parse_range_header('bytes=52')
assert rv is None
rv = http.parse_range_header('bytes=52-')
assert rv.units == 'bytes'
assert rv.ranges == [(52, None)]
assert rv.to_header() == 'bytes=52-'
rv = http.parse_range_header('bytes=52-99')
assert rv.units == 'bytes'
assert rv.ranges == [(52, 100)]
assert rv.to_header() == 'bytes=52-99'
rv = http.parse_range_header('bytes=52-99,-1000')
assert rv.units == 'bytes'
assert rv.ranges == [(52, 100), (-1000, None)]
assert rv.to_header() == 'bytes=52-99,-1000'
rv = http.parse_range_header('bytes = 1 - 100')
assert rv.units == 'bytes'
assert rv.ranges == [(1, 101)]
assert rv.to_header() == 'bytes=1-100'
rv = http.parse_range_header('AWesomes=0-999')
assert rv.units == 'awesomes'
assert rv.ranges == [(0, 1000)]
assert rv.to_header() == 'awesomes=0-999'
def test_content_range_parsing():
rv = http.parse_content_range_header('bytes 0-98/*')
assert rv.units == 'bytes'
assert rv.start == 0
assert rv.stop == 99
assert rv.length is None
assert rv.to_header() == 'bytes 0-98/*'
rv = http.parse_content_range_header('bytes 0-98/*asdfsa')
assert rv is None
rv = http.parse_content_range_header('bytes 0-99/100')
assert rv.to_header() == 'bytes 0-99/100'
rv.start = None
rv.stop = None
assert rv.units == 'bytes'
assert rv.to_header() == 'bytes */100'
rv = http.parse_content_range_header('bytes */100')
assert rv.start is None
assert rv.stop is None
assert rv.length == 100
assert rv.units == 'bytes'
class RegressionTestCase(WerkzeugTestCase):
def test_best_match_works(self):
# was a bug in 0.6
rv = http.parse_accept_header('foo=,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,'
'image/png,*/*;q=0.5',
datastructures.MIMEAccept).best_match(['foo/bar'])
self.assert_equal(rv, 'foo/bar')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(HTTPUtilityTestCase))
suite.addTest(unittest.makeSuite(RegressionTestCase))
return suite
|
apache-2.0
|
wuhengzhi/chromium-crosswalk
|
native_client_sdk/src/build_tools/sdk_tools/command/update.py
|
15
|
13917
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import copy
import logging
import os
import subprocess
import sys
import urlparse
import urllib2
import command_common
import download
from sdk_update_common import Error
import sdk_update_common
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(PARENT_DIR)
try:
import cygtar
except ImportError:
# Try to find this in the Chromium repo.
CHROME_SRC_DIR = os.path.abspath(
os.path.join(PARENT_DIR, '..', '..', '..', '..'))
sys.path.append(os.path.join(CHROME_SRC_DIR, 'native_client', 'build'))
import cygtar
RECOMMENDED = 'recommended'
SDK_TOOLS = 'sdk_tools'
HTTP_CONTENT_LENGTH = 'Content-Length' # HTTP Header field for content length
DEFAULT_CACHE_SIZE = 512 * 1024 * 1024 # 1/2 GB cache by default
class UpdateDelegate(object):
def BundleDirectoryExists(self, bundle_name):
raise NotImplementedError()
def DownloadToFile(self, url, dest_filename):
raise NotImplementedError()
def ExtractArchives(self, archives, extract_dir, rename_from_dir,
rename_to_dir):
raise NotImplementedError()
class RealUpdateDelegate(UpdateDelegate):
def __init__(self, user_data_dir, install_dir, cfg):
UpdateDelegate.__init__(self)
self.archive_cache = os.path.join(user_data_dir, 'archives')
self.install_dir = install_dir
self.cache_max = getattr(cfg, 'cache_max', DEFAULT_CACHE_SIZE)
def BundleDirectoryExists(self, bundle_name):
bundle_path = os.path.join(self.install_dir, bundle_name)
return os.path.isdir(bundle_path)
def VerifyDownload(self, filename, archive):
"""Verify that a local filename in the cache matches the given
online archive.
Returns True if both size and sha1 match, False otherwise.
"""
filename = os.path.join(self.archive_cache, filename)
if not os.path.exists(filename):
logging.info('File does not exist: %s.' % filename)
return False
size = os.path.getsize(filename)
if size != archive.size:
logging.info('File size does not match (%d vs %d): %s.' % (size,
archive.size, filename))
return False
sha1_hash = hashlib.sha1()
with open(filename, 'rb') as f:
sha1_hash.update(f.read())
if sha1_hash.hexdigest() != archive.GetChecksum():
logging.info('File hash does not match: %s.' % filename)
return False
return True
def BytesUsedInCache(self):
"""Determine number of bytes currently be in local archive cache."""
total = 0
for root, _, files in os.walk(self.archive_cache):
for filename in files:
total += os.path.getsize(os.path.join(root, filename))
return total
def CleanupCache(self):
"""Remove archives from the local filesystem cache until the
total size is below cache_max.
This is done by deleting the oldest archive files until the
condition is satisfied. If cache_max is zero then the entire
cache will be removed.
"""
used = self.BytesUsedInCache()
logging.info('Cache usage: %d / %d' % (used, self.cache_max))
if used <= self.cache_max:
return
clean_bytes = used - self.cache_max
logging.info('Clearing %d bytes in archive cache' % clean_bytes)
file_timestamps = []
for root, _, files in os.walk(self.archive_cache):
for filename in files:
fullname = os.path.join(root, filename)
file_timestamps.append((os.path.getmtime(fullname), fullname))
file_timestamps.sort()
while clean_bytes > 0:
assert(file_timestamps)
filename_to_remove = file_timestamps[0][1]
clean_bytes -= os.path.getsize(filename_to_remove)
logging.info('Removing from cache: %s' % filename_to_remove)
os.remove(filename_to_remove)
# Also remove resulting empty parent directory structure
while True:
filename_to_remove = os.path.dirname(filename_to_remove)
if not os.listdir(filename_to_remove):
os.rmdir(filename_to_remove)
else:
break
file_timestamps = file_timestamps[1:]
def DownloadToFile(self, url, dest_filename):
dest_path = os.path.join(self.archive_cache, dest_filename)
sdk_update_common.MakeDirs(os.path.dirname(dest_path))
out_stream = None
url_stream = None
try:
out_stream = open(dest_path, 'wb')
url_stream = download.UrlOpen(url)
content_length = int(url_stream.info()[HTTP_CONTENT_LENGTH])
progress = download.MakeProgressFunction(content_length)
sha1, size = download.DownloadAndComputeHash(url_stream, out_stream,
progress)
return sha1, size
except urllib2.URLError as e:
raise Error('Unable to read from URL "%s".\n %s' % (url, e))
except IOError as e:
raise Error('Unable to write to file "%s".\n %s' % (dest_filename, e))
finally:
if url_stream:
url_stream.close()
if out_stream:
out_stream.close()
def ExtractArchives(self, archives, extract_dir, rename_from_dir,
rename_to_dir):
tar_file = None
extract_path = os.path.join(self.install_dir, extract_dir)
rename_from_path = os.path.join(self.install_dir, rename_from_dir)
rename_to_path = os.path.join(self.install_dir, rename_to_dir)
# Extract to extract_dir, usually "<bundle name>_update".
# This way if the extraction fails, we haven't blown away the old bundle
# (if it exists).
sdk_update_common.RemoveDir(extract_path)
sdk_update_common.MakeDirs(extract_path)
curpath = os.getcwd()
tar_file = None
try:
try:
logging.info('Changing the directory to %s' % (extract_path,))
os.chdir(extract_path)
except Exception as e:
raise Error('Unable to chdir into "%s".\n %s' % (extract_path, e))
for i, archive in enumerate(archives):
archive_path = os.path.join(self.archive_cache, archive)
if len(archives) > 1:
print '(file %d/%d - "%s")' % (
i + 1, len(archives), os.path.basename(archive_path))
logging.info('Extracting to %s' % (extract_path,))
if sys.platform == 'win32':
try:
logging.info('Opening file %s (%d/%d).' % (archive_path, i + 1,
len(archives)))
try:
tar_file = cygtar.CygTar(archive_path, 'r', verbose=True)
except Exception as e:
raise Error("Can't open archive '%s'.\n %s" % (archive_path, e))
tar_file.Extract()
finally:
if tar_file:
tar_file.Close()
else:
try:
subprocess.check_call(['tar', 'xf', archive_path])
except subprocess.CalledProcessError:
raise Error('Error extracting archive: %s' % archive_path)
logging.info('Changing the directory to %s' % (curpath,))
os.chdir(curpath)
logging.info('Renaming %s->%s' % (rename_from_path, rename_to_path))
sdk_update_common.RenameDir(rename_from_path, rename_to_path)
finally:
# Change the directory back so we can remove the update directory.
os.chdir(curpath)
# Clean up the ..._update directory.
try:
sdk_update_common.RemoveDir(extract_path)
except Exception as e:
logging.error('Failed to remove directory \"%s\". %s' % (
extract_path, e))
def Update(delegate, remote_manifest, local_manifest, bundle_names, force):
valid_bundles = set([bundle.name for bundle in remote_manifest.GetBundles()])
requested_bundles = _GetRequestedBundleNamesFromArgs(remote_manifest,
bundle_names)
invalid_bundles = requested_bundles - valid_bundles
if invalid_bundles:
logging.warn('Ignoring unknown bundle(s): %s' % (
', '.join(invalid_bundles)))
requested_bundles -= invalid_bundles
if SDK_TOOLS in requested_bundles:
logging.warn('Updating sdk_tools happens automatically. '
'Ignoring manual update request.')
requested_bundles.discard(SDK_TOOLS)
if requested_bundles:
for bundle_name in requested_bundles:
logging.info('Trying to update %s' % (bundle_name,))
UpdateBundleIfNeeded(delegate, remote_manifest, local_manifest,
bundle_name, force)
else:
logging.warn('No bundles to update.')
def Reinstall(delegate, local_manifest, bundle_names):
valid_bundles, invalid_bundles = \
command_common.GetValidBundles(local_manifest, bundle_names)
if invalid_bundles:
logging.warn('Unknown bundle(s): %s\n' % (', '.join(invalid_bundles)))
if not valid_bundles:
logging.warn('No bundles to reinstall.')
return
for bundle_name in valid_bundles:
bundle = copy.deepcopy(local_manifest.GetBundle(bundle_name))
# HACK(binji): There was a bug where we'd merge the bundles from the old
# archive and the new archive when updating. As a result, some users may
# have a cache manifest that contains duplicate archives. Remove all
# archives with the same basename except for the most recent.
# Because the archives are added to a list, we know the most recent is at
# the end.
archives = {}
for archive in bundle.GetArchives():
url = archive.url
path = urlparse.urlparse(url)[2]
basename = os.path.basename(path)
archives[basename] = archive
# Update the bundle with these new archives.
bundle.RemoveAllArchives()
for _, archive in archives.iteritems():
bundle.AddArchive(archive)
_UpdateBundle(delegate, bundle, local_manifest)
def UpdateBundleIfNeeded(delegate, remote_manifest, local_manifest,
bundle_name, force):
bundle = remote_manifest.GetBundle(bundle_name)
if bundle:
if _BundleNeedsUpdate(delegate, local_manifest, bundle):
# TODO(binji): It would be nicer to detect whether the user has any
# modifications to the bundle. If not, we could update with impunity.
if not force and delegate.BundleDirectoryExists(bundle_name):
print ('%s already exists, but has an update available.\n'
'Run update with the --force option to overwrite the '
'existing directory.\nWarning: This will overwrite any '
'modifications you have made within this directory.'
% (bundle_name,))
return
_UpdateBundle(delegate, bundle, local_manifest)
else:
print '%s is already up-to-date.' % (bundle.name,)
else:
logging.error('Bundle %s does not exist.' % (bundle_name,))
def _GetRequestedBundleNamesFromArgs(remote_manifest, requested_bundles):
requested_bundles = set(requested_bundles)
if RECOMMENDED in requested_bundles:
requested_bundles.discard(RECOMMENDED)
requested_bundles |= set(_GetRecommendedBundleNames(remote_manifest))
return requested_bundles
def _GetRecommendedBundleNames(remote_manifest):
result = []
for bundle in remote_manifest.GetBundles():
if bundle.recommended == 'yes' and bundle.name != SDK_TOOLS:
result.append(bundle.name)
return result
def _BundleNeedsUpdate(delegate, local_manifest, bundle):
# Always update the bundle if the directory doesn't exist;
# the user may have deleted it.
if not delegate.BundleDirectoryExists(bundle.name):
return True
return local_manifest.BundleNeedsUpdate(bundle)
def _UpdateBundle(delegate, bundle, local_manifest):
archives = bundle.GetHostOSArchives()
if not archives:
logging.warn('Bundle %s does not exist for this platform.' % (bundle.name,))
return
archive_filenames = []
shown_banner = False
for i, archive in enumerate(archives):
archive_filename = _GetFilenameFromURL(archive.url)
archive_filename = os.path.join(bundle.name, archive_filename)
if not delegate.VerifyDownload(archive_filename, archive):
if not shown_banner:
shown_banner = True
print 'Downloading bundle %s' % (bundle.name,)
if len(archives) > 1:
print '(file %d/%d - "%s")' % (
i + 1, len(archives), os.path.basename(archive.url))
sha1, size = delegate.DownloadToFile(archive.url, archive_filename)
_ValidateArchive(archive, sha1, size)
archive_filenames.append(archive_filename)
print 'Updating bundle %s to version %s, revision %s' % (
bundle.name, bundle.version, bundle.revision)
extract_dir = bundle.name + '_update'
repath_dir = bundle.get('repath', None)
if repath_dir:
# If repath is specified:
# The files are extracted to nacl_sdk/<bundle.name>_update/<repath>/...
# The destination directory is nacl_sdk/<bundle.name>/...
rename_from_dir = os.path.join(extract_dir, repath_dir)
else:
# If no repath is specified:
# The files are extracted to nacl_sdk/<bundle.name>_update/...
# The destination directory is nacl_sdk/<bundle.name>/...
rename_from_dir = extract_dir
rename_to_dir = bundle.name
delegate.ExtractArchives(archive_filenames, extract_dir, rename_from_dir,
rename_to_dir)
logging.info('Updating local manifest to include bundle %s' % (bundle.name))
local_manifest.RemoveBundle(bundle.name)
local_manifest.SetBundle(bundle)
delegate.CleanupCache()
def _GetFilenameFromURL(url):
path = urlparse.urlparse(url)[2]
return os.path.basename(path)
def _ValidateArchive(archive, actual_sha1, actual_size):
if actual_size != archive.size:
raise Error('Size mismatch on "%s". Expected %s but got %s bytes' % (
archive.url, archive.size, actual_size))
if actual_sha1 != archive.GetChecksum():
raise Error('SHA1 checksum mismatch on "%s". Expected %s but got %s' % (
archive.url, archive.GetChecksum(), actual_sha1))
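# --- Illustrative sketch (not part of the SDK tools): a standalone version of
# the size + SHA-1 check that RealUpdateDelegate.VerifyDownload performs on a
# cached archive. 'path' and the expected values are placeholders; hashlib and
# os are already imported at the top of this module.
def _sketch_verify_archive(path, expected_size, expected_sha1):
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        digest.update(f.read())
    return digest.hexdigest() == expected_sha1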
|
bsd-3-clause
|
googyanas/Googy-Max4-CM-Kernel
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
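# The scanner below reads include/target/target_core_fabric.h and records each
# "(*func)" member line of struct target_core_fabric_ops into fabric_ops;
# tcm_mod_dump_fabric_ops() then emits a stub definition and prototype for
# every recorded entry.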
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
samuell/luigi
|
test/contrib/esindex_test.py
|
14
|
12176
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Tests for Elasticsearch index (esindex) target and indexing.
An Elasticsearch server must be running for these tests.
To use a non-standard host and port, set the `ESINDEX_TEST_HOST` and
`ESINDEX_TEST_PORT` environment variables to override the defaults.
To test HTTP basic authentication, set `ESINDEX_TEST_HTTP_AUTH`.
Example running tests against port 9201 with basic auth:
$ ESINDEX_TEST_PORT=9201 ESINDEX_TEST_HTTP_AUTH='admin:admin' nosetests test/contrib/esindex_test.py
"""
# pylint: disable=C0103,E1101,F0401
import collections
import datetime
import os
from helpers import unittest
import elasticsearch
import luigi
from elasticsearch.connection import Urllib3HttpConnection
from luigi.contrib.esindex import CopyToIndex, ElasticsearchTarget
HOST = os.getenv('ESINDEX_TEST_HOST', 'localhost')
PORT = os.getenv('ESINDEX_TEST_PORT', 9200)
HTTP_AUTH = os.getenv('ESINDEX_TEST_HTTP_AUTH', None)
INDEX = 'esindex_luigi_test'
DOC_TYPE = 'esindex_test_type'
MARKER_INDEX = 'esindex_luigi_test_index_updates'
MARKER_DOC_TYPE = 'esindex_test_entry'
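# The tasks under test mark completion by touching a document in a separate
# marker index (ElasticsearchTarget.touch()/exists()); dedicated test index
# names are used so delete()/_cleanup() can safely drop them between tests.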
def _create_test_index():
""" Create content index, if if does not exists. """
es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
if not es.indices.exists(INDEX):
es.indices.create(INDEX)
try:
_create_test_index()
except Exception:
raise unittest.SkipTest('Unable to connect to ElasticSearch')
class ElasticsearchTargetTest(unittest.TestCase):
""" Test touch and exists. """
def test_touch_and_exists(self):
""" Basic test. """
target = ElasticsearchTarget(HOST, PORT, INDEX, DOC_TYPE, 'update_id', http_auth=HTTP_AUTH)
target.marker_index = MARKER_INDEX
target.marker_doc_type = MARKER_DOC_TYPE
delete()
self.assertFalse(target.exists(),
'Target should not exist before touching it')
target.touch()
self.assertTrue(target.exists(),
'Target should exist after touching it')
delete()
def delete():
""" Delete marker_index, if it exists. """
es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
if es.indices.exists(MARKER_INDEX):
es.indices.delete(MARKER_INDEX)
es.indices.refresh()
class CopyToTestIndex(CopyToIndex):
""" Override the default `marker_index` table with a test name. """
host = HOST
port = PORT
http_auth = HTTP_AUTH
index = INDEX
doc_type = DOC_TYPE
marker_index_hist_size = 0
def output(self):
""" Use a test target with an own marker_index. """
target = ElasticsearchTarget(
host=self.host,
port=self.port,
http_auth=self.http_auth,
index=self.index,
doc_type=self.doc_type,
update_id=self.update_id(),
marker_index_hist_size=self.marker_index_hist_size
)
target.marker_index = MARKER_INDEX
target.marker_doc_type = MARKER_DOC_TYPE
return target
class IndexingTask1(CopyToTestIndex):
""" Test the redundant version, where `_index` and `_type` are
given in the `docs` as well. A more DRY example is `IndexingTask2`. """
def docs(self):
""" Return a list with a single doc. """
return [{'_id': 123, '_index': self.index, '_type': self.doc_type,
'name': 'sample', 'date': 'today'}]
class IndexingTask2(CopyToTestIndex):
""" Just another task. """
def docs(self):
""" Return a list with a single doc. """
return [{'_id': 234, '_index': self.index, '_type': self.doc_type,
'name': 'another', 'date': 'today'}]
class IndexingTask3(CopyToTestIndex):
""" This task will request an empty index to start with. """
purge_existing_index = True
def docs(self):
""" Return a list with a single doc. """
return [{'_id': 234, '_index': self.index, '_type': self.doc_type,
'name': 'yet another', 'date': 'today'}]
def _cleanup():
""" Delete both the test marker index and the content index. """
es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
if es.indices.exists(MARKER_INDEX):
es.indices.delete(MARKER_INDEX)
if es.indices.exists(INDEX):
es.indices.delete(INDEX)
class CopyToIndexTest(unittest.TestCase):
""" Test indexing tasks. """
@classmethod
def setUpClass(cls):
cls.es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
def setUp(self):
""" Cleanup before each test. """
_cleanup()
def tearDown(self):
""" Remove residues after each test. """
_cleanup()
def test_copy_to_index(self):
""" Test a single document upload. """
task = IndexingTask1()
self.assertFalse(self.es.indices.exists(task.index))
self.assertFalse(task.complete())
luigi.build([task], local_scheduler=True)
self.assertTrue(self.es.indices.exists(task.index))
self.assertTrue(task.complete())
self.assertEqual(1, self.es.count(index=task.index).get('count'))
self.assertEqual({u'date': u'today', u'name': u'sample'},
self.es.get_source(index=task.index,
doc_type=task.doc_type, id=123))
def test_copy_to_index_incrementally(self):
""" Test two tasks that upload docs into the same index. """
task1 = IndexingTask1()
task2 = IndexingTask2()
self.assertFalse(self.es.indices.exists(task1.index))
self.assertFalse(self.es.indices.exists(task2.index))
self.assertFalse(task1.complete())
self.assertFalse(task2.complete())
luigi.build([task1, task2], local_scheduler=True)
self.assertTrue(self.es.indices.exists(task1.index))
self.assertTrue(self.es.indices.exists(task2.index))
self.assertTrue(task1.complete())
self.assertTrue(task2.complete())
self.assertEqual(2, self.es.count(index=task1.index).get('count'))
self.assertEqual(2, self.es.count(index=task2.index).get('count'))
self.assertEqual({u'date': u'today', u'name': u'sample'},
self.es.get_source(index=task1.index,
doc_type=task1.doc_type, id=123))
self.assertEqual({u'date': u'today', u'name': u'another'},
self.es.get_source(index=task2.index,
doc_type=task2.doc_type, id=234))
def test_copy_to_index_purge_existing(self):
""" Test purge_existing_index purges index. """
task1 = IndexingTask1()
task2 = IndexingTask2()
task3 = IndexingTask3()
luigi.build([task1, task2], local_scheduler=True)
luigi.build([task3], local_scheduler=True)
self.assertTrue(self.es.indices.exists(task3.index))
self.assertTrue(task3.complete())
self.assertEqual(1, self.es.count(index=task3.index).get('count'))
self.assertEqual({u'date': u'today', u'name': u'yet another'},
self.es.get_source(index=task3.index,
doc_type=task3.doc_type, id=234))
class MarkerIndexTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
def setUp(self):
""" Cleanup before each test. """
_cleanup()
def tearDown(self):
""" Remove residues after each test. """
_cleanup()
def test_update_marker(self):
def will_raise():
self.es.count(
index=MARKER_INDEX,
doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}}
)
self.assertRaises(elasticsearch.NotFoundError, will_raise)
task1 = IndexingTask1()
luigi.build([task1], local_scheduler=True)
result = self.es.count(index=MARKER_INDEX, doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}})
self.assertEqual(1, result.get('count'))
result = self.es.search(index=MARKER_INDEX, doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}})
marker_doc = result.get('hits').get('hits')[0].get('_source')
self.assertEqual(task1.task_id, marker_doc.get('update_id'))
self.assertEqual(INDEX, marker_doc.get('target_index'))
self.assertEqual(DOC_TYPE, marker_doc.get('target_doc_type'))
self.assertTrue('date' in marker_doc)
task2 = IndexingTask2()
luigi.build([task2], local_scheduler=True)
result = self.es.count(index=MARKER_INDEX, doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}})
self.assertEqual(2, result.get('count'))
result = self.es.search(index=MARKER_INDEX, doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}})
hits = result.get('hits').get('hits')
Entry = collections.namedtuple('Entry', ['date', 'update_id'])
dates_update_id = []
for hit in hits:
source = hit.get('_source')
update_id = source.get('update_id')
date = source.get('date')
dates_update_id.append(Entry(date, update_id))
it = iter(sorted(dates_update_id))
first = next(it)
second = next(it)
self.assertTrue(first.date < second.date)
self.assertEqual(first.update_id, task1.task_id)
self.assertEqual(second.update_id, task2.task_id)
class IndexingTask4(CopyToTestIndex):
""" Just another task. """
date = luigi.DateParameter(default=datetime.date(1970, 1, 1))
marker_index_hist_size = 1
def docs(self):
""" Return a list with a single doc. """
return [{'_id': 234, '_index': self.index, '_type': self.doc_type,
'name': 'another', 'date': 'today'}]
class IndexHistSizeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection, host=HOST, port=PORT, http_auth=HTTP_AUTH)
def setUp(self):
""" Cleanup before each test. """
_cleanup()
def tearDown(self):
""" Remove residues after each test. """
_cleanup()
def test_limited_history(self):
task4_1 = IndexingTask4(date=datetime.date(2000, 1, 1))
luigi.build([task4_1], local_scheduler=True)
task4_2 = IndexingTask4(date=datetime.date(2001, 1, 1))
luigi.build([task4_2], local_scheduler=True)
task4_3 = IndexingTask4(date=datetime.date(2002, 1, 1))
luigi.build([task4_3], local_scheduler=True)
result = self.es.count(index=MARKER_INDEX, doc_type=MARKER_DOC_TYPE,
body={'query': {'match_all': {}}})
self.assertEqual(1, result.get('count'))
marker_index_document_id = task4_3.output().marker_index_document_id()
result = self.es.get(id=marker_index_document_id, index=MARKER_INDEX,
doc_type=MARKER_DOC_TYPE)
self.assertEqual(task4_3.task_id,
result.get('_source').get('update_id'))
|
apache-2.0
|
zozo123/buildbot
|
master/buildbot/test/unit/test_data_steps.py
|
1
|
13740
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot.data import steps
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.util import epoch2datetime
from twisted.internet import defer
from twisted.internet import reactor
from twisted.trial import unittest
TIME1 = 2001111
TIME2 = 2002222
TIME3 = 2003333
class StepEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = steps.StepEndpoint
resourceTypeClass = steps.Step
def setUp(self):
self.setUpEndpoint()
self.db.insertTestData([
fakedb.Buildslave(id=47, name='linux'),
fakedb.Builder(id=77),
fakedb.Master(id=88),
fakedb.Buildset(id=8822),
fakedb.BuildRequest(id=82, buildsetid=8822),
fakedb.Build(id=30, builderid=77, number=7, masterid=88,
buildrequestid=82, buildslaveid=47),
fakedb.Step(id=70, number=0, name='one', buildid=30,
started_at=TIME1, complete_at=TIME2, results=0),
fakedb.Step(id=71, number=1, name='two', buildid=30,
started_at=TIME2, complete_at=TIME3, results=2,
urls_json='[{"name":"url","url":"http://url"}]'),
fakedb.Step(id=72, number=2, name='three', buildid=30,
started_at=TIME3),
])
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
step = yield self.callGet(('steps', 72))
self.validateData(step)
self.assertEqual(step, {
'buildid': 30,
'complete': False,
'complete_at': None,
'name': u'three',
'number': 2,
'results': None,
'started_at': epoch2datetime(TIME3),
'state_strings': [],
'stepid': 72,
'urls': []})
@defer.inlineCallbacks
def test_get_existing_buildid_name(self):
step = yield self.callGet(('builds', 30, 'steps', 'two'))
self.validateData(step)
self.assertEqual(step['stepid'], 71)
@defer.inlineCallbacks
def test_get_existing_buildid_number(self):
step = yield self.callGet(('builds', 30, 'steps', 1))
self.validateData(step)
self.assertEqual(step['stepid'], 71)
@defer.inlineCallbacks
def test_get_existing_builder_name(self):
step = yield self.callGet(('builders', 77, 'builds', 7, 'steps', 'two'))
self.validateData(step)
self.assertEqual(step['stepid'], 71)
@defer.inlineCallbacks
def test_get_existing_builder_number(self):
step = yield self.callGet(('builders', 77, 'builds', 7, 'steps', 1))
self.validateData(step)
self.assertEqual(step['stepid'], 71)
@defer.inlineCallbacks
def test_get_missing(self):
step = yield self.callGet(('steps', 9999))
self.assertEqual(step, None)
class StepsEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = steps.StepsEndpoint
resourceTypeClass = steps.Step
def setUp(self):
self.setUpEndpoint()
self.db.insertTestData([
fakedb.Buildslave(id=47, name='linux'),
fakedb.Builder(id=77),
fakedb.Master(id=88),
fakedb.Buildset(id=8822),
fakedb.BuildRequest(id=82, buildsetid=8822),
fakedb.Build(id=30, builderid=77, number=7, masterid=88,
buildrequestid=82, buildslaveid=47),
fakedb.Build(id=31, builderid=77, number=8, masterid=88,
buildrequestid=82, buildslaveid=47),
fakedb.Step(id=70, number=0, name='one', buildid=30,
started_at=TIME1, complete_at=TIME2, results=0),
fakedb.Step(id=71, number=1, name='two', buildid=30,
started_at=TIME2, complete_at=TIME3, results=2,
urls_json='[{"name":"url","url":"http://url"}]'),
fakedb.Step(id=72, number=2, name='three', buildid=30,
started_at=TIME3),
fakedb.Step(id=73, number=0, name='otherbuild', buildid=31,
started_at=TIME2),
])
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_buildid(self):
steps = yield self.callGet(('builds', 30, 'steps'))
[self.validateData(step) for step in steps]
self.assertEqual([s['number'] for s in steps], [0, 1, 2])
@defer.inlineCallbacks
def test_get_builder(self):
steps = yield self.callGet(('builders', 77, 'builds', 7, 'steps'))
[self.validateData(step) for step in steps]
self.assertEqual([s['number'] for s in steps], [0, 1, 2])
class Step(interfaces.InterfaceTests, unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master(testcase=self,
wantMq=True, wantDb=True, wantData=True)
self.rtype = steps.Step(self.master)
def do_test_callthrough(self, dbMethodName, method, exp_args=None,
exp_kwargs=None, *args, **kwargs):
rv = defer.succeed(None)
m = mock.Mock(return_value=rv)
setattr(self.master.db.steps, dbMethodName, m)
self.assertIdentical(method(*args, **kwargs), rv)
m.assert_called_with(*(exp_args or args), **(exp_kwargs or kwargs))
def test_signature_newStep(self):
@self.assertArgSpecMatches(
self.master.data.updates.newStep, # fake
self.rtype.newStep) # real
def newStep(self, buildid, name):
pass
@defer.inlineCallbacks
def test_newStep(self):
stepid, number, name = yield self.rtype.newStep(buildid=10,
name=u'name')
msgBody = {
'buildid': 10,
'complete': False,
'complete_at': None,
'name': name,
'number': number,
'results': None,
'started_at': None,
'state_strings': [u'pending'],
'stepid': stepid,
'urls': [],
}
self.master.mq.assertProductions([
(('builds', '10', 'steps', str(stepid), 'new'), msgBody),
(('steps', str(stepid), 'new'), msgBody),
])
step = yield self.master.db.steps.getStep(stepid)
self.assertEqual(step, {
'buildid': 10,
'complete_at': None,
'id': stepid,
'name': name,
'number': number,
'results': None,
'started_at': None,
'state_strings': [u'pending'],
'urls': [],
})
@defer.inlineCallbacks
def test_fake_newStep(self):
self.assertEqual(
len((yield self.master.data.updates.newStep(buildid=10,
name=u'ten'))),
3)
def test_signature_startStep(self):
@self.assertArgSpecMatches(
self.master.data.updates.startStep, # fake
self.rtype.startStep) # real
def newStep(self, stepid):
pass
@defer.inlineCallbacks
def test_startStep(self):
self.patch(reactor, 'seconds', lambda: TIME1)
yield self.master.db.steps.addStep(buildid=10, name=u'ten',
state_strings=[u'pending'])
yield self.rtype.startStep(stepid=100)
msgBody = {
'buildid': 10,
'complete': False,
'complete_at': None,
'name': u'ten',
'number': 0,
'results': None,
'started_at': epoch2datetime(TIME1),
'state_strings': [u'pending'],
'stepid': 100,
'urls': [],
}
self.master.mq.assertProductions([
(('builds', '10', 'steps', str(100), 'started'), msgBody),
(('steps', str(100), 'started'), msgBody),
])
step = yield self.master.db.steps.getStep(100)
self.assertEqual(step, {
'buildid': 10,
'complete_at': None,
'id': 100,
'name': u'ten',
'number': 0,
'results': None,
'started_at': epoch2datetime(TIME1),
'state_strings': [u'pending'],
'urls': [],
})
def test_signature_setStepStateStrings(self):
@self.assertArgSpecMatches(
self.master.data.updates.setStepStateStrings, # fake
self.rtype.setStepStateStrings) # real
def setStepStateStrings(self, stepid, state_strings):
pass
@defer.inlineCallbacks
def test_setStepStateStrings(self):
yield self.master.db.steps.addStep(buildid=10, name=u'ten',
state_strings=[u'pending'])
yield self.rtype.setStepStateStrings(stepid=100, state_strings=[u'hi'])
msgBody = {
'buildid': 10,
'complete': False,
'complete_at': None,
'name': u'ten',
'number': 0,
'results': None,
'started_at': None,
'state_strings': [u'hi'],
'stepid': 100,
'urls': [],
}
self.master.mq.assertProductions([
(('builds', '10', 'steps', str(100), 'updated'), msgBody),
(('steps', str(100), 'updated'), msgBody),
])
step = yield self.master.db.steps.getStep(100)
self.assertEqual(step, {
'buildid': 10,
'complete_at': None,
'id': 100,
'name': u'ten',
'number': 0,
'results': None,
'started_at': None,
'state_strings': [u'hi'],
'urls': [],
})
def test_signature_finishStep(self):
@self.assertArgSpecMatches(
self.master.data.updates.finishStep, # fake
self.rtype.finishStep) # real
def finishStep(self, stepid, results):
pass
@defer.inlineCallbacks
def test_finishStep(self):
yield self.master.db.steps.addStep(buildid=10, name=u'ten',
state_strings=[u'pending'])
self.patch(reactor, 'seconds', lambda: TIME1)
yield self.rtype.startStep(stepid=100)
self.patch(reactor, 'seconds', lambda: TIME2)
self.master.mq.clearProductions()
yield self.rtype.finishStep(stepid=100, results=9)
msgBody = {
'buildid': 10,
'complete': True,
'complete_at': epoch2datetime(TIME2),
'name': u'ten',
'number': 0,
'results': 9,
'started_at': epoch2datetime(TIME1),
'state_strings': [u'pending'],
'stepid': 100,
'urls': [],
}
self.master.mq.assertProductions([
(('builds', '10', 'steps', str(100), 'finished'), msgBody),
(('steps', str(100), 'finished'), msgBody),
])
step = yield self.master.db.steps.getStep(100)
self.assertEqual(step, {
'buildid': 10,
'complete_at': epoch2datetime(TIME2),
'id': 100,
'name': u'ten',
'number': 0,
'results': 9,
'started_at': epoch2datetime(TIME1),
'state_strings': [u'pending'],
'urls': [],
})
def test_signature_addStepURL(self):
@self.assertArgSpecMatches(
self.master.data.updates.addStepURL, # fake
self.rtype.addStepURL) # real
def addStepURL(self, stepid, name, url):
pass
@defer.inlineCallbacks
def test_addStepURL(self):
yield self.master.db.steps.addStep(buildid=10, name=u'ten',
state_strings=[u'pending'])
yield self.rtype.addStepURL(stepid=100, name=u"foo", url=u"bar")
msgBody = {
'buildid': 10,
'complete': False,
'complete_at': None,
'name': u'ten',
'number': 0,
'results': None,
'started_at': None,
'state_strings': [u'pending'],
'stepid': 100,
'urls': [{u'name': u'foo', u'url': u'bar'}],
}
self.master.mq.assertProductions([
(('builds', '10', 'steps', str(100), 'updated'), msgBody),
(('steps', str(100), 'updated'), msgBody),
])
step = yield self.master.db.steps.getStep(100)
self.assertEqual(step, {
'buildid': 10,
'complete_at': None,
'id': 100,
'name': u'ten',
'number': 0,
'results': None,
'started_at': None,
'state_strings': [u'pending'],
'urls': [{u'name': u'foo', u'url': u'bar'}],
})
|
gpl-3.0
|
chaosblog/pyload
|
module/lib/thrift/transport/TSocket.py
|
62
|
5094
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TTransport import *
import os
import errno
import socket
import sys
class TSocketBase(TTransportBase):
def _resolveAddr(self):
if self._unix_socket is not None:
return [(socket.AF_UNIX, socket.SOCK_STREAM, None, None, self._unix_socket)]
else:
return socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE | socket.AI_ADDRCONFIG)
def close(self):
if self.handle:
self.handle.close()
self.handle = None
class TSocket(TSocketBase):
"""Socket implementation of TTransport base."""
def __init__(self, host='localhost', port=9090, unix_socket=None):
"""Initialize a TSocket
@param host(str) The host to connect to.
@param port(int) The (TCP) port to connect to.
@param unix_socket(str) The filename of a unix socket to connect to.
(host and port will be ignored.)
"""
self.host = host
self.port = port
self.handle = None
self._unix_socket = unix_socket
self._timeout = None
def setHandle(self, h):
self.handle = h
def isOpen(self):
return self.handle is not None
def setTimeout(self, ms):
if ms is None:
self._timeout = None
else:
self._timeout = ms/1000.0
if self.handle is not None:
self.handle.settimeout(self._timeout)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
self.handle = socket.socket(res[0], res[1])
self.handle.settimeout(self._timeout)
try:
self.handle.connect(res[4])
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to socket %s' % self._unix_socket
else:
message = 'Could not connect to %s:%d' % (self.host, self.port)
raise TTransportException(type=TTransportException.NOT_OPEN, message=message)
def read(self, sz):
try:
buff = self.handle.recv(sz)
except socket.error, e:
if (e.args[0] == errno.ECONNRESET and
(sys.platform == 'darwin' or sys.platform.startswith('freebsd'))):
        # FreeBSD and Mach don't follow the POSIX semantics of recv()
        # and fail with ECONNRESET if the peer performed a shutdown.
# See corresponding comment and code in TSocket::read()
# in lib/cpp/src/transport/TSocket.cpp.
self.close()
# Trigger the check to raise the END_OF_FILE exception below.
buff = ''
else:
raise
if len(buff) == 0:
raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket read 0 bytes')
return buff
def write(self, buff):
if not self.handle:
raise TTransportException(type=TTransportException.NOT_OPEN, message='Transport not open')
sent = 0
have = len(buff)
while sent < have:
plus = self.handle.send(buff)
if plus == 0:
raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket sent 0 bytes')
sent += plus
buff = buff[plus:]
def flush(self):
pass
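# Illustrative sketch: a minimal client-side round trip over TSocket, assuming a
# Thrift service is listening on localhost:9090 (hypothetical endpoint). Real
# clients usually wrap the socket in a buffered transport and a protocol instead
# of reading and writing raw bytes; this helper is defined but never called here.
def _example_tsocket_client():
  sock = TSocket(host='localhost', port=9090)
  sock.setTimeout(5000)   # milliseconds; stored internally as seconds
  sock.open()
  sock.write('ping')      # raw payload stands in for a serialized Thrift message
  reply = sock.read(4096)
  sock.close()
  return reply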
class TServerSocket(TSocketBase, TServerTransportBase):
"""Socket implementation of TServerTransport base."""
def __init__(self, host=None, port=9090, unix_socket=None):
self.host = host
self.port = port
self._unix_socket = unix_socket
self.handle = None
def listen(self):
res0 = self._resolveAddr()
for res in res0:
if res[0] is socket.AF_INET6 or res is res0[-1]:
break
    # We need to remove the old unix socket if the file exists and
    # nobody is listening on it.
if self._unix_socket:
tmp = socket.socket(res[0], res[1])
try:
tmp.connect(res[4])
except socket.error, err:
eno, message = err.args
if eno == errno.ECONNREFUSED:
os.unlink(res[4])
self.handle = socket.socket(res[0], res[1])
self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(self.handle, 'settimeout'):
self.handle.settimeout(None)
self.handle.bind(res[4])
self.handle.listen(128)
def accept(self):
client, addr = self.handle.accept()
result = TSocket()
result.setHandle(client)
return result
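# Illustrative sketch: a minimal synchronous accept loop on top of TServerSocket,
# assuming port 9090 and a simple echo of whatever each client sends. A real
# deployment would hand the accepted TSocket to one of the TServer classes.
def _example_tserversocket_echo_loop():
  server = TServerSocket(port=9090)
  server.listen()
  while True:
    client = server.accept()   # returns a connected TSocket
    data = client.read(4096)
    client.write(data)
    client.close()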
|
gpl-3.0
|
0x46616c6b/ansible
|
lib/ansible/cli/vault.py
|
5
|
14344
|
# (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.module_utils._text import to_text, to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "encrypt_string", "rekey", "view")
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.vault_pass = None
self.new_vault_pass = None
self.encrypt_string_read_stdin = False
super(VaultCLI, self).__init__(args)
def parse(self):
self.parser = CLI.base_parser(
vault_opts=True,
usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to self.actions
if self.action == "create":
self.parser.set_usage("usage: %prog create [options] file_name")
elif self.action == "decrypt":
self.parser.set_usage("usage: %prog decrypt [options] file_name")
elif self.action == "edit":
self.parser.set_usage("usage: %prog edit [options] file_name")
elif self.action == "view":
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
        # I have no preference for either dash or underscore
elif self.action == "encrypt_string":
self.parser.add_option('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
self.parser.add_option('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
self.parser.add_option('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
self.parser.set_usage("usage: %prog encrypt-string [--prompt] [options] string_to_encrypt")
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
super(VaultCLI, self).parse()
display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt', 'encrypt_string']
if self.action not in can_output:
if self.options.output_file:
raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
if len(self.args) == 0:
raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
else:
                # This restriction should remain in place until it's possible to
                # load multiple YAML records from a single file; otherwise it's too
                # easy to create an encrypted file that can't be read back in. In
                # the meantime, "cat a b c | ansible-vault encrypt --output x" is
                # a workaround.
if self.options.output_file and len(self.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if self.action == 'encrypt_string':
if '-' in self.args or len(self.args) == 0 or self.options.encrypt_string_stdin_name:
self.encrypt_string_read_stdin = True
# TODO: prompting from stdin and reading from stdin seem
# mutually exclusive, but verify that.
if self.options.encrypt_string_prompt and self.encrypt_string_read_stdin:
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
if self.options.vault_password_file:
# read vault_pass from a file
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
if self.options.new_vault_password_file:
# for rekey only
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass or self.options.ask_vault_pass:
self.vault_pass = self.ask_vault_passwords()
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
if self.action == 'rekey':
if not self.new_vault_pass:
self.new_vault_pass = self.ask_new_vault_passwords()
if not self.new_vault_pass:
raise AnsibleOptionsError("A password is required to rekey Ansible's Vault")
if self.action == 'encrypt_string':
if self.options.encrypt_string_prompt:
self.encrypt_string_prompt = True
self.editor = VaultEditor(self.vault_pass)
self.execute()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.encrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
def format_ciphertext_yaml(self, b_ciphertext, indent=None, name=None):
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault-encrypted |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
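    # For illustration, given name='db_password' and a three-line ciphertext, the
    # method above yields a block shaped roughly like this (indent defaults to 10;
    # the hex lines are placeholders, not real vault output):
    #
    #   db_password: !vault-encrypted |
    #             $ANSIBLE_VAULT;1.1;AES256
    #             6134316466383862...
    #             3539636530623438...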
def execute_encrypt_string(self):
b_plaintext = None
        # Holds tuples of (the text, the source of the string, the variable name if one was provided).
b_plaintext_list = []
        # Remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
        # we don't add it to the plaintext list.
args = [x for x in self.args if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if self.options.encrypt_string_prompt:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# could use private=True for shadowed input if useful
prompt_response = display.prompt(msg)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = self.options.encrypt_string_stdin_name
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if hasattr(self.options, 'encrypt_string_names') and self.options.encrypt_string_names:
name_and_text_list = list(zip(self.options.encrypt_string_names, args))
# Some but not enough --name's to name each var
if len(args) > len(name_and_text_list):
                # This warning is kept deliberately vague so that the plaintext never shows up in the output.
                display.display('The number of --name options does not match the number of args.',
                                stderr=True)
display.display('The last named variable will be "%s". The rest will not have names.' % self.options.encrypt_string_names[-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list)
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
print(out)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list):
        # If we are only showing one item in the output, we don't need to include commented
        # delimiters in the text.
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
                    err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
def execute_decrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.decrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
if len(self.args) > 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(self.args[0])
def execute_edit(self):
for f in self.args:
self.editor.edit_file(f)
def execute_view(self):
for f in self.args:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
self.pager(to_text(self.editor.plaintext(f)))
def execute_rekey(self):
for f in self.args:
if not (os.path.isfile(f)):
raise AnsibleError(f + " does not exist")
for f in self.args:
self.editor.rekey_file(f, self.new_vault_pass)
display.display("Rekey successful", stderr=True)
|
gpl-3.0
|
dyjakan/interpreter-bugs
|
python/cpython-2.7.13-asan/ba74a2ffd077ff29ca7c84d5132be64ce3e9782f5f7dff0ec21b6e47a2100294.py
|
1
|
1090
|
"""
gc.get_referrers() can be used to see objects before they are fully built.
Note that this is only an example. There are many ways to crash Python
by using gc.get_referrers(), as well as many extension modules (even
when they are using perfectly documented patterns to build objects).
Identifying and removing all places that expose to the GC a
partially-built object is a long-term project. A patch was proposed on
SF specifically for this example but I consider fixing just this single
example a bit pointless (#1517042).
A fix would include a whole-scale code review, possibly with an API
change to decouple object creation and GC registration, and according
fixes to the documentation for extension module writers. It's unlikely
to happen, though. So this is currently classified as
"gc.get_referrers() is dangerous, use only for debugging".
"""
import gc
def g():
marker = object()
yield marker
# now the marker is in the tuple being constructed
[tup] = [x for x in gc.get_referrers(marker) if type(x) is tuple]
print tup
print tup[1]
tuple(g())
|
unlicense
|
beni55/SimpleCV
|
SimpleCV/examples/util/ColorCube.py
|
13
|
1901
|
from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
for y in range(0,bins):
for z in range(0,bins):
b = ((x*skip)+offset)/255.0
g = ((y*skip)+offset)/255.0
r = ((z*skip)+offset)/255.0
idxs.append((x,y,z,(r,g,b)))
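# For reference: with bins = 8 the nested loops above precompute 8**3 = 512 grid
# points; each entry is (x, y, z, (r, g, b)) where the colour is the normalised
# centre of that histogram bin (for the first bin, roughly (0.016, 0.016, 0.016)).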
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
ax = fig.gca(projection='3d')
ax.set_xlabel('BLUE', color=(0,0,1) )
ax.set_ylabel('GREEN',color=(0,1,0))
ax.set_zlabel('RED',color=(1,0,0))
# Get the color histogram
img = cam.getImage().scale(0.3)
rgb = img.getNumpyCv2()
hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
hist = hist/np.max(hist)
# render everything
[ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
#[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
ax.set_xlim3d(0, bins-1)
ax.set_ylim3d(0, bins-1)
ax.set_zlim3d(0, bins-1)
azim = (azim+0.5)%360
ax.view_init(elev=35, azim=azim)
########### convert matplotlib to SimpleCV image
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
surf = pg.image.fromstring(raw_data, size, "RGB")
figure = Image(surf)
############ All done
figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
result = figure.blit(img, pos=(20,20))
result.save(disp)
fig.clf()
|
bsd-3-clause
|
PaulieC/sprint3-Council
|
ServerPackage/Scorekeeper.py
|
2
|
3735
|
__author__ = "Paul Council, Joseph Gonzoph, Anand Patel"
__version__ = "sprint1"
__credits__ = ["jeffrey creighton"]
# imports
from ServerPackage import ScorekeeperHistoryItem, ScoreKeeperListItem
class Scorekeeper:
"""
Purpose: to collect and store scores from all players and matches
Attributes:
    leader_board: the list of players' ScorekeeperListItems
    match_history: the list of ScorekeeperHistoryItems from the matches
"""
leader_board = []
match_history = []
def __init__(self):
self.leader_board = []
self.match_history = []
def update_tournament(self, player_1, player_2, winner, score_1, score_2):
"""
updates both the leader_board and match_history
        :param player_1: player in position 1
        :type player_1: Player.Player
        :param player_2: player in position 2
        :type player_2: Player.Player
:param winner: the winning player
:type winner: Player.Player
:param score_1: player_1's score
:type score_1: int
:param score_2: player_2's score
:type score_2: int
"""
# creates the history item based on the match details
history_item = ScorekeeperHistoryItem.ScorekeeperHistoryItem(player_1, player_2, winner, score_1, score_2)
# adds the history item to match_history[]
self.match_history.append(history_item)
# creates the list_items for both players and checks to see if they are on the leader_board
# if not present on the leader board, they are appended
list_item_a = ScoreKeeperListItem.ScorekeeperListItem(player_1)
list_item_b = ScoreKeeperListItem.ScorekeeperListItem(player_2)
if self.check_player(list_item_a):
self.leader_board.append(list_item_a)
if self.check_player(list_item_b):
self.leader_board.append(list_item_b)
# Checks the winner and awards a point to that player
if score_1 > score_2:
self.make_winner(player_1)
elif score_2 > score_1:
self.make_winner(player_2)
# in the unlikely event of a tie, no points awarded
else:
pass
def check_player(self, player_list_item):
"""
checks to see if the player is already on the leader board.
returns true if they are not, false if they are.
:param player_list_item: the player list item to check against on the board
:type player_list_item: ScoreKeeperListItem.ScorekeeperListItem
:return check: the result of the check
:rtype: bool
"""
        # A match anywhere on the board means the player is already present, so
        # stop scanning as soon as one is found instead of letting later entries
        # overwrite the result.
        for i in range(len(self.leader_board)):
            if self.leader_board[i].get_player() == player_list_item.get_player():
                return False
        return True
    def make_winner(self, winning_player):
        """
        update the winning player's leader board entry
        :param winning_player: the winning player whose leader board entry gets the point
        :type winning_player: Player.Player
        """
        for i in range(len(self.leader_board)):
            if self.leader_board[i].get_player() is winning_player:
                self.leader_board[i].update_score()
def get_leader_board(self):
"""
        Returns this leader_board.
        :return: the list of leader board entries, one per player seen
:rtype: list
"""
return self.leader_board
def get_match_history(self):
"""
        Returns this match_history.
        :return: the list of ScorekeeperHistoryItems, one per reported match
:rtype: list
"""
return self.match_history
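# Illustrative usage sketch, assuming two Player objects (p1, p2) created elsewhere
# by the ServerPackage code that drives a tournament:
#
#   keeper = Scorekeeper()
#   keeper.update_tournament(p1, p2, winner=p1, score_1=3, score_2=1)
#   keeper.get_leader_board()    # one ScorekeeperListItem per player seen so far
#   keeper.get_match_history()   # one ScorekeeperHistoryItem per reported match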
|
apache-2.0
|
eleonrk/SickRage
|
lib/hachoir_parser/program/nds.py
|
58
|
13832
|
"""
Nintendo DS .nds game file parser
File format references:
- http://www.bottledlight.com/ds/index.php/FileFormats/NDSFormat
- http://imrannazar.com/The-Smallest-NDS-File
- http://darkfader.net/ds/files/ndstool.cpp
- http://crackerscrap.com/docs/dsromstructure.html
- http://nocash.emubase.de/gbatek.htm
"""
from hachoir_parser import Parser
from hachoir_core.field import (ParserError,
UInt8, UInt16, UInt32, UInt64, String, RawBytes, SubFile, FieldSet, NullBits, Bit, Bits, Bytes,
SeekableFieldSet, RootSeekableFieldSet)
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
"""
CRC16 Calculation
Modified from:
http://www.mail-archive.com/[email protected]/msg47844.html
Original License:
crc16.py by Bryan G. Olson, 2005
This module is free software and may be used and
distributed under the same terms as Python itself.
"""
class CRC16:
_table = None
def _initTable (self):
from array import array
# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
# top bit implicit, reflected
poly = 0xa001
CRC16._table = array('H')
for byte in range(256):
crc = 0
for bit in range(8):
if (byte ^ crc) & 1:
crc = (crc >> 1) ^ poly
else:
crc >>= 1
byte >>= 1
CRC16._table.append(crc)
def checksum (self, string, value):
if CRC16._table is None:
self._initTable()
for ch in string:
value = self._table[ord(ch) ^ (value & 0xff)] ^ (value >> 8)
return value
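# Quick usage sketch for the CRC16 helper above: checksums are seeded with 0xffff
# and folded byte by byte, so a partial value can be carried across chunks. The
# input string is an arbitrary example.
def _example_crc16():
    full = CRC16().checksum("NINTENDO", 0xffff)
    partial = CRC16().checksum("NINTE", 0xffff)
    assert CRC16().checksum("NDO", partial) == full
    return full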
class Crc16(UInt16):
"16 bit field for calculating and comparing CRC-16 of specified string"
def __init__(self, parent, name, targetBytes):
UInt16.__init__(self, parent, name)
self.targetBytes = targetBytes
def createDescription(self):
crc = CRC16().checksum(self.targetBytes, 0xffff)
if crc == self.value:
return "matches CRC of %d bytes" % len(self.targetBytes)
else:
return "mismatch (calculated CRC %d for %d bytes)" % (crc, len(self.targetBytes))
class FileNameDirTable(FieldSet):
static_size = (4+2+2)*8
def createFields(self):
yield UInt32(self, "entry_start")
yield UInt16(self, "entry_file_id")
yield UInt16(self, "parent_id")
def createDescription(self):
return "first file id: %d; parent directory id: %d (%d)" % (self["entry_file_id"].value, self["parent_id"].value, self["parent_id"].value & 0xFFF)
class FileNameEntry(FieldSet):
def createFields(self):
yield Bits(self, "name_len", 7)
yield Bit(self, "is_directory")
yield String(self, "name", self["name_len"].value)
if self["is_directory"].value:
yield UInt16(self, "dir_id")
def createDescription(self):
s = ""
if self["is_directory"].value:
s = "[D] "
return s + self["name"].value
class Directory(FieldSet):
def createFields(self):
while True:
fne = FileNameEntry(self, "entry[]")
if fne["name_len"].value == 0:
yield UInt8(self, "end_marker")
break
yield fne
class FileNameTable(SeekableFieldSet):
def createFields(self):
self.startOffset = self.absolute_address / 8
# parent_id of first FileNameDirTable contains number of directories:
dt = FileNameDirTable(self, "dir_table[]")
numDirs = dt["parent_id"].value
yield dt
for i in range(1, numDirs):
yield FileNameDirTable(self, "dir_table[]")
for i in range(0, numDirs):
dt = self["dir_table[%d]" % i]
offset = self.startOffset + dt["entry_start"].value
self.seekByte(offset, relative=False)
yield Directory(self, "directory[]")
class FATFileEntry(FieldSet):
static_size = 2*4*8
def createFields(self):
yield UInt32(self, "start")
yield UInt32(self, "end")
def createDescription(self):
return "start: %d; size: %d" % (self["start"].value, self["end"].value - self["start"].value)
class FATContent(FieldSet):
def createFields(self):
num_entries = self.parent["header"]["fat_size"].value / 8
for i in range(0, num_entries):
yield FATFileEntry(self, "entry[]")
class BannerTile(FieldSet):
static_size = 32*8
def createFields(self):
for y in range(8):
for x in range(8):
yield Bits(self, "pixel[%d,%d]" % (x,y), 4)
class BannerIcon(FieldSet):
static_size = 16*32*8
def createFields(self):
for y in range(4):
for x in range(4):
yield BannerTile(self, "tile[%d,%d]" % (x,y))
class NdsColor(FieldSet):
static_size = 16
def createFields(self):
yield Bits(self, "red", 5)
yield Bits(self, "green", 5)
yield Bits(self, "blue", 5)
yield NullBits(self, "pad", 1)
def createDescription(self):
return "#%02x%02x%02x" % (self["red"].value << 3, self["green"].value << 3, self["blue"].value << 3)
class Banner(FieldSet):
static_size = 2112*8
def createFields(self):
yield UInt16(self, "version")
# CRC of this structure, excluding first 32 bytes:
yield Crc16(self, "crc", self.stream.readBytes(self.absolute_address+(32*8), (2112-32)))
yield RawBytes(self, "reserved", 28)
yield BannerIcon(self, "icon_data")
for i in range(0, 16):
yield NdsColor(self, "palette_color[]")
yield String(self, "title_jp", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_en", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_fr", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_de", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_it", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_es", 256, charset="UTF-16-LE", truncate="\0")
class Overlay(FieldSet):
static_size = 8*4*8
def createFields(self):
yield UInt32(self, "id")
yield textHandler(UInt32(self, "ram_address"), hexadecimal)
yield UInt32(self, "ram_size")
yield UInt32(self, "bss_size")
yield textHandler(UInt32(self, "init_start_address"), hexadecimal)
yield textHandler(UInt32(self, "init_end_address"), hexadecimal)
yield UInt32(self, "file_id")
yield RawBytes(self, "reserved[]", 4)
def createDescription(self):
return "file #%d, %d (+%d) bytes to 0x%08x" % (
self["file_id"].value, self["ram_size"].value, self["bss_size"].value, self["ram_address"].value)
class SecureArea(FieldSet):
static_size=2048*8
def createFields(self):
yield textHandler(UInt64(self, "id"), hexadecimal)
if self["id"].value == 0xe7ffdeffe7ffdeff: # indicates that secure area is decrypted
yield Bytes(self, "fixed[]", 6) # always \xff\xde\xff\xe7\xff\xde
yield Crc16(self, "header_crc16", self.stream.readBytes(self.absolute_address+(16*8), 2048-16))
yield RawBytes(self, "unknown[]", 2048-16-2)
yield Bytes(self, "fixed[]", 2) # always \0\0
else:
yield RawBytes(self, "encrypted[]", 2048-8)
class DeviceSize(UInt8):
def createDescription(self):
return "%d Mbit" % ((2**(20+self.value)) / (1024*1024))
class Header(FieldSet):
def createFields(self):
yield String(self, "game_title", 12, truncate="\0")
yield String(self, "game_code", 4)
yield String(self, "maker_code", 2)
yield UInt8(self, "unit_code")
yield UInt8(self, "device_code")
yield DeviceSize(self, "card_size")
yield String(self, "card_info", 9)
yield UInt8(self, "rom_version")
yield Bits(self, "unknown_flags[]", 2)
yield Bit(self, "autostart_flag")
yield Bits(self, "unknown_flags[]", 5)
yield UInt32(self, "arm9_source", "ARM9 ROM offset")
yield textHandler(UInt32(self, "arm9_execute_addr", "ARM9 entry address"), hexadecimal)
yield textHandler(UInt32(self, "arm9_copy_to_addr", "ARM9 RAM address"), hexadecimal)
yield UInt32(self, "arm9_bin_size", "ARM9 code size")
yield UInt32(self, "arm7_source", "ARM7 ROM offset")
yield textHandler(UInt32(self, "arm7_execute_addr", "ARM7 entry address"), hexadecimal)
yield textHandler(UInt32(self, "arm7_copy_to_addr", "ARM7 RAM address"), hexadecimal)
yield UInt32(self, "arm7_bin_size", "ARM7 code size")
yield UInt32(self, "filename_table_offset")
yield UInt32(self, "filename_table_size")
yield UInt32(self, "fat_offset")
yield UInt32(self, "fat_size")
yield UInt32(self, "arm9_overlay_src")
yield UInt32(self, "arm9_overlay_size")
yield UInt32(self, "arm7_overlay_src")
yield UInt32(self, "arm7_overlay_size")
yield textHandler(UInt32(self, "ctl_read_flags"), hexadecimal)
yield textHandler(UInt32(self, "ctl_init_flags"), hexadecimal)
yield UInt32(self, "banner_offset")
yield Crc16(self, "secure_crc16", self.stream.readBytes(0x4000*8, 0x4000))
yield UInt16(self, "rom_timeout")
yield UInt32(self, "arm9_unk_addr")
yield UInt32(self, "arm7_unk_addr")
yield UInt64(self, "unenc_mode_magic")
yield UInt32(self, "rom_size")
yield UInt32(self, "header_size")
yield RawBytes(self, "unknown[]", 36)
yield String(self, "passme_autoboot_detect", 4)
yield RawBytes(self, "unknown[]", 16)
yield RawBytes(self, "gba_logo", 156)
yield Crc16(self, "logo_crc16", self.stream.readBytes(0xc0*8, 156))
yield Crc16(self, "header_crc16", self.stream.readBytes(0, 350))
yield UInt32(self, "debug_rom_offset")
yield UInt32(self, "debug_size")
yield textHandler(UInt32(self, "debug_ram_address"), hexadecimal)
class NdsFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "nds_file",
"category": "program",
"file_ext": ("nds",),
"mime": (u"application/octet-stream",),
"min_size": 352 * 8, # just a minimal header
"description": "Nintendo DS game file",
}
endian = LITTLE_ENDIAN
def validate(self):
try:
header = self["header"]
except Exception, e:
return False
return (self.stream.readBytes(0, 1) != "\0"
and (header["device_code"].value & 7) == 0
and header["header_size"].value >= 352
and header["card_size"].value < 15 # arbitrary limit at 32Gbit
and header["arm9_bin_size"].value > 0 and header["arm9_bin_size"].value <= 0x3bfe00
and header["arm7_bin_size"].value > 0 and header["arm7_bin_size"].value <= 0x3bfe00
and header["arm9_source"].value + header["arm9_bin_size"].value < self._size
and header["arm7_source"].value + header["arm7_bin_size"].value < self._size
and header["arm9_execute_addr"].value >= 0x02000000 and header["arm9_execute_addr"].value <= 0x023bfe00
and header["arm9_copy_to_addr"].value >= 0x02000000 and header["arm9_copy_to_addr"].value <= 0x023bfe00
and header["arm7_execute_addr"].value >= 0x02000000 and header["arm7_execute_addr"].value <= 0x03807e00
and header["arm7_copy_to_addr"].value >= 0x02000000 and header["arm7_copy_to_addr"].value <= 0x03807e00
)
def createFields(self):
# Header
yield Header(self, "header")
# Secure Area
if self["header"]["arm9_source"].value >= 0x4000 and self["header"]["arm9_source"].value < 0x8000:
secStart = self["header"]["arm9_source"].value & 0xfffff000
self.seekByte(secStart, relative=False)
yield SecureArea(self, "secure_area", size=0x8000-secStart)
# ARM9 binary
self.seekByte(self["header"]["arm9_source"].value, relative=False)
yield RawBytes(self, "arm9_bin", self["header"]["arm9_bin_size"].value)
# ARM7 binary
self.seekByte(self["header"]["arm7_source"].value, relative=False)
yield RawBytes(self, "arm7_bin", self["header"]["arm7_bin_size"].value)
# File Name Table
if self["header"]["filename_table_size"].value > 0:
self.seekByte(self["header"]["filename_table_offset"].value, relative=False)
yield FileNameTable(self, "filename_table", size=self["header"]["filename_table_size"].value*8)
# FAT
if self["header"]["fat_size"].value > 0:
self.seekByte(self["header"]["fat_offset"].value, relative=False)
yield FATContent(self, "fat_content", size=self["header"]["fat_size"].value*8)
# banner
if self["header"]["banner_offset"].value > 0:
self.seekByte(self["header"]["banner_offset"].value, relative=False)
yield Banner(self, "banner")
# ARM9 overlays
if self["header"]["arm9_overlay_src"].value > 0:
self.seekByte(self["header"]["arm9_overlay_src"].value, relative=False)
numOvls = self["header"]["arm9_overlay_size"].value / (8*4)
for i in range(numOvls):
yield Overlay(self, "arm9_overlay[]")
# files
if self["header"]["fat_size"].value > 0:
for field in self["fat_content"]:
if field["end"].value > field["start"].value:
self.seekByte(field["start"].value, relative=False)
yield SubFile(self, "file[]", field["end"].value - field["start"].value)
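# Illustrative sketch of driving this parser through hachoir's generic entry point,
# assuming a local file name; createParser() selects NdsFile via PARSER_TAGS and
# the validate() checks above.
def _example_dump_nds_header(filename=u"game.nds"):
    from hachoir_parser import createParser
    parser = createParser(filename)
    header = parser["header"]
    return (header["game_title"].value, header["game_code"].value,
            header["rom_version"].value)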
|
gpl-3.0
|
saradbowman/osf.io
|
osf/migrations/0135_migrate_registrations_to_osf_registries_provider.py
|
13
|
2304
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-18 13:51
from __future__ import unicode_literals
import logging
from django.db import migrations
logger = logging.getLogger(__file__)
def migrate_existing_registrations_into_osf_registries(state, schema):
AbstractProvider = state.get_model('osf', 'abstractprovider')
AbstractNode = state.get_model('osf', 'abstractnode')
DraftRegistration = state.get_model('osf', 'draftregistration')
try:
osf_registries = AbstractProvider.objects.get(_id='osf', type='osf.registrationprovider')
except AbstractProvider.DoesNotExist:
# Allow test / local dev DBs to pass
logger.warn('Unable to find OSF Registries provider - assuming test environment.')
pass
else:
draft_registrations = DraftRegistration.objects.all()
registrations = AbstractNode.objects.filter(type='osf.registration')
updated_drafts = draft_registrations.update(provider_id=osf_registries.id)
updated_registrations = registrations.update(provider_id=osf_registries.id)
assert (updated_drafts, updated_registrations) == (draft_registrations.count(), registrations.count())
logger.info('Successfully migrated {} draft registrations and {} public registrations into OSFRegistries'.format(updated_drafts, updated_registrations))
def remove_existing_registrations_from_osf_registries(state, schema):
AbstractProvider = state.get_model('osf', 'abstractprovider')
try:
osf_registries = AbstractProvider.objects.get(_id='osf', type='osf.registrationprovider')
except AbstractProvider.DoesNotExist:
pass
else:
total_registrations = osf_registries.registrations.count() + osf_registries.draft_registrations.count()
osf_registries.draft_registrations.clear()
osf_registries.registrations.clear()
logger.info('Successfully removed {} public and draft registrations from OSFRegistries'.format(total_registrations))
class Migration(migrations.Migration):
dependencies = [
('osf', '0134_add_provider_reg_fks'),
]
operations = [
migrations.RunPython(
migrate_existing_registrations_into_osf_registries,
remove_existing_registrations_from_osf_registries
)
]
|
apache-2.0
|
TiVo/kafka
|
tests/kafkatest/tests/verifiable_consumer_test.py
|
4
|
4599
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import wait_until
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.verifiable_consumer import VerifiableConsumer
from kafkatest.services.kafka import TopicPartition
class VerifiableConsumerTest(KafkaTest):
PRODUCER_REQUEST_TIMEOUT_SEC = 30
def __init__(self, test_context, num_consumers=1, num_producers=0,
group_id="test_group_id", session_timeout_sec=10, **kwargs):
super(VerifiableConsumerTest, self).__init__(test_context, **kwargs)
self.num_consumers = num_consumers
self.num_producers = num_producers
self.group_id = group_id
self.session_timeout_sec = session_timeout_sec
self.consumption_timeout_sec = max(self.PRODUCER_REQUEST_TIMEOUT_SEC + 5, 2 * session_timeout_sec)
def _all_partitions(self, topic, num_partitions):
partitions = set()
for i in range(num_partitions):
partitions.add(TopicPartition(topic=topic, partition=i))
return partitions
def _partitions(self, assignment):
partitions = []
for parts in assignment.itervalues():
partitions += parts
return partitions
def valid_assignment(self, topic, num_partitions, assignment):
all_partitions = self._all_partitions(topic, num_partitions)
partitions = self._partitions(assignment)
return len(partitions) == num_partitions and set(partitions) == all_partitions
def min_cluster_size(self):
"""Override this since we're adding services outside of the constructor"""
return super(VerifiableConsumerTest, self).min_cluster_size() + self.num_consumers + self.num_producers
def setup_consumer(self, topic, static_membership=False, enable_autocommit=False,
assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor", **kwargs):
return VerifiableConsumer(self.test_context, self.num_consumers, self.kafka,
topic, self.group_id, static_membership=static_membership, session_timeout_sec=self.session_timeout_sec,
assignment_strategy=assignment_strategy, enable_autocommit=enable_autocommit,
log_level="TRACE", **kwargs)
def setup_producer(self, topic, max_messages=-1, throughput=500):
return VerifiableProducer(self.test_context, self.num_producers, self.kafka, topic,
max_messages=max_messages, throughput=throughput,
request_timeout_sec=self.PRODUCER_REQUEST_TIMEOUT_SEC,
log_level="DEBUG")
def await_produced_messages(self, producer, min_messages=1000, timeout_sec=10):
current_acked = producer.num_acked
wait_until(lambda: producer.num_acked >= current_acked + min_messages, timeout_sec=timeout_sec,
err_msg="Timeout awaiting messages to be produced and acked")
def await_consumed_messages(self, consumer, min_messages=1):
current_total = consumer.total_consumed()
wait_until(lambda: consumer.total_consumed() >= current_total + min_messages,
timeout_sec=self.consumption_timeout_sec,
err_msg="Timed out waiting for consumption")
def await_members(self, consumer, num_consumers):
# Wait until all members have joined the group
wait_until(lambda: len(consumer.joined_nodes()) == num_consumers,
timeout_sec=self.session_timeout_sec*2,
err_msg="Consumers failed to join in a reasonable amount of time")
def await_all_members(self, consumer):
self.await_members(consumer, self.num_consumers)
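# Illustrative sketch of the flow a concrete system test typically builds from the
# helpers above (topic name is an assumption; broker/zookeeper setup through the
# KafkaTest constructor kwargs is omitted here):
#
#   def test_basic_consumption(self):
#       producer = self.setup_producer("test_topic")
#       consumer = self.setup_consumer("test_topic")
#       producer.start()
#       consumer.start()
#       self.await_all_members(consumer)
#       self.await_produced_messages(producer, min_messages=1000)
#       self.await_consumed_messages(consumer, min_messages=1000)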
|
apache-2.0
|
DataONEorg/d1_python
|
test_utilities/src/d1_test/stress_tester/projects/combination_1/test_scripts/tier_3_mn_storage_create.py
|
2
|
3141
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`tier_3_mn_storage_create`
===============================
:Created: 2011-04-22
:Author: DataONE (Dahl)
"""
import random
import string
import settings
import transaction
import d1_common.types.dataoneTypes
from d1_test.instance_generator import random_data
from d1_test.instance_generator import system_metadata
# Config
# The number of bytes in each science object.
N_SCI_OBJ_BYTES = 1024
# The number of subjects to allow access to each created test object.
N_SUBJECTS = 10
# Chance, in percent, that an object is created with public access.
PUBLIC_ACCESS_PERCENT = 25.0
class Transaction(transaction.Transaction):
def __init__(self):
super().__init__()
def d1_mn_api_call(self):
"""MNStorage.create()"""
sci_obj = self.create_science_object()
subjects = self.get_random_subjects(PUBLIC_ACCESS_PERCENT, N_SUBJECTS)
access_policy = self.create_access_policy(subjects)
sys_meta = self.create_system_metadata(sci_obj, access_policy)
client = self.create_client_for_subject(
settings.SUBJECT_WITH_CREATE_PERMISSIONS
)
response = client.createResponse(sys_meta.identifier.value(), sci_obj, sys_meta)
self.check_response(response)
def create_science_object(self):
return random_data.random_bytes_file(N_SCI_OBJ_BYTES)
def create_system_metadata(self, sci_obj, access_policy):
return system_metadata.generate_from_file(
sci_obj,
{
"identifier": self.generate_random_ascii_pid(),
"accessPolicy": access_policy,
"rightsHolder": self.select_random_subject(),
},
)
def create_access_policy(self, subjects):
ap = d1_common.types.dataoneTypes.AccessPolicy()
ar = d1_common.types.dataoneTypes.AccessRule()
ar.subject = subjects
ar.permission = ["changePermission"]
ap.allow.append(ar)
return ap
def generate_random_ascii_pid(self):
return "".join(
random.choice(
string.ascii_uppercase + string.ascii_lowercase + string.digits
)
for x in range(10)
)
if __name__ == "__main__":
t = Transaction()
t.run()
# import cProfile
# cProfile.run('t.run()', 'profile')
|
apache-2.0
|