repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
sdcharle/brat | app/forms.py | 1 | 1692 | from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, TextAreaField
from wtforms.validators import Required, Length
from models import User
class PostForm(Form):
post = TextField('post', validators = [Required()])
class LoginForm(Form):
openid = TextField('openid', validators = [Required()])
remember_me = BooleanField('remember_me', default = False)
class EditForm(Form):
nickname = TextField('nickname', validators = [Required()])
first_name = TextField('first_name', validators = [], default = "")
last_name = TextField('last_name', validators = [], default = "")
about_me = TextAreaField('about_me', validators = [Length(min = 0, max = 140)])
rfid_access = BooleanField('rfid_access')
rfid_tag = TextField('rfid_tag', validators = [Length(min = 5, max = 10)])
is_active = BooleanField('is_active')
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
if self.nickname.data != User.make_valid_nickname(self.nickname.data):
self.nickname.errors.append('This nickname has invalid characters. Please use letters, numbers, dots and underscores only.')
return False
user = User.query.filter_by(nickname = self.nickname.data).first()
if user != None:
self.nickname.errors.append('This nickname is already in use. Please choose another one.')
return False
return True | bsd-3-clause | -5,981,637,791,839,527,000 | 40.292683 | 136 | 0.654255 | false |
fcurella/django-fakery | django_fakery/field_mappings.py | 1 | 6030 | import sys
from collections import OrderedDict
from typing import Any, Tuple
from django.conf import settings
from django.db import models
from django.utils import timezone
from . import fakes
from .compat import HAS_GEOS, HAS_PSYCOPG2
STRING_FIELDS = (
models.CharField,
models.TextField,
) # type: Tuple[Any, ...]
if HAS_PSYCOPG2:
from django.contrib.postgres import fields as pg_fields
STRING_FIELDS += (pg_fields.CICharField, pg_fields.CITextField)
"""
This module maps fields to functions generating values.
It first tries by looking at the field's class, then falls back to some
special-cased names.
Values are 3-tuples composed of ``(<function>, <args>, <kwargs>)``.
When ``<function>`` is a string, it's assumed to be a faker provider. Whenever
``faker`` doesn't provide a suitable provider, we ship our own function. They
are defined in ``django_fakery.fakes``.
"""
TZINFO = timezone.get_current_timezone() if settings.USE_TZ else None
mappings_types = OrderedDict(
[
(
models.BigIntegerField,
("random_int", [], {"min": -sys.maxsize, "max": sys.maxsize}),
),
(models.BinaryField, ("binary", [1024], {})),
(models.BooleanField, ("pybool", [], {})),
(models.DateTimeField, ("date_time", [], {"tzinfo": TZINFO})),
# ``DateField`` must come after ``DateTimeField`` because it's its superclass
(models.DateField, (lambda faker, field: faker.date_time().date(), [], {})),
(models.DecimalField, (fakes.decimal, [], {})),
(models.EmailField, ("email", [], {})),
(models.FileField, ("file_name", [], {})),
(models.FilePathField, ("file_name", [], {})),
(models.FloatField, ("pyfloat", [], {})),
(models.ImageField, ("file_name", [], {"extension": "jpg"})),
(models.IntegerField, ("pyint", [], {})),
(models.IPAddressField, ("ipv4", [], {})),
(models.GenericIPAddressField, ("ipv4", [], {})),
(models.PositiveIntegerField, ("random_int", [], {"max": 2147483647})),
(models.PositiveSmallIntegerField, ("random_int", [], {"max": 32767})),
(models.SlugField, (fakes.slug, [3], {})),
(models.SmallIntegerField, ("random_int", [], {"min": -32768, "max": 32767})),
(
models.TextField,
(
lambda faker, field: field.unique
and faker.pystr(max_chars=2700)
or faker.paragraph(),
[],
{},
),
),
(
models.TimeField,
(lambda faker, field: faker.date_time(tzinfo=TZINFO).time(), [], {}),
),
(models.URLField, ("url", [], {})),
(
models.CharField,
(
lambda faker, field: field.unique
and faker.pystr(max_chars=field.max_length)
or faker.word()[: field.max_length],
[],
{},
),
),
(models.DurationField, ("time_delta", [], {})),
(models.UUIDField, ("uuid4", [], {})),
]
)
if HAS_GEOS:
from django.contrib.gis.db import models as geo_models
mappings_types.update(
{
geo_models.PointField: (fakes.point, (), {"srid": 4326}),
geo_models.LineStringField: (fakes.linestring, (), {"srid": 4326}),
geo_models.PolygonField: (fakes.polygon, (), {"srid": 4326}),
geo_models.MultiPointField: (fakes.multipoint, (), {"srid": 4326}),
geo_models.MultiLineStringField: (
fakes.multilinestring,
(),
{"srid": 4326},
),
geo_models.MultiPolygonField: (fakes.multipolygon, (), {"srid": 4326}),
geo_models.GeometryCollectionField: (
fakes.geometrycollection,
(),
{"srid": 4326},
),
geo_models.RasterField: (fakes.gdal_raster, [], {"srid": 4326}),
}
)
if HAS_PSYCOPG2:
mappings_types.update(
{
pg_fields.CICharField: mappings_types[models.CharField],
pg_fields.CIEmailField: mappings_types[models.EmailField],
pg_fields.CITextField: mappings_types[models.TextField],
pg_fields.ArrayField: (fakes.array, [], {}),
pg_fields.HStoreField: ("pydict", [10, True, "str"], {}),
pg_fields.IntegerRangeField: (
fakes.integerrange,
[],
{"min": -2147483647, "max": 2147483647},
),
pg_fields.BigIntegerRangeField: (
fakes.integerrange,
[],
{"min": -sys.maxsize, "max": sys.maxsize},
),
pg_fields.FloatRangeField: (fakes.floatrange, [], {}),
pg_fields.DateTimeRangeField: (fakes.datetimerange, [], {}),
pg_fields.DateRangeField: (fakes.daterange, [], {}),
pg_fields.JSONField: (fakes.random_dict, [], {}),
}
)
mappings_names = {
"name": (
lambda faker, field: field.unique
and faker.pystr(max_chars=field.max_length or 2700)
or faker.word()[: field.max_length],
[],
{},
), # `name` is too generic to assume it's a person
"slug": (fakes.slug, [3], {}),
"first_name": ("first_name", [], {}),
"last_name": ("last_name", [], {}),
"full_name": ("full_name", [], {}),
"email": ("email", [], {}),
"created": (
"date_time_between",
[],
{"start_date": "-30d", "end_date": "+30d", "tzinfo": TZINFO},
),
"created_at": (
"date_time_between",
[],
{"start_date": "-30d", "end_date": "+30d", "tzinfo": TZINFO},
),
"updated": (
"date_time_between",
[],
{"start_date": "-30d", "end_date": "+30d", "tzinfo": TZINFO},
),
"updated_at": (
"date_time_between",
[],
{"start_date": "-30d", "end_date": "+30d", "tzinfo": TZINFO},
),
}
| mit | -5,350,264,565,698,461,000 | 32.876404 | 86 | 0.521891 | false |
bjornwallner/proq2-server | apps/modeller9v8/modlib/modeller/saxsdata.py | 1 | 17392 | """Classes to handle SAXS (Small Angle X-ray Scattering) data"""
import _modeller
from modeller.util.modobject import modobject
from modeller.util import modlist, array
__docformat__ = "epytext en"
class SAXSList(modlist.LinkList):
"""A list of L{saxsdata} objects"""
def __init__(self, edat):
self.__edat = edat
self.__list = []
modlist.LinkList.__init__(self)
def _insfunc(self, indx, obj):
_modeller.mod_saxsdata_pt_new(self.__edat, indx, obj.modpt)
self.__list.insert(indx, obj)
def __len__(self):
return len(self.__list)
def _getfunc(self, indx):
return self.__list[indx]
def _delfunc(self, indx):
del self.__list[indx]
_modeller.mod_saxsdata_pt_del(self.__edat, indx)
class saxsdata(modobject):
"""Holds all SAXS (Small Angle X-ray Scattering) data"""
__modpt = None
env = None
def __init__(self, env, **vars):
self.__modpt = _modeller.mod_saxsdata_new()
self.env = env.copy()
def __setstate__(self, d):
self.__dict__.update(d)
self.__modpt = _modeller.mod_saxsdata_new()
def __del__(self):
if self.__modpt:
_modeller.mod_saxsdata_free(self.__modpt)
def __get_modpt(self):
return self.__modpt
def write(self, file):
"""Write SAXS data, which is currently in memory """
fh=open(file,'w')
for ii in range(0,self.ns):
fh.write('%10.7f ' % self.s[ii] + '%15.5f ' % self.intensity[ii] +'%15.5f\n' % self.int_exp[ii] )
fh.close()
def pr_score(self, mdl, maxr, filename=None, use_err=False, rfact=False):
"""Calculates P(r) score of a model by comparing model P(r) to expt
data saxs.pr_exp.
@param mdl: model
@param maxr: maximum radius to score P(r) in A
@param filename: filename of P(r) model to write (scaled to expt data)
@param use_err: use experimental error?
@param rfact: use rfactor as score?
@return: (pr score,
scaling factor of model P(r) to match expt. P(r))"""
from math import sqrt
sum_e = 0.0
sum_mm = 0.0
sum_em = 0.0
sum_ee = 0.0
mdl.saxs_pr(self, filename='None')
imaxr = min( int(maxr/self.dr_exp)+1, len(self.p_r_exp))
if (rfact):
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]
sum_ee = sum_ee + self.p_r_exp[ii]
scf = sum_ee/sum_mm
sum_ee = 0.0
for ii in range(0, imaxr):
sum_em = sum_em + abs(self.p_r_exp[ii]-scf*self.p_r_resamp[ii])
sum_ee = sum_ee + abs(self.p_r_exp[ii])
psc = sum_em/sum_ee
else:
if (use_err):
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]*self.p_r_resamp[ii]/self.p_r_sig[ii]
sum_em = sum_em + self.p_r_exp[ii]*self.p_r_resamp[ii]/self.p_r_sig[ii]
sum_ee = sum_ee + self.p_r_exp[ii]*self.p_r_exp[ii]/self.p_r_sig[ii]
else:
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]*self.p_r_resamp[ii]
sum_em = sum_em + self.p_r_exp[ii]*self.p_r_resamp[ii]
sum_ee = sum_ee + self.p_r_exp[ii]*self.p_r_exp[ii]
norm_e = sqrt(sum_ee)
scf = sum_em / sum_mm
scf_norm = scf / norm_e
#psc = sum_mm*scf*scf + sum_ee - 2.*scf * sum_em
#psc = psc / (sqrt(sum_ee))
psc = sum_mm*scf_norm*scf_norm + 1 - 2.*scf_norm * sum_em / norm_e
if (filename):
fhandle=open(filename, 'w')
for ii in range(0, len(self.p_r_exp)):
tmp = scf*self.p_r_resamp[ii]
fhandle.write('%10.5f ' % self.r_exp[ii] +
' %15.6f\n' % tmp)
fhandle.close()
return (psc, scf)
def ini_saxs(self, atmsel,
filename='$(LIB)/formfactors-int_tab_solvation.lib',
s_min=0.0, s_max=2.0, maxs=100, nmesh=100, natomtyp=15,
represtyp='heav', wswitch='uniform', s_hybrid=0.0,
s_low=0.0, s_hi=2.0, spaceflag='real', rho_solv=0.334,
use_lookup=True, nr=5000, dr=0.1, nr_exp=300, dr_exp=1.0,
use_offset=False, use_rolloff=False, use_conv=False,
mixflag=False, pr_smooth=False):
"""Initialize saxsdata
@param atmsel: selection of atoms
@param s_min: minimum frequency in reciprocal space in A^-1
@param s_max: maximum frequency in reciprocal space in A^-1
@param maxs: maximum number of frequencies
@param nmesh: actual number of frequencies (<= maxs)
@param natomtyp: number of 'atoms', i.e. scattering centers
@param represtyp: representation: 'heav', 'allh', or 'CA'
@param filename: filename of the library for formfactors
@param wswitch: character for filter of scoring function options:
'unity', 'sq', or 'hybrid'
@param s_hybrid: frequency above which $ s^2$ weighting is applied
if wswitch='hybrid'
@param s_low: bandpass filter in A^-1 - lower cutoff
@param s_hi: bandpass filter in A^-1 - higher cutoff
@param spaceflag: how should I(s) be computed? 'real' space via P(r)
or 'reciprocal'? 'real' is more than a magnitude
faster but less accurate for high resolution
@param rho_solv: electron density of solvent;
default=0.334 e/A^-3 (H_2O)
@param use_lookup: use lookup tables for SINC and COS function -
significant increase in speed for 'reciprocal' mode
@param nr: number of points for P(r) sampling
@param dr: spacing (sampling) of P(r) in A
@param nr_exp: number of points for P_exp(r) sampling
@param dr_exp: spacing (sampling) of P(r) in A
@param use_offset: allow for additive constant in expt. spectrum
@param use_rolloff: allow for Gaussian rolloff of model spectrum
@param use_conv: convolute with nitrogen formfactor to mimic hydr
layer
@param mixflag: different conformations present? implemented for
HtpG project
@param pr_smooth: smoothing of P(r)"""
(inds, mdl) = atmsel.get_atom_indices()
return _modeller.mod_saxs_ini(self.modpt, mdl.modpt, inds, s_min,
s_max, maxs, nmesh, natomtyp, represtyp,
filename, wswitch, s_hybrid, s_low, s_hi,
spaceflag, rho_solv, use_lookup, nr, dr,
nr_exp, dr_exp, use_offset, use_rolloff,
use_conv, mixflag, pr_smooth)
def saxs_read(self, filename):
"""Read in experimental SAXS data"""
return _modeller.mod_saxs_read(self.modpt, filename)
def read(self,
saxsfilename,
atmsel,
formfacfilename='$(LIB)/formfactors-int_tab_solvation.lib',
natomtyp=15,
represtyp='heav', wswitch='uniform', s_hybrid=0.0,
s_low=None, s_hi=None,
spaceflag='real', rho_solv=0.334,
use_lookup=True, nr=5000, dr=0.1, nr_exp=300, dr_exp=1.0,
use_offset=False, use_rolloff=False, use_conv=False,
mixflag=False, pr_smooth=False):
"""Read in experimental SAXS data and initialize saxsdata
@param saxsfilename: Name of file containing SAXS spectrum
@param atmsel: selection of atoms
@param s_min: minimum frequency in reciprocal space in A^-1
@param s_max: maximum frequency in reciprocal space in A^-1
@param natomtyp: number of 'atoms', i.e. scattering centers
@param represtyp: representation: 'heav', 'allh', or 'CA'
@param formfacfilename: filename of the library for formfactors
@param wswitch: character for filter of scoring function options:
'unity', 'sq', or 'hybrid'
@param s_hybrid: frequency above which $ s^2$ weighting is applied
if wswitch='hybrid'
@param s_low: bandpass filter in A^-1 - lower cutoff
@param s_hi: bandpass filter in A^-1 - higher cutoff
@param spaceflag: how should I(s) be computed? 'real' space via P(r)
or 'reciprocal'? 'real' is more than a magnitude
faster but less accurate for high resolution
@param rho_solv: electron density of solvent;
default=0.334 e/A^-3 (H_2O)
@param use_lookup: use lookup tables for SINC and COS function -
significant increase in speed for 'reciprocal' mode
@param nr: number of points for P(r) sampling
@param dr: spacing (sampling) of P(r) in A
@param nr_exp: number of points for P_exp(r) sampling
@param dr_exp: spacing (sampling) of P(r) in A
@param use_offset: allow for additive constant in expt. spectrum
@param use_rolloff: allow for Gaussian rolloff of model spectrum
@param use_conv: convolute with nitrogen formfactor to mimic hydr
layer
@param mixflag: different conformations present? implemented for
HtpG project
@param pr_smooth: smoothing of P(r)"""
try:
fh = open(saxsfilename,'r')
except:
print "file "+saxsfilename+" not found :("
return
fh.close()
ns = 0
s_min = 10.
s_max = 0.
for line in open(saxsfilename,'r'):
s = line.split()
# '#' is comment
if (not s[0][0] == '#'):
ns = ns +1
if ( float(s[0]) > s_max):
s_max = float(s[0])
if ( float(s[0]) < s_min):
s_min = float(s[0])
if (not s_low):
s_low = s_min - .001
if (not s_hi):
s_hi = s_max + .001
print "s_min="+str(s_min)+", s_max="+str(s_max)
print "s_low="+str(s_low)+", s_hi="+str(s_hi)
self.ini_saxs(atmsel,
filename=formfacfilename,
s_min=s_min, s_max=s_max, maxs=ns, nmesh=ns, natomtyp=natomtyp,
represtyp=represtyp, wswitch=wswitch, s_hybrid=s_hybrid,
s_low=s_low, s_hi=s_hi, spaceflag=spaceflag, rho_solv=rho_solv,
use_lookup=use_lookup, nr=nr, dr=dr, nr_exp=nr_exp, dr_exp=dr_exp,
use_offset=use_offset, use_rolloff=use_rolloff, use_conv=use_conv,
mixflag=mixflag, pr_smooth=pr_smooth)
self.saxs_read(saxsfilename)
def saxs_pr_read(self, filename):
"""Read in experimental P(r)"""
return _modeller.mod_saxs_pr_read(self.modpt, filename)
def __get_s_hybrid(self):
return _modeller.mod_saxsdata_s_hybrid_get(self.modpt)
def __set_s_hybrid(self, val):
return _modeller.mod_saxsdata_s_hybrid_set(self.modpt, val)
def __get_s_max(self):
return _modeller.mod_saxsdata_s_max_get(self.modpt)
def __set_s_max(self, val):
return _modeller.mod_saxsdata_s_max_set(self.modpt, val)
def __get_s_min(self):
return _modeller.mod_saxsdata_s_min_get(self.modpt)
def __set_s_min(self, val):
return _modeller.mod_saxsdata_s_min_set(self.modpt, val)
def __get_s_low(self):
return _modeller.mod_saxsdata_s_low_get(self.modpt)
def __set_s_low(self, val):
return _modeller.mod_saxsdata_s_low_set(self.modpt, val)
def __get_s_hi(self):
return _modeller.mod_saxsdata_s_hi_get(self.modpt)
def __set_s_hi(self, val):
return _modeller.mod_saxsdata_s_hi_set(self.modpt, val)
def __get_normsq_exp(self):
return _modeller.mod_saxsdata_normsq_exp_get(self.modpt)
def __set_normsq_exp(self, val):
return _modeller.mod_saxsdata_normsq_exp_set(self.modpt, val)
def __get_ns(self):
return _modeller.mod_saxsdata_ns_get(self.modpt)
def __get_nr(self):
return _modeller.mod_saxsdata_nr_get(self.modpt)
def __get_nr_exp(self):
return _modeller.mod_saxsdata_nr_exp_get(self.modpt)
def __set_nr_exp(self, val):
return _modeller.mod_saxsdata_nr_exp_set(self.modpt, val)
def __get_dr(self):
return _modeller.mod_saxsdata_dr_get(self.modpt)
def __get_dr_exp(self):
return _modeller.mod_saxsdata_dr_exp_get(self.modpt)
def __set_dr_exp(self, val):
return _modeller.mod_saxsdata_dr_exp_set(self.modpt, val)
def __get_c(self):
return _modeller.mod_saxsdata_c_get(self.modpt)
def __set_c(self, val):
return _modeller.mod_saxsdata_c_set(self.modpt, val)
def __get_rolloff(self):
return _modeller.mod_saxsdata_rolloff_get(self.modpt)
def __set_rolloff(self, val):
return _modeller.mod_saxsdata_rolloff(self.modpt, val)
def __get_bfac(self):
return _modeller.mod_saxsdata_bfac_get(self.modpt)
def __set_bfac(self, val):
return _modeller.mod_saxsdata_bfac(self.modpt, val)
def __get_chi_sq(self):
return _modeller.mod_saxsdata_chi_sq_get(self.modpt)
def __set_chi_sq(self, val):
return _modeller.mod_saxsdata_chi_sq_set(self.modpt, val)
def __get_rho_solv(self):
return _modeller.mod_saxsdata_rho_solv_get(self.modpt)
def __set_rho_solv(self, val):
return _modeller.mod_saxsdata_rho_solv_set(self.modpt, val)
def __get_offset(self):
return _modeller.mod_saxsdata_offset_get(self.modpt)
def __set_offset(self, val):
return _modeller.mod_saxsdata_offset(self.modpt, val)
def __get_intensity(self):
ptarr = _modeller.mod_saxsdata_intensity_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_intensity(self, val):
modlist.set_fixlist(self.intensity, val)
def __get_int_exp(self):
ptarr = _modeller.mod_saxsdata_int_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_int_exp(self, val):
modlist.set_fixlist(self.int_exp, val)
def __get_s(self):
ptarr = _modeller.mod_saxsdata_s_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_s(self, val):
modlist.set_fixlist(self.s, val)
def __get_sigma_exp(self):
ptarr = _modeller.mod_saxsdata_sigma_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_sigma_exp(self, val):
modlist.set_fixlist(self.sigma_exp, val)
def __get_p_r(self):
ptarr = _modeller.mod_saxsdata_p_r_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr)
def __set_p_r(self, val):
modlist.set_fixlist(self.p_r, val)
def __get_p_r_exp(self):
ptarr = _modeller.mod_saxsdata_p_r_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
def __set_p_r_exp(self, val):
modlist.set_fixlist(self.p_r_exp, val)
def __get_r_exp(self):
ptarr = _modeller.mod_saxsdata_r_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
def __set_r_exp(self, val):
modlist.set_fixlist(self.r_exp, val)
def __get_p_r_resamp(self):
ptarr = _modeller.mod_saxsdata_p_r_resamp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
def __set_p_r_resamp(self, val):
modlist.set_fixlist(self.p_r_resamp, val)
def __get_p_r_sig(self):
ptarr = _modeller.mod_saxsdata_p_r_sig_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
def __set_p_r_sig(self, val):
modlist.set_fixlist(self.p_r_sig, val)
modpt = property(__get_modpt)
s_hybrid = property(__get_s_hybrid, __set_s_hybrid)
s_max = property(__get_s_max, __set_s_max)
s_min = property(__get_s_min, __set_s_min)
s_low = property(__get_s_low, __set_s_low)
s_hi = property(__get_s_hi, __set_s_hi )
normsq_exp = property(__get_normsq_exp, __set_normsq_exp)
c = property(__get_c, __set_c)
ns = property(__get_ns)
nr = property(__get_nr)
nr_exp = property(__get_nr_exp, __set_nr_exp)
dr = property(__get_dr)
dr_exp = property(__get_dr_exp, __set_dr_exp)
intensity = property(__get_intensity, __set_intensity)
s = property(__get_s, __set_s)
int_exp = property(__get_int_exp, __set_int_exp)
sigma_exp = property(__get_sigma_exp, __set_sigma_exp)
p_r = property(__get_p_r, __set_p_r)
p_r_exp = property(__get_p_r_exp, __set_p_r_exp)
r_exp = property(__get_r_exp, __set_r_exp)
p_r_resamp = property(__get_p_r_resamp, __set_p_r_resamp)
p_r_sig = property(__get_p_r_sig, __set_p_r_sig)
chi_sq = property(__get_chi_sq, __set_chi_sq)
rho_solv = property(__get_rho_solv, __set_rho_solv)
rolloff= property(__get_rolloff, __set_rolloff)
bfac = property(__get_bfac, __set_bfac)
offset = property(__get_offset, __set_offset)
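# A rough usage sketch, assuming a Modeller environment, model and atom selection
# already exist; ``env``, ``mdl``, ``atmsel`` and the file names below are
# placeholders, not defined in this module:
#
#     saxs = saxsdata(env)
#     saxs.read('saxs_profile.dat', atmsel)       # expt. I(s) spectrum + initialization
#     saxs.saxs_pr_read('pr_experimental.dat')    # expt. P(r), consumed by pr_score()
#     score, scale = saxs.pr_score(mdl, maxr=100.0, filename='pr_model.dat')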
| gpl-3.0 | -8,359,119,961,480,063,000 | 43.824742 | 112 | 0.570607 | false |
APC524/tsap | tsap/cluster.py | 1 | 8181 |
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import pdist, squareform
import scipy.linalg as Linalg
from scipy.stats import multivariate_normal as mvn
class Cluster(object):
def __init__(self, X):
"""Return a new object to cluster data based on selected clutering
algorithm.
Example usage: clusterObj = Cluster(X)
X: numpy array, shape (n_samples, n_features)"""
self._X = X
self._nsample = X.shape[0]
self._nfeature = X.shape[1]
def assign_label(self, Centers):
""" Assign labels to the data points
Input:
self
Centers: numpy array, shape (n_clusters, n_features) the centers of each cluster
Output:
clusters: the index of data points in each class
labels: the label of each class
"""
numClusters = Centers.shape[0]
clusters = {}
labels = []
for sample_idx in range(self._nsample):
x = self._X[sample_idx, :]
dist = []
label_x = min( [(i, np.linalg.norm( x-Centers[i,:] ) ) for i in range(numClusters)], key = lambda t:t[1])[0]
try:
clusters[label_x].append(sample_idx)
except KeyError:
clusters[label_x] = [sample_idx]
labels.append(label_x)
return clusters, labels
def kMeans(self, nClusters, maxIter=300):
"""
K-means clustering algorithm.
Function usage: kMeans(nClusters, maxIter, nInit)
Inputs:
nClusters : int
The number of clusters to form as well as the number of
centroids to generate.
maxIter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
Returns:
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the i-th
observation is closest to.
clusters : identity of the data point in the cluster
"""
# Initialize K-means algorithm by randomly sampling k points
idx = np.random.choice(range(self._nsample), size= nClusters, replace=False)
centroid = self._X[idx, :]
# fix centroid, get the label of each data point
old_clusters, old_labels = self.assign_label( Centers = centroid )
# set flag of convergence
flag_converge = False
nite = 0 # iteration counter
while not flag_converge:
nite = nite + 1
if nite > maxIter:
raise RuntimeError('Exceeds maximum number of iterations')
# obtain new estimate of clusters
for i in range(nClusters):
class_index = old_clusters[i]
centroid[i,:] = np.mean(self._X[class_index], axis = 0)
new_clusters, new_labels = self.assign_label( Centers = centroid)
if old_labels == new_labels:
flag_converge = True
old_labels = new_labels
old_clusters = new_clusters
clusters = new_clusters
labels = new_labels
return centroid, labels, clusters
def H_clustering(self, nClusters):
"""
Perform hierarchical clustering
"""
# construct hierarchical clustering matrix
Z = linkage(self._X, metric='euclidean', method = 'ward')
# obtain labels
labels = fcluster(Z, nClusters, criterion='maxclust')
clusters = {}
centroid = np.zeros( (nClusters, self._nfeature) )
for i in range(nClusters):
class_index = np.where( labels == i)[0]
clusters[i]= class_index
centroid[i,:] = np.mean(self._X[class_index, :], axis = 0)
return centroid, labels, clusters
# Gaussian mixture clustering using EM algorithm
def Gaussian_mixture(self, nClusters, max_iter = 300):
# Initialize EM algorithm by randomly sampling k points as centers
idx = np.random.choice(range(self._nsample), size= nClusters, replace=False)
centroid = self._X[idx, :] # initial mean vectors
# initialize the covariance matrices for each gaussians
Sigma= [np.eye(self._nfeature)] * nClusters
# initialize the probabilities/weights for each gaussians
w = [1./nClusters] * nClusters
# responsibility matrix is initialized to all zeros
# we have responsibility for each of n points for eack of k gaussians
R = np.zeros((self._nsample, nClusters))
### log_likelihoods
log_likelihoods = []
P = lambda mu, s: np.linalg.det(s) ** -.5 * (2 * np.pi) ** (-self._X.shape[1]/2.) \
* np.exp(-.5 * np.einsum('ij, ij -> i',\
self._X - mu, np.dot(np.linalg.inv(s) , (self._X - mu).T).T ) )
# Iterate till max_iters iterations
while len(log_likelihoods) < max_iter:
# E - Step
## Vectorized implementation of e-step equation to calculate the
## membership for each of k -gaussians
for k in range(nClusters):
R[:, k] = w[k] * P(centroid[k], Sigma[k])
### Likelihood computation
log_likelihood = np.sum(np.log(np.sum(R, axis = 1)))
log_likelihoods.append(log_likelihood)
## Normalize so that the responsibility matrix is row stochastic
R = (R.T / np.sum(R, axis = 1)).T
## The number of datapoints belonging to each gaussian
N_ks = np.sum(R, axis = 0)
# M Step
## calculate the new mean and covariance for each gaussian by
## utilizing the new responsibilities
for k in range(nClusters):
## means
centroid[k] = 1. / N_ks[k] * np.sum(R[:, k] * self._X.T, axis = 1).T
x_mu = np.matrix(self._X - centroid[k])
## covariances
Sigma[k] = np.array(1 / N_ks[k] * np.dot(np.multiply(x_mu.T, R[:, k]), x_mu))
## and finally the probabilities
w[k] = 1. / self._nsample * N_ks[k]
# check for convergence
if len(log_likelihoods) < 2 : continue
if np.abs(log_likelihood - log_likelihoods[-2]) < 1e-6: break
clusters, labels = self.assign_label( Centers = centroid )
return clusters, labels, centroid, Sigma, w
def Spectral(self, nClusters = 5, cluster_metric = 'euclidean', sigma = 0.05 ):
""" Spectral Clustering
cluster_metric is the metric used to compute the affinity matrix
sigma is the standard deviation used in the Gaussian kernel
"""
num_cls = nClusters
# compute the affinity matrix
aff_mat = squareform( pdist (self._X, metric = cluster_metric)/sigma )
np.fill_diagonal( aff_mat, 1)
aff_mat = 1 / aff_mat
np.fill_diagonal( aff_mat, 0)
# construct D^{-1/2} by taking the square root of the sum of column of A
#print(np.sqrt( np.sum(aff_mat, axis = 0)))
D_mat = np.diag( 1 / np.sqrt( np.sum(aff_mat, axis = 0)) )
# graph Laplacian, an n by n matrix
L = np.dot( np.dot(D_mat, aff_mat), D_mat )
# Now that we have the graph Laplacian, spectral clustering does eigen decomposition on L and obtain the first k eigenvectors
_ , X_embed = Linalg.eigh(L, eigvals = (self._nsample - nClusters, self._nsample-1))
# X_embed can be viewd as the embedding of data
# normalize the rows of X_embed to unit norm
row_norm = np.linalg.norm( X_embed, axis = 1).reshape( self._nsample, 1)
Y = np.divide(X_embed, row_norm) # n by k matrix, feed to K means
model1 = Cluster(Y)
_, labels, clusters = model1.kMeans(nClusters = num_cls)
return labels, clusters, X_embed
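# A small usage sketch with toy data (values are illustrative only):
#
#     X = np.array([[1.0, 1.1], [0.9, 1.0], [8.0, 8.2], [7.9, 8.1]])
#     model = Cluster(X)
#     centroid, labels, clusters = model.kMeans(nClusters=2)
#     # or: centroid, labels, clusters = model.H_clustering(nClusters=2)
#     # or: labels, clusters, X_embed = model.Spectral(nClusters=2, sigma=0.5)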
| gpl-3.0 | 5,548,100,727,968,990,000 | 35.199115 | 133 | 0.569124 | false |
i3visio/osrframework | osrframework/domains/geographic_tld.py | 1 | 2833 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
##################################################################################
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
tld = {
".capetown", # Cape Town, South Africa
".durban", # Durban, South Africa
".joburg", # Johannesburg, South Africa
".asia", # Asia-Pacific region
".krd", # Kurdistan
".nagoya", # Nagoya, Japan
".okinawa", # Okinawa, Japan
".ryukyu", # Ryukyu Islands, Japan
".taipei", # Taipei, Taiwan
".tatar", # Tatar peoples and places
".tokyo", # Tokyo, Japan
".yokohama", # Yokohama, Japan
".alsace", # Alsace, France
".amsterdam", # Amsterdam, The Netherlands
".barcelona", # Barcelona, Spain
".bayern", # Bavaria, Germany
".berlin", # Berlin, Germany
".brussels", # Brussels, Belgium
".budapest", # Budapest, Hungary
".bzh", # Brittany, France
".cat", # Catalonia; Catalan language and culture
".cologne", # Cologne, Germany
".corsica", # Corsica, France
".cymru", # Wales
".eus", # Basque, Spain and France
".frl", # Friesland
".gal", # Galiza, Spain
".gent", # Ghent, Belgium
".hamburg", # Hamburg, Germany
".irish", # Ireland; global Irish community
".koeln", # Cologne, Germany
".london", # London, United Kingdom
".madrid", # Madrid, Spain
".moscow", # Moscow, Russia
".nrw", # North Rhine-Westphalia, Germany
".paris", # Paris, France
".ruhr", # Ruhr, Germany
".saarland", # Saarland, Germany
".scot", # Scotland
".tirol", # Tyrol, now split between Austria and Italy
".vlaanderen", # Flanders, Belgium
".wales", # Wales
".wien", # Vienna, Austria
".zuerich", # Zurich, Switzerland
".miami", # Miami, USA
".nyc", # New York City, USA
".quebec", # Québec, Canada
".vegas", # Las Vegas, NV, USA
".kiwi", # New Zealanders (a.k.a. kiwis)
".melbourne", # Melbourne, Australia
".sydney", # Sydney, Australia
".lat", # Latin America
".rio", # Rio de Janeiro, Brazil
".xn--1qqw23a",
".xn--80adxhks", # .москва
}
| agpl-3.0 | 2,706,459,710,472,871,400 | 35.701299 | 82 | 0.580679 | false |
tensorflow/ranking | tensorflow_ranking/python/metrics.py | 1 | 27729 | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines ranking metrics as TF ops.
The metrics here are meant to be used during the TF training. That is, a batch
of instances in the Tensor format are evaluated by ops. It works with listwise
Tensors only.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import tensorflow as tf
from tensorflow_ranking.python import metrics_impl
from tensorflow_ranking.python import utils
_DEFAULT_GAIN_FN = lambda label: tf.pow(2.0, label) - 1
_DEFAULT_RANK_DISCOUNT_FN = lambda rank: tf.math.log(2.) / tf.math.log1p(rank)
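# For reference, the defaults above implement the standard DCG weighting:
#   gain(label)    = 2**label - 1
#   discount(rank) = log(2) / log(1 + rank) = 1 / log2(1 + rank)
# e.g. a label of 3.0 contributes a gain of 7.0, and rank 1 gets a discount of 1.0.
# Both can be swapped out through the `gain_fn` / `rank_discount_fn` arguments of
# `make_ranking_metric_fn` below; an illustrative alternative with linear gain:
#
#   linear_gain_fn = lambda label: label
#   inverse_discount_fn = lambda rank: 1.0 / rank
#   # make_ranking_metric_fn(RankingMetricKey.NDCG, gain_fn=linear_gain_fn,
#   #                        rank_discount_fn=inverse_discount_fn)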
class RankingMetricKey(object):
"""Ranking metric key strings."""
# Mean Reciprocal Rank. For binary relevance.
MRR = 'mrr'
# Average Relevance Position.
ARP = 'arp'
# Normalized Discounted Cumulative Gain.
NDCG = 'ndcg'
# Discounted Cumulative Gain.
DCG = 'dcg'
# Precision. For binary relevance.
PRECISION = 'precision'
# Recall. For binary relevance.
RECALL = 'recall'
# Mean Average Precision. For binary relevance.
MAP = 'map'
# PrecisionIA. For binary relevance of subtopics.
PRECISION_IA = 'precision_ia'
# Ordered Pair Accuracy.
ORDERED_PAIR_ACCURACY = 'ordered_pair_accuracy'
# Alpha Discounted Cumulative Gain.
ALPHA_DCG = 'alpha_dcg'
# Binary Preference.
BPREF = 'bpref'
def compute_mean(metric_key,
labels,
predictions,
weights=None,
topn=None,
name=None):
"""Returns the mean of the specified metric given the inputs.
Args:
metric_key: A key in `RankingMetricKey`.
labels: A `Tensor` of the same shape as `predictions` representing
relevance.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: An `integer` specifying the cutoff of how many items are considered in
the metric.
name: A `string` used as the name for this metric.
Returns:
A scalar as the computed metric.
"""
metric_dict = {
RankingMetricKey.ARP: metrics_impl.ARPMetric(metric_key),
RankingMetricKey.MRR: metrics_impl.MRRMetric(metric_key, topn),
RankingMetricKey.NDCG: metrics_impl.NDCGMetric(name, topn),
RankingMetricKey.DCG: metrics_impl.DCGMetric(name, topn),
RankingMetricKey.PRECISION: metrics_impl.PrecisionMetric(name, topn),
RankingMetricKey.RECALL: metrics_impl.RecallMetric(name, topn),
RankingMetricKey.MAP: metrics_impl.MeanAveragePrecisionMetric(name, topn),
RankingMetricKey.ORDERED_PAIR_ACCURACY: metrics_impl.OPAMetric(name),
RankingMetricKey.BPREF: metrics_impl.BPrefMetric(name, topn),
}
assert metric_key in metric_dict, ('metric_key %s not supported.' %
metric_key)
# TODO: Add mask argument for metric.compute() call
metric, weight = metric_dict[metric_key].compute(labels, predictions, weights)
return tf.compat.v1.div_no_nan(
tf.reduce_sum(input_tensor=metric * weight),
tf.reduce_sum(input_tensor=weight))
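# Illustrative call with toy values (mirroring the example in `eval_metric`
# further below); `compute_mean` returns a scalar tensor:
#
#   labels      = tf.constant([[0., 0., 1.], [0., 1., 2.]])
#   predictions = tf.constant([[1., 3., 2.], [1., 2., 3.]])
#   # compute_mean(RankingMetricKey.MRR, labels, predictions, topn=3)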
def make_ranking_metric_fn(metric_key,
weights_feature_name=None,
topn=None,
name=None,
gain_fn=_DEFAULT_GAIN_FN,
rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
**kwargs):
"""Factory method to create a ranking metric function.
Args:
metric_key: A key in `RankingMetricKey`.
weights_feature_name: A `string` specifying the name of the weights feature
in `features` dict.
topn: An `integer` specifying the cutoff of how many items are considered in
the metric.
name: A `string` used as the name for this metric.
gain_fn: (function) Transforms labels. A method to calculate gain parameters
used in the definitions of the DCG and NDCG metrics, where the input is
the relevance label of the item. The gain is often defined to be of the
form 2^label-1.
rank_discount_fn: (function) The rank discount function. A method to define
the discount parameters used in the definitions of DCG and NDCG metrics,
where the input in the rank of item. The discount function is commonly
defined to be of the form log(rank+1).
**kwargs: Other keyword arguments (e.g. alpha, seed).
Returns:
A metric fn with the following Args:
* `labels`: A `Tensor` of the same shape as `predictions` representing
graded relevance.
* `predictions`: A `Tensor` with shape [batch_size, list_size]. Each value
is the ranking score of the corresponding example.
* `features`: A dict of `Tensor`s that contains all features.
"""
def _get_weights(features):
"""Get weights tensor from features and reshape it to 2-D if necessary."""
weights = None
if weights_feature_name:
weights = tf.convert_to_tensor(value=features[weights_feature_name])
# Convert weights to a 2-D Tensor.
weights = utils.reshape_to_2d(weights)
return weights
def _average_relevance_position_fn(labels, predictions, features):
"""Returns average relevance position as the metric."""
return average_relevance_position(
labels, predictions, weights=_get_weights(features), name=name)
def _mean_reciprocal_rank_fn(labels, predictions, features):
"""Returns mean reciprocal rank as the metric."""
return mean_reciprocal_rank(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name)
def _normalized_discounted_cumulative_gain_fn(labels, predictions, features):
"""Returns normalized discounted cumulative gain as the metric."""
return normalized_discounted_cumulative_gain(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name,
gain_fn=gain_fn,
rank_discount_fn=rank_discount_fn)
def _discounted_cumulative_gain_fn(labels, predictions, features):
"""Returns discounted cumulative gain as the metric."""
return discounted_cumulative_gain(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name,
gain_fn=gain_fn,
rank_discount_fn=rank_discount_fn)
def _precision_fn(labels, predictions, features):
"""Returns precision as the metric."""
return precision(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name)
def _recall_fn(labels, predictions, features):
"""Returns recall as the metric."""
return recall(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name)
def _mean_average_precision_fn(labels, predictions, features):
"""Returns mean average precision as the metric."""
return mean_average_precision(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name)
def _precision_ia_fn(labels, predictions, features):
"""Returns an intent-aware precision as the metric."""
return precision_ia(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name)
def _ordered_pair_accuracy_fn(labels, predictions, features):
"""Returns ordered pair accuracy as the metric."""
return ordered_pair_accuracy(
labels, predictions, weights=_get_weights(features), name=name)
def _alpha_discounted_cumulative_gain_fn(labels, predictions, features):
"""Returns alpha discounted cumulative gain as the metric."""
return alpha_discounted_cumulative_gain(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name,
rank_discount_fn=rank_discount_fn,
**kwargs)
def _binary_preference_fn(labels, predictions, features):
"""Returns binary preference as the metric."""
return binary_preference(
labels,
predictions,
weights=_get_weights(features),
topn=topn,
name=name,
**kwargs)
metric_fn_dict = {
RankingMetricKey.ARP: _average_relevance_position_fn,
RankingMetricKey.MRR: _mean_reciprocal_rank_fn,
RankingMetricKey.NDCG: _normalized_discounted_cumulative_gain_fn,
RankingMetricKey.DCG: _discounted_cumulative_gain_fn,
RankingMetricKey.RECALL: _recall_fn,
RankingMetricKey.PRECISION: _precision_fn,
RankingMetricKey.MAP: _mean_average_precision_fn,
RankingMetricKey.PRECISION_IA: _precision_ia_fn,
RankingMetricKey.ORDERED_PAIR_ACCURACY: _ordered_pair_accuracy_fn,
RankingMetricKey.ALPHA_DCG: _alpha_discounted_cumulative_gain_fn,
RankingMetricKey.BPREF: _binary_preference_fn,
}
assert metric_key in metric_fn_dict, ('metric_key %s not supported.' %
metric_key)
return metric_fn_dict[metric_key]
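# A minimal usage sketch (the feature name "doc_weight" is an assumption for
# illustration): the returned function takes (labels, predictions, features),
# e.g. for use as an estimator eval metric:
#
#   ndcg5_fn = make_ranking_metric_fn(RankingMetricKey.NDCG, topn=5,
#                                     weights_feature_name="doc_weight")
#   # ndcg5_fn(labels, predictions, {"doc_weight": per_doc_weights})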
def mean_reciprocal_rank(labels,
predictions,
weights=None,
topn=None,
name=None):
"""Computes mean reciprocal rank (MRR).
Args:
labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
relevant example.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: An integer cutoff specifying how many examples to consider for this
metric. If None, the whole list is considered.
name: A string used as the name for this metric.
Returns:
A metric for the weighted mean reciprocal rank of the batch.
"""
metric = metrics_impl.MRRMetric(name, topn)
with tf.compat.v1.name_scope(metric.name, 'mean_reciprocal_rank',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
mrr, per_list_weights = metric.compute(labels, predictions, weights)
return tf.compat.v1.metrics.mean(mrr, per_list_weights)
def average_relevance_position(labels, predictions, weights=None, name=None):
"""Computes average relevance position (ARP).
This can also be named as average_relevance_rank, but this can be confusing
with mean_reciprocal_rank in acronyms. This name is more distinguishing and
has been used historically for binary relevance as average_click_position.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
name: A string used as the name for this metric.
Returns:
A metric for the weighted average relevance position.
"""
metric = metrics_impl.ARPMetric(name)
with tf.compat.v1.name_scope(metric.name, 'average_relevance_position',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
per_list_arp, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(per_list_arp, per_list_weights)
def precision(labels, predictions, weights=None, topn=None, name=None):
"""Computes precision as weighted average of relevant examples.
Args:
labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
relevant example.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
Returns:
A metric for the weighted precision of the batch.
"""
metric = metrics_impl.PrecisionMetric(name, topn)
with tf.compat.v1.name_scope(metric.name, 'precision',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
precision_at_k, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(precision_at_k, per_list_weights)
def recall(labels, predictions, weights=None, topn=None, name=None):
"""Computes recall as weighted average of relevant examples.
Args:
labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
relevant example.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape as predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
Returns:
A metric for the weighted recall of the batch.
"""
metric = metrics_impl.RecallMetric(name, topn)
with tf.compat.v1.name_scope(metric.name, 'recall',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call.
recall_at_k, per_list_weights = metric.compute(labels, predictions, weights)
return tf.compat.v1.metrics.mean(recall_at_k, per_list_weights)
def mean_average_precision(labels,
predictions,
weights=None,
topn=None,
name=None):
"""Computes mean average precision (MAP).
The implementation of MAP is based on Equation (1.7) in the following:
Liu, T-Y "Learning to Rank for Information Retrieval" found at
https://www.nowpublishers.com/article/DownloadSummary/INR-016
Args:
labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
relevant example.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
Returns:
A metric for the mean average precision.
"""
metric = metrics_impl.MeanAveragePrecisionMetric(name, topn)
with tf.compat.v1.name_scope(metric.name, 'mean_average_precision',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
per_list_map, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(per_list_map, per_list_weights)
def precision_ia(labels, predictions, weights=None, topn=None, name=None):
"""Computes Intent-Aware Precision as weighted average of relevant examples.
Args:
labels: A `Tensor` with shape [batch_size, list_size, subtopic_size]. A
nonzero value means that the example covers the corresponding subtopic.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
Returns:
A metric for the weighted precision of the batch.
"""
metric = metrics_impl.PrecisionIAMetric(name, topn)
with tf.compat.v1.name_scope(metric.name, 'precision_ia',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
precision_at_k, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(precision_at_k, per_list_weights)
def normalized_discounted_cumulative_gain(
labels,
predictions,
weights=None,
topn=None,
name=None,
gain_fn=_DEFAULT_GAIN_FN,
rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
"""Computes normalized discounted cumulative gain (NDCG).
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
gain_fn: (function) Transforms labels. Note that this implementation of
NDCG assumes that this function is *increasing* as a function of its
input.
rank_discount_fn: (function) The rank discount function. Note that this
implementation of NDCG assumes that this function is *decreasing* as a
function of its input.
Returns:
A metric for the weighted normalized discounted cumulative gain of the
batch.
"""
metric = metrics_impl.NDCGMetric(name, topn, gain_fn, rank_discount_fn)
with tf.compat.v1.name_scope(metric.name,
'normalized_discounted_cumulative_gain',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
per_list_ndcg, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(per_list_ndcg, per_list_weights)
def discounted_cumulative_gain(labels,
predictions,
weights=None,
topn=None,
name=None,
gain_fn=_DEFAULT_GAIN_FN,
rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN):
"""Computes discounted cumulative gain (DCG).
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
gain_fn: (function) Transforms labels.
rank_discount_fn: (function) The rank discount function.
Returns:
A metric for the weighted discounted cumulative gain of the batch.
"""
metric = metrics_impl.DCGMetric(name, topn, gain_fn, rank_discount_fn)
with tf.compat.v1.name_scope(name, 'discounted_cumulative_gain',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
dcg, per_list_weights = metric.compute(labels, predictions, weights)
return tf.compat.v1.metrics.mean(dcg, per_list_weights)
def alpha_discounted_cumulative_gain(
labels,
predictions,
weights=None,
topn=None,
name=None,
rank_discount_fn=_DEFAULT_RANK_DISCOUNT_FN,
alpha=0.5,
seed=None):
"""Computes alpha discounted cumulative gain (alpha-DCG).
Args:
labels: A `Tensor` with shape [batch_size, list_size, subtopic_size]. Each
value represents graded relevance to a subtopic: 1 for relevent subtopic,
0 for irrelevant, and -1 for paddings. When the actual subtopic number
of a query is smaller than the `subtopic_size`, `labels` will be padded
to `subtopic_size` with -1, similar to the paddings used for queries
with doc number less than list_size.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of shape [batch_size, list_size] or [batch_size, 1].
They are per-example and per-list, respectively.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
rank_discount_fn: A function of rank discounts. Default is set to
discount = 1 / log2(rank+1).
alpha: A float between 0 and 1. Originally introduced as an assessor error
in judging whether a document is covering a subtopic of the query. It
can also be interpreted as the inverse number of documents covering the
same subtopic reader needs to get and confirm the subtopic information
of a query.
seed: The ops-level random seed used in shuffle ties in `sort_by_scores`.
Returns:
A metric for the weighted alpha discounted cumulative gain of the batch.
"""
metric = metrics_impl.AlphaDCGMetric(name, topn, alpha=alpha,
rank_discount_fn=rank_discount_fn,
seed=seed)
with tf.compat.v1.name_scope(name, 'alpha_discounted_cumulative_gain',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
alpha_dcg, per_list_weights = metric.compute(labels, predictions, weights)
return tf.compat.v1.metrics.mean(alpha_dcg, per_list_weights)
def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
"""Computes the percentage of correctly ordered pair.
For any pair of examples, we compare their orders determined by `labels` and
`predictions`. They are correctly ordered if the two orders are compatible.
That is, labels l_i > l_j and predictions s_i > s_j and the weight for this
pair is the weight from the l_i.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
name: A string used as the name for this metric.
Returns:
A metric for the accuracy or ordered pairs.
"""
metric = metrics_impl.OPAMetric(name)
with tf.compat.v1.name_scope(metric.name, 'ordered_pair_accuracy',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
per_list_opa, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(per_list_opa, per_list_weights)
def binary_preference(labels,
predictions,
weights=None,
topn=None,
name=None,
use_trec_version=True):
"""Computes binary preference (BPref).
The implementation of BPref is based on the description in the following:
https://trec.nist.gov/pubs/trec15/appendices/CE.MEASURES06.pdf
BPref = 1 / R SUM_r(1 - |n ranked higher than r| / min(R, N))
Args:
labels: A `Tensor` of the same shape as `predictions`. A value >= 1 means a
relevant example.
predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
the ranking score of the corresponding example.
weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
former case is per-example and the latter case is per-list.
topn: A cutoff for how many examples to consider for this metric.
name: A string used as the name for this metric.
use_trec_version: A boolean to choose the version of the formula to use.
If False, than the alternative BPref formula will be used:
BPref = 1 / R SUM_r(1 - |n ranked higher than r| / R)
Returns:
A metric for binary preference metric of the batch.
"""
metric = metrics_impl.BPrefMetric(
name, topn, use_trec_version=use_trec_version)
with tf.compat.v1.name_scope(metric.name, 'binary_preference',
(labels, predictions, weights)):
# TODO: Add mask argument for metric.compute() call
per_list_bpref, per_list_weights = metric.compute(labels, predictions,
weights)
return tf.compat.v1.metrics.mean(per_list_bpref, per_list_weights)
def eval_metric(metric_fn, **kwargs):
"""A stand-alone method to evaluate metrics on ranked results.
Note that this method requires the arguments of the metric to be passed
explicitly. So, the correct usage is of the following form:
tfr.metrics.eval_metric(tfr.metrics.mean_reciprocal_rank,
labels=my_labels,
predictions=my_scores).
Here is a simple example showing how to use this method:
import tensorflow_ranking as tfr
scores = [[1., 3., 2.], [1., 2., 3.]]
labels = [[0., 0., 1.], [0., 1., 2.]]
weights = [[1., 2., 3.], [4., 5., 6.]]
tfr.metrics.eval_metric(
metric_fn=tfr.metrics.mean_reciprocal_rank,
labels=labels,
predictions=scores,
weights=weights)
Args:
metric_fn: (function) Metric definition. A metric appearing in the
TF-Ranking metrics module, e.g. tfr.metrics.mean_reciprocal_rank
**kwargs: A collection of argument values to be passed to the metric, e.g.
labels and predictions. See `_RankingMetric` and the various metric
definitions in tfr.metrics for the specifics.
Returns:
The evaluation of the metric on the input ranked lists.
Raises:
ValueError: One of the arguments required by the metric is not provided in
the list of arguments included in kwargs.
"""
metric_spec = inspect.getargspec(metric_fn)
metric_args = metric_spec.args
required_metric_args = (metric_args[:-len(metric_spec.defaults)])
for arg in required_metric_args:
if arg not in kwargs:
raise ValueError('Metric %s requires argument %s.' %
(metric_fn.__name__, arg))
args = {}
for arg in kwargs:
if arg not in metric_args:
raise ValueError('Metric %s does not accept argument %s.' %
(metric_fn.__name__, arg))
args[arg] = kwargs[arg]
with tf.compat.v1.Session() as sess:
metric_op, update_op = metric_fn(**args)
sess.run(tf.compat.v1.local_variables_initializer())
sess.run([metric_op, update_op])
return sess.run(metric_op)
| apache-2.0 | -822,287,671,420,614,300 | 40.20208 | 80 | 0.662664 | false |
GiulioDenardi/constrained-kmeans | constrainedKMeans.py | 1 | 7768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import random as rand;
import math;
from sets import Set;
import copy;
import numpy as np;
import itertools;
## mlCons / dlCons structure: [(instance, instance), ... (instance, instance)]
## instance / point structure: Set(attr1, attr2...)
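## For illustration (shapes assumed from how clusterize() consumes them below):
##   dataset = np.array([[1.0, 2.0], [1.2, 1.9], [8.0, 8.5], [7.9, 8.4]])
##   mlCons  = [(dataset[0], dataset[1])]   # must share a cluster
##   dlCons  = [(dataset[1], dataset[2])]   # must be placed in different clusters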
class ConstrainedKMeans:
def __init__(self, clustersQty, convergeThreshold, distFunction):
self.clustersQty = clustersQty;
self.convergeThreshold = convergeThreshold;
self.distFunction = distFunction;
#This function trains with the dataset.
def clusterize(self, dataset, mlCons, dlCons):
print('clusterizing with ', self.clustersQty, " clusters...");
self.clusters = self.__getInitialClusters(dataset.copy());
self.oldClusters = None;
while (not self.__converged()):
self.clusterPoints = {k : [] for k in self.clusters.keys()};
self.noCluster = [];
self.__assignPoints(dataset, mlCons, dlCons);
self.oldClusters = copy.deepcopy(self.clusters);
self.__updateClusters();
# print('Cluster x Points: ', self.clusterPoints);
# print('Clusters: ', self.clusters);
return self.clusterPoints;
#This function shall check if the function has stop converging (we should limit a threshold)
def __converged(self):
if (self.oldClusters != None):
for i in self.oldClusters.keys():
if (abs(np.std(self.oldClusters[i]) - np.std(self.clusters[i])) > self.convergeThreshold):
print('CONVERGE MORE!')
return False;
else:
return False;
return True;
#This function shall assign the points to the clusters according to its distance from the clusters.
def __assignPoints(self, dataset, mlCons, dlCons):
for point in dataset:
##TODO check if should insert the points with constraints first.
cluster = self.__findNearestCluster(point);
if (not self.__violateConstraints(point, cluster, mlCons, dlCons)):
self.clusterPoints[cluster].append(point);
else:
self.noCluster.append(point);
def __findNearestCluster(self, point):
choosenCluster = None;
choosenDist = None;
for c in self.clusters.items():
if (choosenCluster == None):
choosenCluster = c[0];
choosenDist = self.distFunction.getDist(point, c[1]);
elif (self.distFunction.getDist(point, c[1]) < choosenDist):
choosenCluster = c[0];
choosenDist = self.distFunction.getDist(point, c[1]);
return choosenCluster;
#This function shall move the clusters according to its points' positions.
def __updateClusters(self):
for cp in self.clusterPoints.keys():
for attr in range(0, self.clusters[cp].size):
self.clusters[cp][attr] = sum(x[attr] for x in self.clusterPoints[cp])/len(self.clusterPoints[cp]);
    #This function gets the initial clusters, avoiding picking the same point for more than one cluster.
##TODO do this better.
def __getInitialClusters(self, dataset):
if (np.unique(dataset).size < self.clustersQty):
            raise ValueError('The number of unique instances in the dataset must be greater than or equal to the number of clusters.');
keepChoosingPoints = True;
while (keepChoosingPoints):
cls = {k : rand.choice(dataset) for k in range(self.clustersQty)};
aux = set([tuple(cl) for cl in cls.values()]);
if (self.clustersQty == len(aux)):
keepChoosingPoints = False;
return cls;
    #This function is the article's violate-constraint function.
def __violateConstraints(self, point, cluster, mlCons, dlCons):
mustLink = [x for x in mlCons if (any((point == y).all() for y in x))];
if (len(mustLink) > 0):
for ml in mustLink:
if ((point == ml[0]).all()):
pairCluster = self.__findNearestCluster(ml[1]);
else:
pairCluster = self.__findNearestCluster(ml[0]);
if (pairCluster != cluster):
return True;
dontLink = [x for x in dlCons if (any((point == y).all() for y in x))];
if (len(dontLink) > 0):
for dl in dontLink:
if ((point == dl[0]).all()):
pairCluster = self.__findNearestCluster(dl[1]);
else:
pairCluster = self.__findNearestCluster(dl[0]);
if (pairCluster == cluster):
return True;
return False;
class DistanceMetrics:
class EuclidianDistance:
def getDist(self, X, Y):
tuples = zip(X, Y)
distance = 0
for x, y in tuples:
distance += (x - y) ** 2
return math.sqrt(distance)
class SimpleMatchDistance:
def getDist(self, X, Y):
tuples = zip(X, Y)
distance = 0
for x, y in tuples:
if(x != y):
distance += 1
return distance
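# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). The dataset, constraints, cluster count and threshold below are
# arbitrary example values. Depending on the random initial centroids,
# constrained points can be left unassigned, so the clusterize() call is shown
# but not executed on import.
def _example_clusterize_setup():
    dataset = np.array([[1.0, 1.0], [1.1, 0.9], [5.0, 5.2], [5.1, 4.9]])
    mlCons = [(dataset[0], dataset[1])]   # must end up in the same cluster
    dlCons = [(dataset[0], dataset[2])]   # must end up in different clusters
    ckm = ConstrainedKMeans(2, 0.01, DistanceMetrics.EuclidianDistance())
    # clusterPoints = ckm.clusterize(dataset, mlCons, dlCons)
    return ckm, dataset, mlCons, dlCons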
class ReportResults:
def __print_index(self, source, item_to_search):
list_items = list(source)
for i, item in enumerate(list_items):
if((item==item_to_search).all()):
print(i, end=' ')
def print_clusters(self, dataset, results):
for i in range(len(results)):
cluster = results[i]
print("\nCluster " + str(i) + "(" + str(len(cluster)) + " items):")
for item in cluster:
self.__print_index(dataset, item)
print("\n")
def __item_in_cluster(self, results, constraint_pair):
for i in range(len(results)):
cluster = list(results[i])
res = 0
for i, item in enumerate(cluster):
if((item==constraint_pair[0]).all() or (item==constraint_pair[1]).all()):
res+=1
if(res==2):
return True
return False
def __compute_evaluation_must_link(self, n_ml, a):
return a/float(n_ml)
def __compute_evaluation_cannot_link(self, n_cl, b):
return b/float(n_cl)
def __compute_evaluation_ordinary(self, n, a, b):
return (a + b)/float(n)
def __compute_evaluation_overall(self, n, a, b):
return ((a + b)/((n*(float(n)-1))/2))
def print_evaluation(self, dataset, results, must_link, cannot_link):
n_ml = len(must_link)
n_cl = len(cannot_link)
n = n_ml + n_cl
a=0
for i in range(len(must_link)):
constraint = must_link[i]
if(self.__item_in_cluster(results,constraint)):
a+=1
b=0
for i in range(len(cannot_link)):
constraint = cannot_link[i]
if(not self.__item_in_cluster(results,constraint)):
b+=1
evaluation_must_link = self.__compute_evaluation_must_link(n_ml, a)
evaluation_cannot_link = self.__compute_evaluation_cannot_link(n_cl, b)
evaluation_ordinary = self.__compute_evaluation_ordinary(n, a, b)
evaluation_overall = self.__compute_evaluation_overall(n, a, b)
print("n=" + str(n))
print("a=" + str(a))
print("b=" + str(b))
print("evaluation_must_link=" + str(evaluation_must_link))
print("evaluation_cannot_link=" + str(evaluation_cannot_link))
print("evaluation_ordinary=" + str(evaluation_ordinary))
print("evaluation_overall=" + str(evaluation_overall)) | apache-2.0 | -6,149,740,040,443,396,000 | 36.331731 | 117 | 0.565688 | false |
all-of-us/raw-data-repository | tests/cron_job_tests/test_enrollment_status_checking.py | 1 | 3062 | from datetime import datetime, timedelta
from rdr_service.participant_enums import EnrollmentStatus, PhysicalMeasurementsStatus, QuestionnaireStatus, \
SampleStatus
from tests.helpers.unittest_base import BaseTestCase
class CheckEnrollmentStatusTest(BaseTestCase):
"""Tests checking enrollment status of participants cron job"""
def setUp(self):
super().setUp()
# test core participant meets requirements
def test_core_meets_req(self):
from rdr_service.offline.enrollment_check import check_enrollment
person, ps_dao = self.setup_participant()
# missing questionnaires and pm status
self.assertEqual(check_enrollment(create_ticket=False), False)
# update required attributes
person.questionnaireOnLifestyle = QuestionnaireStatus.SUBMITTED
person.questionnaireOnOverallHealth = QuestionnaireStatus.SUBMITTED
person.questionnaireOnTheBasics = QuestionnaireStatus.SUBMITTED
person.physicalMeasurementsStatus = PhysicalMeasurementsStatus.COMPLETED
with ps_dao.session() as session:
session.add(person)
self.assertEqual(check_enrollment(create_ticket=False), True)
def setup_participant(self):
""" A full participant (core) is defined as:
completed the primary informed consent process
HIPAA Authorization/EHR consent
required PPI modules (Basics, Overall Health, and Lifestyle modules)
provide physical measurements
at least one biosample suitable for genetic sequencing.
"""
twenty_nine = datetime.now() - timedelta(days=29)
p = self.data_generator._participant_with_defaults(participantId=6666666, biobankId=9999999, version=1,
lastModified=twenty_nine, signUpTime=twenty_nine)
valid_kwargs = dict(
participantId=p.participantId,
biobankId=p.biobankId,
withdrawalStatus=p.withdrawalStatus,
dateOfBirth=datetime(2000, 1, 1),
firstName="foo",
lastName="bar",
zipCode="12345",
sampleStatus1ED04=SampleStatus.RECEIVED,
sampleStatus1SAL2=SampleStatus.RECEIVED,
samplesToIsolateDNA=SampleStatus.RECEIVED,
consentForStudyEnrollmentTime=datetime(2019, 1, 1),
numCompletedBaselinePPIModules=3,
consentForStudyEnrollment=1,
consentForElectronicHealthRecords=1,
enrollmentStatus=EnrollmentStatus.FULL_PARTICIPANT,
lastModified=twenty_nine)
person = self.data_generator._participant_summary_with_defaults(**valid_kwargs)
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.dao.participant_dao import ParticipantDao
dao = ParticipantDao()
with dao.session() as session:
session.add(p)
ps_dao = ParticipantSummaryDao()
with ps_dao.session() as session:
session.add(person)
return person, ps_dao
| bsd-3-clause | -156,174,767,499,015,650 | 44.029412 | 111 | 0.684847 | false |
pycroscopy/pycroscopy | pycroscopy/processing/histogram.py | 1 | 3539 | """
Utilities for building 2D histograms of spectroscopic data
Created on Mar 1, 2016
@author: Chris Smith -- [email protected]
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import sys
if sys.version_info.major == 3 and sys.version_info.minor >= 6:
disable_histogram = True
else:
disable_histogram = False
from numpy_groupies import aggregate_np
def build_histogram(x_hist, data_mat, N_x_bins, N_y_bins, weighting_vec=1, min_resp=None, max_resp=None, func=None,
debug=False, *args, **kwargs):
"""
Creates histogram for a single block of pixels
Parameters
----------
x_hist : 1D numpy array
bins for x-axis of 2d histogram
data_mat : numpy array
data to be binned for y-axis of 2d histogram
    N_x_bins : integer
        number of bins in the x-direction
    N_y_bins : integer
        number of bins in the y-direction
    weighting_vec : 1D numpy array or float
        weights. If setting all to one value, can be a scalar
min_resp : float
minimum value for y binning
max_resp : float
maximum value for y binning
func : function
        function to be used to bin data_mat. All functions should take data_mat as input.
Arguments should be passed properly to func. This has not been heavily tested.
debug : bool, optional
If True, extra debugging statements are printed. Default False
Returns
-------
pixel_hist : 2D numpy array
contains the histogram of the input data
    """
    # Apply func to input data, convert to 1D array, and normalize
if func is not None:
y_hist = func(data_mat, *args, **kwargs)
else:
y_hist = data_mat
'''
Get the min_resp and max_resp from y_hist if they are none
'''
if min_resp is None:
min_resp = np.min(y_hist)
if max_resp is None:
max_resp = np.max(y_hist)
if debug:
print('min_resp', min_resp, 'max_resp', max_resp)
y_hist = __scale_and_discretize(y_hist, N_y_bins, max_resp, min_resp, debug)
'''
Combine x_hist and y_hist into one matrix
'''
if debug:
print(np.shape(x_hist))
print(np.shape(y_hist))
try:
group_idx = np.zeros((2, x_hist.size), dtype=np.int32)
group_idx[0, :] = x_hist
group_idx[1, :] = y_hist
except:
raise
'''
Aggregate matrix for histogram of current chunk
'''
if debug:
print(np.shape(group_idx))
print(np.shape(weighting_vec))
print(N_x_bins, N_y_bins)
try:
if not disable_histogram:
pixel_hist = aggregate_np(group_idx, weighting_vec, func='sum', size=(N_x_bins, N_y_bins), dtype=np.int32)
else:
pixel_hist = None
except:
raise
return pixel_hist
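# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module and never executed on import). The sizes and bin counts are
# arbitrary; x_hist must hold integer x-bin indices with one entry per element
# of the flattened data_mat. On Python >= 3.6 (where numpy_groupies is not
# imported above) build_histogram returns None.
def _example_build_histogram():
    n_x_bins, n_y_bins = 4, 8
    x_hist = np.repeat(np.arange(n_x_bins), 25)    # 100 x-bin indices
    data_mat = np.random.rand(n_x_bins, 25)        # 100 response values
    weights = np.ones(x_hist.size, dtype=np.int32)
    return build_histogram(x_hist, data_mat, n_x_bins, n_y_bins,
                           weighting_vec=weights)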
def __scale_and_discretize(y_hist, N_y_bins, max_resp, min_resp, debug=False):
"""
Normalizes and discretizes the `y_hist` array
Parameters
----------
y_hist : numpy.ndarray
N_y_bins : int
max_resp : float
min_resp : float
debug : bool
Returns
-------
y_hist numpy.ndarray
"""
y_hist = y_hist.flatten()
y_hist = np.clip(y_hist, min_resp, max_resp)
y_hist = np.add(y_hist, -min_resp)
y_hist = np.dot(y_hist, 1.0 / (max_resp - min_resp))
'''
Discretize y_hist
'''
y_hist = np.rint(y_hist * (N_y_bins - 1))
if debug:
print('ymin', min(y_hist), 'ymax', max(y_hist))
return y_hist
| mit | 4,812,204,602,772,528,000 | 26.015267 | 118 | 0.598757 | false |
fatty-arbuckle/crypto.challenges | set_1/challenge_5.py | 1 | 1143 | #! /usr/bin/python
import string
import sys
INPUT="Burning 'em, if you ain't quick and nimble\nI go crazy when I hear a cymbal"
XOR_STRING="ICE"
EXPECT="0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f"
# convert a byte array into a hex string
def byteArrayToHexString(byteArray):
hex = []
for i in xrange(0, len(byteArray)):
hex.append(format(byteArray[i], '02x'))
return "".join(hex)
# byte array generator
def hexStringToByteArray(hexString, byteSize=2):
for i in xrange(0, len(hexString), byteSize):
yield int(hexString[i:i+byteSize], 16)
# xor generator
def xorByteArrays(first, second):
assert len(first) == len(second)
for i in range(0, len(first)):
yield first[i] ^ second[i]
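# Build the repeating key: tile XOR_STRING ("ICE") until it covers the input,
# then trim it to exactly len(INPUT) characters before XOR-ing.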
key = XOR_STRING
while len(key) < len(INPUT):
key += XOR_STRING
extra = 0 - (len(key) - len(INPUT))
if (extra < 0):
key = key[:extra]
input_bytes = bytearray(INPUT)
key_bytes = bytearray(key)
cipher_bytes = bytearray(xorByteArrays(input_bytes, key_bytes))
cipher = byteArrayToHexString(cipher_bytes)
print cipher
if cipher == EXPECT:
print "Success!"
| mit | 4,259,251,163,925,024,000 | 24.4 | 157 | 0.745407 | false |
Orange-OpenSource/vespa-core | vespa/agent_connections.py | 1 | 3930 | # -*- coding: utf-8 -*-
#
# Module name: agent_connections.py
# Version: 1.0
# Created: 29/04/2014 by Aurélien Wailly <[email protected]>
#
# Copyright (C) 2010-2014 Orange
#
# This file is part of VESPA.
#
# VESPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation version 2.1.
#
# VESPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with VESPA. If not, see <http://www.gnu.org/licenses/>.
"""
Counting connections as suggested in:
http://www.linuxjournal.com/content/back-dead-simple-bash-complex-ddos
SynFlood
https://raw.github.com/arthurnn/SynFlood/master/synflood
"""
import socket
from log_pipe import *
from threading import Thread
import subprocess
from node import Node
from agent import Agent
import Queue
import os
import psutil
# End Flag
EOT_FLAG = "EndOfTransmission"
LIST_ITEM_SEPARATOR = ':'
LIST_SEPARATOR = '\r'
class Agent_Connections(Agent):
"""An agent gathering network links through psutil python module or
system lsof command
:return: The wrapper
:rtype: Node
"""
def __init__(self, name, host, port, master, run=True):
# self.proc = None
super(Agent_Connections, self,).__init__(name, host, port, master, run)
self.backend = self.desc()
self.daemonname = "vlc"
def launch(self):
"""Return network connections to orchestrator layer every second using
either psutil or lsof
"""
import time
while not self.quitting:
            infos = self._get_conns()
addresses = {}
intruders = []
for conn in infos:
if conn.remote_address:
addresses[
conn.remote_address[0].replace(
":",
"").replace(
"f",
"")] = 0
for conn in infos:
if conn.remote_address:
addresses[
conn.remote_address[0].replace(
":",
"").replace(
"f",
"")] += 1
for item in addresses:
intruders.append({'ip': item, 'value': addresses[item]})
self.sendAlert("ip_connections#%s" % intruders)
# debug_info("Intruders: %s" % intruders)
time.sleep(1)
def _get_conns(self):
"""Gather psutil connections
:return: List of network links
:rtype: list
"""
res = []
for p in psutil.process_iter():
try:
res += p.get_connections(kind='inet')
except:
continue
return res
def _get_conns_lsof(self):
"""Gather network connections with lsof
:return: Dict of network links
:rtype: dict
"""
lines = os.popen('lsof -ni').readlines()
from subprocess import Popen, PIPE
p1 = Popen(['lsof', '-ni'], stdout=PIPE)
p2 = Popen(["grep", "LISTEN"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
cols = ("COMMAND PID USER FD TYPE DEVICE SIZE/OFF"
"NODE NAME").split()
res = {}
for l in output.split("\n"):
d = dict(zip(cols, l.split()))
if not d:
continue
if d['COMMAND'] not in res:
res[d['COMMAND']] = []
res[d['COMMAND']].append(d)
return res
| gpl-3.0 | -2,598,357,917,571,393,500 | 27.889706 | 79 | 0.549758 | false |
dmend/PyKMIP | kmip/demos/pie/destroy.py | 1 | 1695 | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
if __name__ == '__main__':
# Build and parse arguments
parser = utils.build_cli_parser(enums.Operation.DESTROY)
opts, args = parser.parse_args(sys.argv[1:])
config = opts.config
uid = opts.uuid
# Exit early if the UUID is not specified
if uid is None:
logging.debug('No UUID provided, exiting early from demo')
sys.exit()
# Build and setup logging and needed factories
f_log = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
'logconfig.ini')
logging.config.fileConfig(f_log)
logger = logging.getLogger(__name__)
# Build the client and connect to the server
with client.ProxyKmipClient(config=config) as client:
try:
client.destroy(uid)
logger.info("Successfully destroyed secret with ID: {0}".format(
uid))
except Exception as e:
logger.error(e)
| apache-2.0 | 7,276,500,772,121,612,000 | 32.235294 | 76 | 0.684366 | false |
jerabaul29/python_huffman | test/test_example_build_tree.py | 1 | 3381 | from __future__ import print_function
from bitarray import bitarray
import pyhuffman.pyhuffman as pyhuffman
"""
A test case that can also be used as an example of how to build trees.
"""
def test_valid_dicts():
# example of data: frequencies in the alphabet for typical english text
# this data is from: https://stackoverflow.com/questions/11587044/how-can-i-create-a-tree-for-huffman-encoding-and-decoding
freq = [
(8.167, 'a'), (1.492, 'b'), (2.782, 'c'), (4.253, 'd'),
(12.702, 'e'), (2.228, 'f'), (2.015, 'g'), (6.094, 'h'),
(6.966, 'i'), (0.153, 'j'), (0.747, 'k'), (4.025, 'l'),
(2.406, 'm'), (6.749, 'n'), (7.507, 'o'), (1.929, 'p'),
(0.095, 'q'), (5.987, 'r'), (6.327, 's'), (9.056, 't'),
(2.758, 'u'), (1.037, 'v'), (2.365, 'w'), (0.150, 'x'),
(1.974, 'y'), (0.074, 'z')]
# build the Huffman tree, dictionary and reverse dictionary
huffman_tree = pyhuffman.HuffmanTree(frequency_data=freq)
assert len(huffman_tree.huffman_dict.keys()) == 26
valid_dict = {'a': '1110',
'b': '110000',
'c': '01001',
'd': '11111',
'e': '100',
'f': '00100',
'g': '110011',
'h': '0110',
'i': '1011',
'j': '001011011',
'k': '0010111',
'l': '11110',
'm': '00111',
'n': '1010',
'o': '1101',
'p': '110001',
'q': '001011001',
'r': '0101',
's': '0111',
't': '000',
'u': '01000',
'v': '001010',
'w': '00110',
'x': '001011010',
'y': '110010',
'z': '001011000'}
assert huffman_tree.huffman_dict == valid_dict
valid_bitarray_tree_ = {'a': bitarray('1110'),
'b': bitarray('110000'),
'c': bitarray('01001'),
'd': bitarray('11111'),
'e': bitarray('100'),
'f': bitarray('00100'),
'g': bitarray('110011'),
'h': bitarray('0110'),
'i': bitarray('1011'),
'j': bitarray('001011011'),
'k': bitarray('0010111'),
'l': bitarray('11110'),
'm': bitarray('00111'),
'n': bitarray('1010'),
'o': bitarray('1101'),
'p': bitarray('110001'),
'q': bitarray('001011001'),
'r': bitarray('0101'),
's': bitarray('0111'),
't': bitarray('000'),
'u': bitarray('01000'),
'v': bitarray('001010'),
'w': bitarray('00110'),
'x': bitarray('001011010'),
'y': bitarray('110010'),
'z': bitarray('001011000')}
assert huffman_tree.bitarray_dict == valid_bitarray_tree_
test_valid_dicts()
| mit | 3,625,160,815,430,537,000 | 38.776471 | 127 | 0.371192 | false |
jbradberry/django-turn-generation | turngeneration/tests/test_tasks.py | 1 | 1968 | from django.test import TestCase
from mock import patch
from sample_app.models import TestRealm
from ..models import Generator, GenerationTime, Ready, Pause
class TimedGenerationTestCase(TestCase):
def setUp(self):
from .. import tasks
self.timed_generation = tasks.timed_generation
self.ready_generation = tasks.ready_generation
self.realm = TestRealm.objects.create()
self.generator = Generator(realm=self.realm)
self.generator.save()
def test_simple(self):
self.assertEqual(GenerationTime.objects.count(), 0)
try:
result = self.timed_generation.apply((self.generator.pk,), throw=True)
except Exception as e:
self.fail(e)
self.assertEqual(result.status, 'SUCCESS')
self.assertEqual(GenerationTime.objects.count(), 1)
class ReadyGenerationTestCase(TestCase):
def setUp(self):
from .. import tasks
self.timed_generation = tasks.timed_generation
self.ready_generation = tasks.ready_generation
self.realm = TestRealm.objects.create()
self.generator = Generator(realm=self.realm)
self.generator.save()
@patch.object(Generator, 'is_ready', autospec=True)
def test_simple(self, is_ready):
is_ready.return_value = True
self.assertEqual(GenerationTime.objects.count(), 0)
try:
result = self.ready_generation.apply((self.generator.pk,), throw=True)
except Exception as e:
self.fail(e)
self.assertEqual(result.status, 'SUCCESS')
self.assertEqual(GenerationTime.objects.count(), 1)
class IntegrationTestCase(TestCase):
def setUp(self):
from .. import tasks
self.timed_generation = tasks.timed_generation
self.ready_generation = tasks.ready_generation
self.realm = TestRealm.objects.create()
self.generator = Generator(realm=self.realm)
self.generator.save()
| mit | 7,976,091,698,914,318,000 | 30.741935 | 82 | 0.664126 | false |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/2.4x-2.5x/Scripts/nxFile.py | 2 | 46910 | #!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ====================================
import os
import sys
import pwd
import shutil
import grp
import urllib2
import time
import imp
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
helperlib = imp.load_source('helperlib', '../helperlib.py')
LG = nxDSCLog.DSCLog
try:
import hashlib
md5const = hashlib.md5
except ImportError:
import md5
md5const = md5.md5
BLOCK_SIZE = 8192
global show_mof
show_mof = False
RemoteFileRetryCount = 4
RemoteFileRetryInterval = 10
def init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents,
Checksum, Recurse, Links, Owner, Group, Mode):
if DestinationPath is None :
DestinationPath = ''
if SourcePath is None :
SourcePath = ''
if Ensure is None or Ensure == '':
Ensure = 'present'
if Type is None :
Type = 'file'
if Force is None :
Force = False
Force = ( Force == True )
if Contents is None :
Contents = ''
if Checksum is None :
Checksum = ''
if Recurse is None :
Recurse = False
Recurse = ( Recurse == True )
if Links is None :
Links = 'follow'
if Owner is None :
Owner = ''
if Group is None :
Group = ''
if Mode is None :
Mode = ''
return DestinationPath.encode('ascii', 'ignore'), SourcePath.encode('ascii', 'ignore'), \
Ensure.encode('ascii', 'ignore').lower(), Type.encode('ascii', 'ignore').lower(), Force,\
Contents, Checksum.encode('ascii', 'ignore').lower(), Recurse, \
Links.encode('ascii', 'ignore').lower(), Owner, Group, Mode
def Set_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = Set(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
return retval
def Test_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = Test(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
return retval
def Get_Marshall(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
arg_names = list(locals().keys())
DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode \
= init_locals(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
retval = 0
(retval, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate) \
= Get(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
DestinationPath = protocol.MI_String(DestinationPath)
SourcePath = protocol.MI_String(SourcePath)
Ensure = protocol.MI_String(Ensure)
Type = protocol.MI_String(Type)
Force = protocol.MI_Boolean(Force)
Contents = protocol.MI_String(Contents)
Checksum = protocol.MI_String(Checksum)
Recurse = protocol.MI_Boolean(Recurse)
Links = protocol.MI_String(Links)
Owner = protocol.MI_String(Owner)
Group = protocol.MI_String(Group)
Mode = protocol.MI_String(Mode)
ModifiedDate = protocol.MI_Timestamp.from_time(ModifiedDate)
arg_names.append('ModifiedDate')
retd = {}
ld = locals()
for k in arg_names :
retd[k] = ld[k]
return retval, retd
# ###########################################################
# Begin user defined DSC functions
# ###########################################################
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
return None, err
return f, None
def opened_bin_w_error(filename, mode="rb"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
return None, err
return f, None
def ReadFile1k(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Read only 1k.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
F, error = opened_bin_w_error(path)
if error:
Print("Exception opening file " + path + " Error: " + str(error), file=sys.stderr )
LG().Log('ERROR', "Exception opening file " + path + " Error: " + str(error))
else:
d = F.read(1024)
F.close()
return d, error
def ReadFile(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
F, error = opened_bin_w_error(path)
if error:
Print("Exception opening file " + path + " Error: " + str(error), file=sys.stderr )
LG().Log('ERROR', "Exception opening file " + path + " Error: " + str(error))
else:
d=F.read()
F.close()
return d,error
def WriteFile(path, contents):
"""
Safely attempt to write data to a file,
replacing the existing file or creating it and
ensuring file is always closed at exit.
Return the exception object.
The error is None if the data was written.
Log results to stderr.
"""
error = None
F, error = opened_w_error(path, 'w+')
if error:
Print("Exception opening file " + path + " Error Code: " + str(error) , file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path + " Error Code: " + str(error))
else:
F.write(contents)
F.close()
return error
def Print(s, file=sys.stderr):
file.write(s.encode('utf8') + '\n')
def LStatFile(path):
"""
LStat the file. Do not follow the symlink.
"""
d = None
error = None
try:
d = os.lstat(path)
except OSError, error:
Print("Exception lstating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception lstating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error: " + str(error))
return d
def StatFile(path):
"""
Stat the file, following the symlink.
"""
d = None
error = None
try:
d = os.stat(path)
except OSError, error:
Print("Exception stating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception stating file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception stating file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception stating file " + path + " Error: " + str(error))
return d
def Chown(path, owner, group):
error = None
try:
os.chown(path, owner, group)
except OSError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
return error
def Chmod(path, mode):
error = None
if type(mode) != int:
mode = int(mode, 8)
try:
os.chmod(path, mode)
except OSError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
return error
def LChown(path, owner, group):
error = None
try:
os.lchown(path, owner, group)
except OSError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing ownership of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing ownership of file " + path + " Error: " + str(error))
return error
def LChmod(path, mode):
error = None
try:
os.lchmod(path, mode)
except OSError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception changing mode of file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception changing mode of file " + path + " Error: " + str(error))
return error
def ListDir(path):
d = None
error = None
try:
d = os.listdir(path)
except OSError, error:
Print("Exception listing dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception listing dir " + path + " Error: " + str(error))
except IOError, error:
Print("Exception listing dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception listing dir " + path + " Error: " + str(error))
return d
def Symlink(spath, dpath):
error = None
if spath == dpath: # Nothing to Link
return error
# remove the destination if present
if os.path.exists(dpath):
try:
os.unlink(dpath)
except OSError, error:
Print("Exception removing " + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing " + dpath + " Error: " + str(error))
return error
except IOError, error:
Print("Exception removing " + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing " + dpath + " Error: " + str(error))
return error
try:
os.symlink(spath, dpath)
except OSError, error:
Print("Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error))
except IOError, error:
Print("Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating symlink from " + spath + ' to ' + dpath + " Error: " + str(error))
return error
def MakeDirs(path):
error = None
try:
os.makedirs(path)
except OSError, error:
Print("Exception making dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception making dir " + path + " Error: " + str(error))
except IOError, error:
Print("Exception making dir " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception making dir " + path + " Error: " + str(error))
return error
def RemoveFile(path):
error = None
try:
os.remove(path)
except OSError, error:
Print("Exception removing file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing file " + path + " Error: " + str(error))
except IOError, error:
Print("Exception removing file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing file " + path + " Error: " + str(error))
return error
def CopyFile(spath, dpath):
error = None
if spath == dpath: # Nothing to copy!
return error
try:
shutil.copyfile(spath, dpath)
except OSError, error:
Print("Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error))
except IOError, error:
Print("Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception copying tree " + spath + ' to ' + dpath + " Error: " + str(error))
return error
def CompareFiles(DestinationPath, SourcePath, Checksum):
"""
If the files differ in size, return -1.
Reading and computing the hash here is done in a block-by-block manner,
in case the file is quite large.
"""
if SourcePath == DestinationPath: # Files are the same!
return 0
stat_dest = StatFile(DestinationPath)
stat_src = StatFile(SourcePath)
if stat_src.st_size != stat_dest.st_size:
return -1
if Checksum == "md5":
src_error = None
dest_error = None
src_hash = md5const()
dest_hash = md5const()
src_block = 'loopme'
dest_block = 'loopme'
src_file,src_error = opened_bin_w_error(SourcePath, 'rb')
if src_error:
Print("Exception opening source file " + SourcePath + " Error : " + str(src_error), file=sys.stderr)
LG().Log('ERROR', "Exception opening source file " + SourcePath + " Error : " + str(src_error))
return -1
dest_file, dest_error = opened_bin_w_error(DestinationPath, 'rb')
if dest_error:
Print("Exception opening destination file " + DestinationPath + " Error : " + str(dest_error), file=sys.stderr)
LG().Log('ERROR', "Exception opening destination file " + DestinationPath + " Error : " + str(dest_error))
src_file.close()
return -1
while src_block != '' or dest_block != '':
src_block = src_file.read(BLOCK_SIZE)
dest_block = dest_file.read(BLOCK_SIZE)
src_hash.update(src_block)
dest_hash.update(dest_block)
if src_hash.hexdigest() != dest_hash.hexdigest():
src_file.close()
dest_file.close()
return -1
if src_hash.hexdigest() == dest_hash.hexdigest():
src_file.close()
dest_file.close()
return 0
elif Checksum == "ctime":
if stat_src.st_ctime != stat_dest.st_ctime:
return -1
else:
return 0
elif Checksum == "mtime":
if stat_src.st_mtime != stat_dest.st_mtime:
return -1
else:
return 0
def RemoveTree(path):
error = None
try:
shutil.rmtree(path)
except OSError, error:
Print("Exception removing folder " + path + " Error Code: "
+ " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing folder " + path + " Error Code: "
+ " Error: " + str(error))
except IOError, error:
Print("Exception removing folder " + path + " Error Code: "
+ " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception removing folder " + path + " Error Code: "
+ " Error: " + str(error))
return error
def RemovePath(path):
error = None
if os.path.islink(path) or os.path.isfile(path):
error = RemoveFile(path)
elif os.path.isdir(path):
error = RemoveTree(path)
else:
Print("Error: Unknown file type for file: " + path, file=sys.stderr)
LG().Log('ERROR', "Error: Unknown file type for file: " + path)
return error
def TestOwnerGroupMode(DestinationPath, SourcePath, fc):
stat_info = LStatFile(DestinationPath)
if stat_info is None :
return False
if SourcePath:
stat_info_src = LStatFile(SourcePath)
if stat_info_src is None :
return False
if fc.Owner:
try:
Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
except KeyError, error:
Print("Exception obtaining gid from group name " + fc.Group + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception obtaining gid from group name " + fc.Group + " Error: " + str(error))
return False
if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
return False
elif SourcePath:
# Owner wasn't specified, if SourcePath is specified then check that the Owners match
if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]:
return False
if fc.Group:
try:
Specified_Group_ID = grp.getgrnam(fc.Group)[2]
except KeyError, error:
Print("Exception obtaining gid from group name " + fc.Group + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception obtaining gid from group name " + fc.Group + " Error: " + str(error))
return False
if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
return False
elif SourcePath:
# Group wasn't specified, if SourcePath is specified then check that the Groups match
if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]:
return False
# Mode is irrelevant to symlinks
if not os.path.islink(DestinationPath):
if fc.Mode:
if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
return False
elif SourcePath:
# Mode wasn't specified, if SourcePath is specified then check that the Modes match
if str(oct(stat_info.st_mode))[-3:] != str(oct(stat_info_src.st_mode))[-3:]:
return False
return True
def ConvertLongModeToNumeric(Mode):
u_r = Mode[0]
u_w = Mode[1]
u_x = Mode[2]
g_r = Mode[3]
g_w = Mode[4]
g_x = Mode[5]
o_r = Mode[6]
o_w = Mode[7]
o_x = Mode[8]
first_digit = 0
second_digit = 0
third_digit = 0
if u_r == "r":
first_digit += 4
elif u_r == "-":
pass
else:
raise Exception("Error: Invalid character for character 0 in Mode")
if u_w == "w":
first_digit += 2
elif u_w == "-":
pass
else:
raise Exception("Error: Invalid character for character 1 in Mode")
if u_x == "x":
first_digit += 1
elif u_x == "-":
pass
else:
raise Exception("Error: Invalid character for character 2 in Mode")
if g_r == "r":
second_digit += 4
elif g_r == "-":
pass
else:
raise Exception("Error: Invalid character for character 3 in Mode")
if g_w == "w":
second_digit += 2
elif g_w == "-":
pass
else:
raise Exception("Error: Invalid character for character 4 in Mode")
if g_x == "x":
second_digit += 1
elif g_x == "-":
pass
else:
raise Exception("Error: Invalid character for character 5 in Mode")
if o_r == "r":
third_digit += 4
elif o_r == "-":
pass
else:
raise Exception("Error: Invalid character for character 6 in Mode")
if o_w == "w":
third_digit += 2
elif o_w == "-":
pass
else:
raise Exception("Error: Invalid character for character 7 in Mode")
if o_x == "x":
third_digit += 1
elif o_x == "-":
pass
else:
raise Exception("Error: Invalid character for character 8 in Mode")
return str(first_digit) + str(second_digit) + str(third_digit)
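# Minimal illustration (added; not part of the original resource and never
# called by it): ConvertLongModeToNumeric turns a symbolic permission string
# such as "rwxr-xr-x" into the octal-style string "755" expected by the Mode
# comparisons in this module.
def _ExampleConvertLongMode():
    return ConvertLongModeToNumeric("rwxr-xr-x")  # returns "755"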
def SetOwnerGroupMode(DestinationPath, SourcePath, fc):
stat_info = LStatFile(DestinationPath)
if stat_info is None :
return False
if SourcePath:
stat_info_src = LStatFile(SourcePath)
if stat_info_src is None:
return False
if fc.Owner:
Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
Print("Changing owner of " + DestinationPath + " to " + str(Specified_Owner_ID))
LG().Log('INFO', "Changing owner of " + DestinationPath + " to " + str(Specified_Owner_ID))
if LChown(DestinationPath, Specified_Owner_ID, -1) is not None :
return False
elif SourcePath:
src_uid = pwd.getpwuid(stat_info_src.st_uid)[2]
if pwd.getpwuid(stat_info.st_uid)[2] != src_uid:
Print("Changing owner of " + DestinationPath + " to " + str(src_uid))
LG().Log('INFO', "Changing owner of " + DestinationPath + " to " + str(src_uid))
if LChown(DestinationPath, src_uid, -1) is not None :
return False
if fc.Group:
Specified_Group_ID = grp.getgrnam(fc.Group)[2]
if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
Print("Changing group of " + DestinationPath + " to " + str(Specified_Group_ID))
LG().Log('INFO', "Changing group of " + DestinationPath + " to " + str(Specified_Group_ID))
if LChown(DestinationPath, -1, Specified_Group_ID) is not None :
return False
elif SourcePath:
src_gid = grp.getgrgid(stat_info_src.st_gid)[2]
if grp.getgrgid(stat_info.st_gid)[2] != src_gid:
Print("Changing group of " + DestinationPath + " to " + str(src_gid))
LG().Log('INFO', "Changing group of " + DestinationPath + " to " + str(src_gid))
if LChown(DestinationPath, src_gid , -1) is not None :
return False
# Mode is irrelevant to symlinks
if not os.path.islink(DestinationPath):
if fc.Mode:
if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
Print("Changing mode of " + DestinationPath + " to " + fc.Mode)
LG().Log('INFO', "Changing mode of " + DestinationPath + " to " + fc.Mode)
if Chmod(DestinationPath, fc.Mode) is not None :
return False
elif SourcePath:
src_mode = str(oct(stat_info_src.st_mode))[-3:]
if str(oct(stat_info.st_mode))[-3:] != src_mode:
Print("Changing mode of " + DestinationPath + " to " + src_mode)
LG().Log('INFO', "Changing mode of " + DestinationPath + " to " + src_mode)
if Chmod(DestinationPath, src_mode) is not None :
return False
return True
def SetDirectoryRecursive(DestinationPath, SourcePath, fc):
if not os.path.exists(DestinationPath):
if MakeDirs(DestinationPath) is not None:
return False
if SetOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
Destination_subfiles = ListDir(DestinationPath)
if Destination_subfiles is None:
return False
if not SourcePath:
if not fc.Recurse :
return True
# Enforce Owner/Group/Mode specified
for f in Destination_subfiles:
f_destpath = os.path.join(DestinationPath, f)
if not os.path.islink(f_destpath):
if os.path.isfile(f_destpath):
if SetOwnerGroupMode(f_destpath, "", fc) is False :
return False
elif os.path.isdir(f_destpath):
if SetDirectoryRecursive(f_destpath, "", fc) is False :
return False
return True
Source_subfiles = ListDir(SourcePath)
# For all files in SourcePath's directory, ensure they exist with proper contents and stat in DestionationPath's directory
for f in Source_subfiles:
f_srcpath = os.path.join(SourcePath, f)
f_destpath = os.path.join(DestinationPath, f)
if os.path.islink(f_srcpath):
if TestLink(f_destpath, f_srcpath, fc) is False:
if SetLink(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isfile(f_srcpath):
if TestFile(f_destpath, f_srcpath, fc) is False:
if SetFile(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isdir(f_srcpath):
if fc.Recurse :
if SetDirectoryRecursive(f_destpath, f_srcpath, fc) is False:
return False
return True
def SetFile(DestinationPath, SourcePath, fc):
error = None
if os.path.exists(DestinationPath) and (os.path.islink(DestinationPath) or os.path.isdir(DestinationPath)):
if fc.Force :
if RemovePath(DestinationPath) is not None:
return False
else:
Print("Error: " + DestinationPath + " is not a file; cannot overwrite without the 'Force' option being true")
LG().Log("ERROR", DestinationPath + " is not a file; cannot overwrite without the 'Force' option being true")
return False
if SourcePath and len(SourcePath) > 0:
if '://' in SourcePath and fc.LocalPath == '':
ret = GetRemoteFileWithRetries(fc)
if ret != 0:
raise Exception('Unable to retrieve remote resource '+fc.SourcePath+' Error is ' + str(ret))
else:
SourcePath = fc.LocalPath
should_copy_file = False
if os.path.isfile(DestinationPath):
if CompareFiles(DestinationPath, SourcePath, fc.Checksum) == -1:
should_copy_file = True
else:
should_copy_file = False
else:
should_copy_file = True
if should_copy_file:
if CopyFile(SourcePath, DestinationPath) is not None :
return False
elif fc.Contents:
if WriteFile(DestinationPath, fc.Contents) is not None:
Print("Error: Unable to write file at " + DestinationPath)
LG().Log("ERROR", "Unable to write file at " + DestinationPath)
return False
else:
# Create a file with nothing in it
try:
open(DestinationPath, 'a').close()
except OSError, error:
Print("Exception creating file " + DestinationPath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating file " + DestinationPath + " Error: " + str(error))
except IOError, error:
Print("Exception creating file " + DestinationPath + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception creating file " + DestinationPath + " Error: " + str(error))
SetOwnerGroupMode(DestinationPath, SourcePath, fc)
if len(fc.LocalPath) > 0 :
if RemoveFile(fc.LocalPath) is not None:
return False
return True
def SetDirectory(DestinationPath, SourcePath, fc):
if os.path.exists(DestinationPath) and not os.path.isdir(DestinationPath):
if fc.Force :
if RemovePath(DestinationPath) is not None:
return False
else:
Print("Error: Unable to overwrite currently existing non-directory object at " + DestinationPath + " without the Force option being true.")
LG().Log("ERROR", "Unable to overwrite currently existing non-directory object at " + DestinationPath + " without the Force option being true.")
return False
return SetDirectoryRecursive(DestinationPath, SourcePath, fc)
def SetLink(DestinationPath, SourcePath, fc):
if SourcePath is None or len(SourcePath) < 1 or not os.path.exists(SourcePath) :
Print("Error: Need a valid source path in order to create a new symbolic link.")
LG().Log("ERROR", "Need a valid source path in order to create a new symbolic link.")
return False
if os.path.exists(DestinationPath) and not os.path.islink(DestinationPath) :
if fc.Force :
if RemovePath(DestinationPath) is not None:
return False
else:
Print("Error: Unable to overwrite currently existing non-link object at " + DestinationPath + " without the Force option being true.")
LG().Log("ERROR", "Unable to overwrite currently existing non-link object at " + DestinationPath + " without the Force option being true.")
return False
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isfile(SourcePath):
if SetFile(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif os.path.isdir(SourcePath):
if SetDirectoryRecursive(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif fc.Links == "manage":
if Symlink(os.readlink(SourcePath), DestinationPath) is not None:
return False
elif fc.Links == "ignore":
# Ignore all symlinks
return True
else:
if Symlink(SourcePath, DestinationPath) is not None:
return False
SetOwnerGroupMode(DestinationPath, SourcePath, fc)
return True
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
if not show_mof:
return
mof = ''
mof += op + ' nxFile MyFile\n'
mof += '{\n'
mof += ' DestinationPath = "' + DestinationPath + '"\n'
mof += ' SourcePath = "' + SourcePath + '"\n'
mof += ' Ensure = "' + Ensure + '"\n'
mof += ' Type = "' + Type + '"\n'
mof += ' Force = ' + str(Force) + '\n'
mof += ' Contents = "' + Contents + '"\n'
mof += ' Checksum = "' + Checksum + '"\n'
mof += ' Recurse = ' + str(Recurse) + '\n'
mof += ' Links = "' + Links + '"\n'
mof += ' Group = "' + Group + '"\n'
mof += ' Mode = "' + Mode + '"\n'
mof += ' Owner = "' + Owner + '"\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
Print(mof, file=f)
LG().Log('INFO', mof)
f.close()
def Set(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('SET', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
fc = FileContext(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if not DestinationPath:
return [-1]
if fc.Ensure == "present":
if fc.Type == "file":
if SetFile(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "directory":
if SetDirectory(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "link":
if SetLink(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Ensure == "absent":
if RemovePath(DestinationPath) is not None:
return [-1]
return [0]
def TestDirectory(DestinationPath, SourcePath, fc):
if not os.path.exists(DestinationPath) or not os.path.isdir(DestinationPath):
return False
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
if fc.Recurse is False:
return True
Destination_subfiles = ListDir(DestinationPath)
if Destination_subfiles is None:
return False
if not SourcePath:
# Enforce Owner/Group/Mode specified
for f in Destination_subfiles:
f_destpath = os.path.join(DestinationPath, f)
if not os.path.islink(f_destpath):
if os.path.isfile(f_destpath):
if TestOwnerGroupMode(f_destpath, "", fc) is False:
return False
elif os.path.isdir(f_destpath):
if TestDirectory(f_destpath, "", fc) is False:
return False
return True
Source_subfiles = ListDir(SourcePath)
if Source_subfiles is None:
return False
for f in Source_subfiles:
if f not in Destination_subfiles:
Print("File: " + f + " does not exist in: " + SourcePath)
LG().Log('ERROR', "File: " + f + " does not exist in: " + SourcePath)
return False
f_destpath = os.path.join(DestinationPath, f)
f_srcpath = os.path.join(SourcePath, f)
if os.path.islink(f_srcpath):
if TestLink(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isfile(f_srcpath):
if TestFile(f_destpath, f_srcpath, fc) is False:
return False
elif os.path.isdir(f_srcpath):
if TestDirectory(f_destpath, f_srcpath, fc) is False:
return False
return True
def TestFile(DestinationPath, SourcePath, fc):
if not os.path.exists(DestinationPath) or not os.path.isfile(DestinationPath) or os.path.islink(DestinationPath):
return False
if '://' in SourcePath:
return TestRemoteFileWithRetries(fc)
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
if SourcePath and len(SourcePath) > 0:
if not os.path.isfile(SourcePath):
return False
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isdir(os.path.realpath(SourcePath)):
Print("Error: Expecting a file, but source link points to directory")
LG().Log("ERROR", "Expecting a file, but source link points to directory")
return False
else:
if not os.path.islink(DestinationPath):
return False
if os.readlink(DestinationPath) != os.readlink(SourcePath):
return False
elif CompareFiles(DestinationPath, SourcePath, fc.Checksum) == -1:
return False
elif fc.Contents:
dest_file, error = ReadFile(DestinationPath)
if fc.Contents.encode('utf8') != dest_file:
return False
return True
def TestLink(DestinationPath, SourcePath, fc):
if SourcePath:
if os.path.islink(SourcePath):
if fc.Links == "follow":
if os.path.isdir(SourcePath):
if TestDirectory(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif os.path.isfile(SourcePath):
if TestFile(DestinationPath, os.path.realpath(SourcePath), fc) is False:
return False
elif fc.Links == "manage":
if not os.path.islink(DestinationPath):
return False
if os.readlink(DestinationPath) != os.readlink(SourcePath):
return False
elif fc.Links == "ignore":
return True
else:
if not os.path.exists(DestinationPath) or not os.path.exists(SourcePath) or not os.path.islink(DestinationPath) :
return False
if os.readlink(DestinationPath) != SourcePath:
return False
if os.path.exists(DestinationPath) != True:
return False
if TestOwnerGroupMode(DestinationPath, SourcePath, fc) is False:
return False
return True
def Test(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('TEST', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
fc = FileContext(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if not DestinationPath:
return [-1]
if fc.Ensure == "present":
if fc.Type == "file":
if TestFile(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "directory":
if TestDirectory(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Type == "link":
if TestLink(DestinationPath, SourcePath, fc) is False:
return [-1]
elif fc.Ensure == "absent":
if os.path.exists(DestinationPath):
return [-1]
return [0]
return [0]
def Get(DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
ShowMof('GET', DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode)
if '://' in SourcePath and Type != 'file':
raise Exception('ERROR: Remote paths are only valid for Type = "file".')
if not DestinationPath:
Ensure = "absent"
ModifiedDate = 0
return [-1, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
if not os.path.exists(DestinationPath):
Ensure = "absent"
ModifiedDate = 0
return [0, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
Ensure = "present"
stat_info = os.lstat(DestinationPath)
Owner = pwd.getpwuid(stat_info.st_uid)[0]
Group = grp.getgrgid(stat_info.st_gid)[0]
Mode = str(oct(stat_info.st_mode))[-3:]
if os.path.islink(DestinationPath):
Type = "link"
elif os.path.isfile(DestinationPath):
Type = "file"
elif os.path.isdir(DestinationPath):
Type = "directory"
ModifiedDate = stat_info.st_mtime
if Type == "directory":
Contents = repr(ListDir(DestinationPath))
elif Type == 'link':
if Links == 'manage' :
Contents = LStatFile(DestinationPath)
Contents = repr(Contents)
elif Links == 'follow':
if os.path.isdir(os.readlink(DestinationPath)):
Contents = repr(ListDir(DestinationPath))
else:
Contents, error = ReadFile1k(DestinationPath)
else :
Contents, error = ReadFile1k(DestinationPath)
if Contents is None:
Contents = ''
return [0, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode, ModifiedDate]
def GetTimeFromString(s):
if s is None or len(s) == 0:
return None
fmt = []
fmt.append('%a, %d %b %Y %H:%M:%S %Z')
st = None
for f in fmt:
try:
st = time.strptime(s, f)
except ValueError:
continue
return st
def SetProxyFromConf():
"""
Check for PROXY definition in dsc.conf.
All we must do is set the appropriate value in the environment.
HTTP_PROXY
HTTPS_PROXY
"""
path = helperlib.CONFIG_SYSCONFDIR+ '/' + helperlib.CONFIG_SYSCONFDIR_DSC + '/dsc.conf'
txt, error = ReadFile(path)
if error :
return
for l in txt.splitlines():
if l.startswith('PROXY'):
info = l.split('=')[1].strip()
if 'https' in info:
os.environ['HTTPS_PROXY'] = info
if 'http:' in info:
os.environ['HTTP_PROXY'] = info
return
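# Illustrative example (added; not part of the original resource):
# SetProxyFromConf expects dsc.conf to contain a line of the form
#   PROXY=http://user:password@proxyhost:3128
# The value after '=' is exported as HTTP_PROXY and/or HTTPS_PROXY depending
# on whether it contains 'http:' or 'https'.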
def GetRemoteFileWithRetries(fc):
retryCount = 0
ret = 1
while True:
try:
ret = GetRemoteFile(fc)
except urllib2.URLError , e:
print("Exception encountered when getting Remote File '" + fc.SourcePath + "', No Retry attempts will be done - " + repr(e))
LG().Log('ERROR', "Exception encountered when getting Remote File '" + fc.SourcePath + "', No Retry attempts will be done - " + repr(e))
if hasattr(e, 'code'):
# Client code are not likely to succeed on retry
if e.code >= 400 and e.code < 500:
return 1
retryCount = retryCount + 1
if ret == 0 or retryCount > RemoteFileRetryCount:
return ret
print("Exception encountered when getting Remote File, Sleeping for " + str(RemoteFileRetryInterval) + " seconds Then Retrying again")
LG().Log('ERROR', "Exception encountered when getting Remote File, Sleeping for " + str(RemoteFileRetryInterval) + " seconds Then Retrying again")
time.sleep(RemoteFileRetryInterval)
return ret
def GetRemoteFile(fc):
SetProxyFromConf()
req = urllib2.Request(fc.SourcePath)
resp = urllib2.urlopen(req)
fc.LocalPath = '/tmp/'+os.path.basename(fc.DestinationPath)+'_remote'
data='keep going'
hasWritten = False
try:
F = open(fc.LocalPath, 'wb+')
while data:
data = resp.read(1048576)
if data is not None and len(data) > 0:
hasWritten = True
F.write(data)
if hasWritten == False:
LG().Log('ERROR', "Data at URL: " + fc.SourcePath + " was empty. Please ensure this file exists at this remote location.")
F.close()
os.unlink(fc.LocalPath)
return 1
except Exception, e:
Print(repr(e))
LG().Log('ERROR', repr(e))
F.close()
os.unlink(fc.LocalPath)
return 1
return 0
def TestRemoteFileWithRetries(fc):
retryCount = 0
ret = False
while True:
try:
ret = TestRemoteFile(fc)
except urllib2.URLError , e:
print("Exception encountered when getting Remote File '" + fc.SourcePath + "', No Retry attempts will be done - " + repr(e))
LG().Log('ERROR', "Exception encountered when getting Remote File '" + fc.SourcePath + "', No Retry attempts will be done - " + repr(e))
if hasattr(e, 'code'):
# Client code are not likely to succeed on retry
if e.code >= 400 and e.code < 500:
return 1
retryCount = retryCount + 1
if ret or retryCount > RemoteFileRetryCount:
return ret
print("ERROR encountered when getting Remote File "+ fc.SourcePath + " Sleeping for " + str(RemoteFileRetryInterval) + " seconds Then Retrying again")
LG().Log('ERROR', "ERROR encountered when getting Remote File "+ fc.SourcePath + " Sleeping for " + str(RemoteFileRetryInterval) + " seconds Then Retrying again")
time.sleep(RemoteFileRetryInterval)
return ret
def TestRemoteFile(fc):
SetProxyFromConf()
req = urllib2.Request(fc.SourcePath)
resp = urllib2.urlopen(req)
h = resp.info()
if fc.Checksum != 'md5' : # if not 'md5' check the last_modified header time before we download
lm = h.getheader('last-modified')
remote_mtime = GetTimeFromString(lm)
destination_mtime = None
dst_st = None
if os.path.exists(fc.DestinationPath):
dst_st = LStatFile(fc.DestinationPath)
if dst_st is not None:
if fc.Checksum == 'ctime':
destination_mtime = time.gmtime(dst_st.st_ctime)
else:
destination_mtime = time.gmtime(dst_st.st_mtime)
if remote_mtime is not None and destination_mtime is not None and destination_mtime >= remote_mtime:
return True
else:
return False
#md5
if not os.path.exists(fc.DestinationPath):
return False
src_data='keep going'
dest_data='keep going'
src_hash = md5const()
dest_hash = md5const()
try:
F = open(fc.DestinationPath, 'rb')
while src_data or dest_data:
src_data = resp.read(1048576)
src_hash.update(src_data)
dest_data = F.read(1048576)
dest_hash.update(dest_data)
if src_hash.hexdigest() != dest_hash.hexdigest():
print("Hash MisMatch found between Source File "+ fc.SourcePath + " and Destination File " + fc.DestinationPath)
LG().Log('ERROR', "Hash MisMatch found between Source File "+ fc.SourcePath + " and Destination File " + fc.DestinationPath)
return False
except Exception, e:
F.close()
return False
F.close()
return True
class FileContext:
def __init__(self, DestinationPath, SourcePath, Ensure, Type, Force, Contents, Checksum, Recurse, Links, Owner, Group, Mode):
if not Checksum:
Checksum = "md5"
if not Type:
Type = "file"
if not Ensure:
Ensure = "present"
if not Links or len(Links) == 0:
Links = "follow"
self.DestinationPath = DestinationPath
self.SourcePath = SourcePath
if len(SourcePath) > 0 and '://' in SourcePath and Type != 'file':
raise Exception('ERROR: Remote paths are only valid for Type = file.')
self.LocalPath = ''
self.Ensure = Ensure.lower()
self.Type = Type.lower()
self.Force = Force
self.Contents = Contents
self.Checksum = Checksum.lower()
self.Recurse = Recurse
self.Links = Links.lower()
self.Owner = Owner
self.Group = Group
self.ModifiedDate = ''
error = None
if Mode:
if len(Mode) == 9:
try:
Mode = ConvertLongModeToNumeric(Mode)
except Exception, error:
Print("Exception in ConvertLongModeToNumeric on " + Mode + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception in ConvertLongModeToNumeric on " + Mode + " Error: " + str(error))
elif len(Mode) == 3:
# Already in proper format
pass
else:
Print("Error: Invalid Mode: " + Mode)
LG().Log("ERROR", "Invalid Mode: " + Mode)
Mode = ""
self.Mode = Mode
| mit | 2,383,727,069,204,466,700 | 37.015397 | 169 | 0.586826 | false |
hankcs/HanLP | hanlp/components/parsers/alg.py | 1 | 29627 | # MIT License
#
# Copyright (c) 2020 Yu Zhang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from hanlp_common.conll import isprojective
def kmeans(x, k, max_it=32):
r"""
KMeans algorithm for clustering the sentences by length.
Args:
x (list[int]):
The list of sentence lengths.
k (int):
The number of clusters.
            This is an approximate value. The final number of clusters can be less than or equal to `k`.
max_it (int):
Maximum number of iterations.
            If the centroids do not converge after several iterations, the algorithm will be stopped early.
Returns:
list[float], list[list[int]]:
The first list contains average lengths of sentences in each cluster.
The second is the list of clusters holding indices of data points.
Examples:
>>> x = torch.randint(10,20,(10,)).tolist()
>>> x
[15, 10, 17, 11, 18, 13, 17, 19, 18, 14]
>>> centroids, clusters = kmeans(x, 3)
>>> centroids
[10.5, 14.0, 17.799999237060547]
>>> clusters
[[1, 3], [0, 5, 9], [2, 4, 6, 7, 8]]
"""
# the number of clusters must not be greater than the number of datapoints
x, k = torch.tensor(x, dtype=torch.float), min(len(x), k)
# collect unique datapoints
d = x.unique()
# initialize k centroids randomly
c = d[torch.randperm(len(d))[:k]]
# assign each datapoint to the cluster with the closest centroid
dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)
for _ in range(max_it):
# if an empty cluster is encountered,
        # choose the farthest datapoint from the biggest cluster and move it to the empty one
mask = torch.arange(k).unsqueeze(-1).eq(y)
none = torch.where(~mask.any(-1))[0].tolist()
while len(none) > 0:
for i in none:
# the biggest cluster
b = torch.where(mask[mask.sum(-1).argmax()])[0]
# the datapoint farthest from the centroid of cluster b
f = dists[b].argmax()
# update the assigned cluster of f
y[b[f]] = i
# re-calculate the mask
mask = torch.arange(k).unsqueeze(-1).eq(y)
none = torch.where(~mask.any(-1))[0].tolist()
# update the centroids
c, old = (x * mask).sum(-1) / mask.sum(-1), c
# re-assign all datapoints to clusters
dists, y = torch.abs_(x.unsqueeze(-1) - c).min(-1)
# stop iteration early if the centroids converge
if c.equal(old):
break
# assign all datapoints to the new-generated clusters
# the empty ones are discarded
assigned = y.unique().tolist()
# get the centroids of the assigned clusters
centroids = c[assigned].tolist()
# map all values of datapoints to buckets
clusters = [torch.where(y.eq(i))[0].tolist() for i in assigned]
return centroids, clusters
def eisner(scores, mask):
r"""
First-order Eisner algorithm for projective decoding.
References:
- Ryan McDonald, Koby Crammer and Fernando Pereira. 2005.
`Online Large-Margin Training of Dependency Parsers`_.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all dependent-head pairs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting projective parse trees.
Examples:
>>> scores = torch.tensor([[[-13.5026, -18.3700, -13.0033, -16.6809],
[-36.5235, -28.6344, -28.4696, -31.6750],
[ -2.9084, -7.4825, -1.4861, -6.8709],
[-29.4880, -27.6905, -26.1498, -27.0233]]])
>>> mask = torch.tensor([[False, True, True, True]])
>>> eisner(scores, mask)
tensor([[0, 2, 0, 2]])
.. _Online Large-Margin Training of Dependency Parsers:
https://www.aclweb.org/anthology/P05-1012/
"""
lens = mask.sum(1)
batch_size, seq_len, _ = scores.shape
scores = scores.permute(2, 1, 0)
s_i = torch.full_like(scores, float('-inf'))
s_c = torch.full_like(scores, float('-inf'))
p_i = scores.new_zeros(seq_len, seq_len, batch_size).long()
p_c = scores.new_zeros(seq_len, seq_len, batch_size).long()
s_c.diagonal().fill_(0)
for w in range(1, seq_len):
n = seq_len - w
starts = p_i.new_tensor(range(n)).unsqueeze(0)
# ilr = C(i->r) + C(j->r+1)
ilr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1))
# [batch_size, n, w]
il = ir = ilr.permute(2, 0, 1)
# I(j->i) = max(C(i->r) + C(j->r+1) + s(j->i)), i <= r < j
il_span, il_path = il.max(-1)
s_i.diagonal(-w).copy_(il_span + scores.diagonal(-w))
p_i.diagonal(-w).copy_(il_path + starts)
# I(i->j) = max(C(i->r) + C(j->r+1) + s(i->j)), i <= r < j
ir_span, ir_path = ir.max(-1)
s_i.diagonal(w).copy_(ir_span + scores.diagonal(w))
p_i.diagonal(w).copy_(ir_path + starts)
# C(j->i) = max(C(r->i) + I(j->r)), i <= r < j
cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0))
cl_span, cl_path = cl.permute(2, 0, 1).max(-1)
s_c.diagonal(-w).copy_(cl_span)
p_c.diagonal(-w).copy_(cl_path + starts)
# C(i->j) = max(I(i->r) + C(r->j)), i < r <= j
cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0)
cr_span, cr_path = cr.permute(2, 0, 1).max(-1)
s_c.diagonal(w).copy_(cr_span)
s_c[0, w][lens.ne(w)] = float('-inf')
p_c.diagonal(w).copy_(cr_path + starts + 1)
def backtrack(p_i, p_c, heads, i, j, complete):
if i == j:
return
if complete:
r = p_c[i, j]
backtrack(p_i, p_c, heads, i, r, False)
backtrack(p_i, p_c, heads, r, j, True)
else:
r, heads[j] = p_i[i, j], i
i, j = sorted((i, j))
backtrack(p_i, p_c, heads, i, r, True)
backtrack(p_i, p_c, heads, j, r + 1, True)
preds = []
p_c = p_c.permute(2, 0, 1).cpu()
p_i = p_i.permute(2, 0, 1).cpu()
for i, length in enumerate(lens.tolist()):
heads = p_c.new_zeros(length + 1, dtype=torch.long)
backtrack(p_i[i], p_c[i], heads, 0, length, True)
preds.append(heads.to(mask.device))
return pad(preds, total_length=seq_len).to(mask.device)
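# Module-level copy of the backtracking routine used inside eisner(): it follows
# the incomplete/complete span backpointers to assign a head to every token.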
def backtrack(p_i, p_c, heads, i, j, complete):
if i == j:
return
if complete:
r = p_c[i, j]
backtrack(p_i, p_c, heads, i, r, False)
backtrack(p_i, p_c, heads, r, j, True)
else:
r, heads[j] = p_i[i, j], i
i, j = sorted((i, j))
backtrack(p_i, p_c, heads, i, r, True)
backtrack(p_i, p_c, heads, j, r + 1, True)
def stripe(x, n, w, offset=(0, 0), dim=1):
"""r'''Returns a diagonal stripe of the tensor.
Args:
x: Tensor
n: int
w: int
offset: tuple (Default value = (0)
dim: int (Default value = 1)
Example:
0):
Returns:
>>> x = torch.arange(25).view(5, 5)
>>> x
tensor([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> stripe(x, 2, 3, (1, 1))
tensor([[ 6, 7, 8],
[12, 13, 14]])
>>> stripe(x, 2, 3, dim=0)
tensor([[ 0, 5, 10],
[ 6, 11, 16]])
"""
x, seq_len = x.contiguous(), x.size(1)
stride, numel = list(x.stride()), x[0, 0].numel()
stride[0] = (seq_len + 1) * numel
stride[1] = (1 if dim == 1 else seq_len) * numel
return x.as_strided(size=(n, w, *x.shape[2:]),
stride=stride,
storage_offset=(offset[0] * seq_len + offset[1]) * numel)
def cky(scores, mask):
r"""
The implementation of `Cocke-Kasami-Younger`_ (CKY) algorithm to parse constituency trees.
References:
- Yu Zhang, Houquan Zhou and Zhenghua Li. 2020.
`Fast and Accurate Neural CRF Constituency Parsing`_.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all candidate constituents.
mask (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``.
The mask to avoid parsing over padding tokens.
            For each square matrix in a batch, all positions except the upper triangular part should be masked out.
Returns:
Sequences of factorized predicted bracketed trees that are traversed in pre-order.
Examples:
>>> scores = torch.tensor([[[ 2.5659, 1.4253, -2.5272, 3.3011],
[ 1.3687, -0.5869, 1.0011, 3.3020],
[ 1.2297, 0.4862, 1.1975, 2.5387],
[-0.0511, -1.2541, -0.7577, 0.2659]]])
>>> mask = torch.tensor([[[False, True, True, True],
[False, False, True, True],
[False, False, False, True],
[False, False, False, False]]])
>>> cky(scores, mask)
[[(0, 3), (0, 1), (1, 3), (1, 2), (2, 3)]]
.. _Cocke-Kasami-Younger:
https://en.wikipedia.org/wiki/CYK_algorithm
.. _Fast and Accurate Neural CRF Constituency Parsing:
https://www.ijcai.org/Proceedings/2020/560/
"""
lens = mask[:, 0].sum(-1)
scores = scores.permute(1, 2, 0)
seq_len, seq_len, batch_size = scores.shape
s = scores.new_zeros(seq_len, seq_len, batch_size)
p = scores.new_zeros(seq_len, seq_len, batch_size).long()
for w in range(1, seq_len):
n = seq_len - w
starts = p.new_tensor(range(n)).unsqueeze(0)
if w == 1:
s.diagonal(w).copy_(scores.diagonal(w))
continue
# [n, w, batch_size]
s_span = stripe(s, n, w - 1, (0, 1)) + stripe(s, n, w - 1, (1, w), 0)
# [batch_size, n, w]
s_span = s_span.permute(2, 0, 1)
# [batch_size, n]
s_span, p_span = s_span.max(-1)
s.diagonal(w).copy_(s_span + scores.diagonal(w))
p.diagonal(w).copy_(p_span + starts + 1)
def backtrack(p, i, j):
if j == i + 1:
return [(i, j)]
split = p[i][j]
ltree = backtrack(p, i, split)
rtree = backtrack(p, split, j)
return [(i, j)] + ltree + rtree
p = p.permute(2, 0, 1).tolist()
trees = [backtrack(p[i], 0, length) if length else [] for i, length in enumerate(lens.tolist())]
return trees
def istree(sequence, proj=False, multiroot=False):
r"""
    Checks if the arcs form a valid dependency tree.
Args:
sequence (list[int]):
A list of head indices.
proj (bool):
If ``True``, requires the tree to be projective. Default: ``False``.
multiroot (bool):
            If ``False``, requires the tree to contain only a single root. Default: ``False``.
Returns:
        ``True`` if the arcs form a valid tree, ``False`` otherwise.
Examples:
>>> istree([3, 0, 0, 3], multiroot=True)
True
>>> istree([3, 0, 0, 3], proj=True)
False
"""
if proj and not isprojective(sequence):
return False
n_roots = sum(head == 0 for head in sequence)
if n_roots == 0:
return False
if not multiroot and n_roots > 1:
return False
if any(i == head for i, head in enumerate(sequence, 1)):
return False
return next(tarjan(sequence), None) is None
def tarjan(sequence):
r"""
Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.
Args:
sequence (list):
List of head indices.
Yields:
A list of indices that make up a SCC. All self-loops are ignored.
Examples:
>>> next(tarjan([2, 5, 0, 3, 1])) # (1 -> 5 -> 2 -> 1) is a cycle
[2, 5, 1]
"""
sequence = [-1] + sequence
# record the search order, i.e., the timestep
dfn = [-1] * len(sequence)
    # record the smallest timestep in a SCC
low = [-1] * len(sequence)
# push the visited into the stack
stack, onstack = [], [False] * len(sequence)
def connect(i, timestep):
dfn[i] = low[i] = timestep[0]
timestep[0] += 1
stack.append(i)
onstack[i] = True
for j, head in enumerate(sequence):
if head != i:
continue
if dfn[j] == -1:
yield from connect(j, timestep)
low[i] = min(low[i], low[j])
elif onstack[j]:
low[i] = min(low[i], dfn[j])
# a SCC is completed
if low[i] == dfn[i]:
cycle = [stack.pop()]
while cycle[-1] != i:
onstack[cycle[-1]] = False
cycle.append(stack.pop())
onstack[i] = False
# ignore the self-loop
if len(cycle) > 1:
yield cycle
timestep = [0]
for i in range(len(sequence)):
if dfn[i] == -1:
yield from connect(i, timestep)
def chuliu_edmonds(s):
r"""
ChuLiu/Edmonds algorithm for non-projective decoding.
Some code is borrowed from `tdozat's implementation`_.
Descriptions of notations and formulas can be found in
`Non-projective Dependency Parsing using Spanning Tree Algorithms`_.
Notes:
The algorithm does not guarantee to parse a single-root tree.
References:
- Ryan McDonald, Fernando Pereira, Kiril Ribarov and Jan Hajic. 2005.
`Non-projective Dependency Parsing using Spanning Tree Algorithms`_.
Args:
s (~torch.Tensor): ``[seq_len, seq_len]``.
Scores of all dependent-head pairs.
Returns:
~torch.Tensor:
A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.
.. _tdozat's implementation:
https://github.com/tdozat/Parser-v3
.. _Non-projective Dependency Parsing using Spanning Tree Algorithms:
https://www.aclweb.org/anthology/H05-1066/
"""
s[0, 1:] = float('-inf')
# prevent self-loops
s.diagonal()[1:].fill_(float('-inf'))
# select heads with highest scores
tree = s.argmax(-1)
    # return the first cycle found by the tarjan algorithm (computed lazily)
cycle = next(tarjan(tree.tolist()[1:]), None)
# if the tree has no cycles, then it is a MST
if not cycle:
return tree
# indices of cycle in the original tree
cycle = torch.tensor(cycle)
# indices of noncycle in the original tree
noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
noncycle = torch.where(noncycle.gt(0))[0]
def contract(s):
# heads of cycle in original tree
cycle_heads = tree[cycle]
# scores of cycle in original tree
s_cycle = s[cycle, cycle_heads]
# calculate the scores of cycle's potential dependents
# s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
s_dep = s[noncycle][:, cycle]
# find the best cycle head for each noncycle dependent
deps = s_dep.argmax(1)
# calculate the scores of cycle's potential heads
# s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
# a(v) is the predecessor of v in cycle
# s(cycle) = sum(s(a(v)->v))
s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
# find the best noncycle head for each cycle dependent
heads = s_head.argmax(0)
contracted = torch.cat((noncycle, torch.tensor([-1])))
# calculate the scores of contracted graph
s = s[contracted][:, contracted]
# set the contracted graph scores of cycle's potential dependents
s[:-1, -1] = s_dep[range(len(deps)), deps]
# set the contracted graph scores of cycle's potential heads
s[-1, :-1] = s_head[heads, range(len(heads))]
return s, heads, deps
# keep track of the endpoints of the edges into and out of cycle for reconstruction later
s, heads, deps = contract(s)
# y is the contracted tree
y = chuliu_edmonds(s)
# exclude head of cycle from y
y, cycle_head = y[:-1], y[-1]
# fix the subtree with no heads coming from the cycle
# len(y) denotes heads coming from the cycle
subtree = y < len(y)
# add the nodes to the new tree
tree[noncycle[subtree]] = noncycle[y[subtree]]
# fix the subtree with heads coming from the cycle
subtree = ~subtree
# add the nodes to the tree
tree[noncycle[subtree]] = cycle[deps[subtree]]
# fix the root of the cycle
cycle_root = heads[cycle_head]
# break the cycle and add the root of the cycle to the tree
tree[cycle[cycle_root]] = noncycle[cycle_head]
return tree
def mst(scores, mask, multiroot=False):
r"""
MST algorithm for decoding non-pojective trees.
This is a wrapper for ChuLiu/Edmonds algorithm.
    The algorithm first runs ChuLiu/Edmonds to parse a tree and then checks for multiple roots.
    If ``multiroot=False`` and multiple roots are indeed found, the algorithm seeks the best
    single-root tree by iterating over all possible single-root trees parsed by ChuLiu/Edmonds.
    Otherwise the resulting trees are taken directly as the final outputs.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all dependent-head pairs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
        multiroot (bool):
            Ensures a single-root tree is parsed if ``False``. Default: ``False``.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.
Examples:
>>> scores = torch.tensor([[[-11.9436, -13.1464, -6.4789, -13.8917],
[-60.6957, -60.2866, -48.6457, -63.8125],
[-38.1747, -49.9296, -45.2733, -49.5571],
[-19.7504, -23.9066, -9.9139, -16.2088]]])
>>> scores[:, 0, 1:] = float('-inf')
>>> scores.diagonal(0, 1, 2)[1:].fill_(float('-inf'))
>>> mask = torch.tensor([[False, True, True, True]])
>>> mst(scores, mask)
tensor([[0, 2, 0, 2]])
"""
batch_size, seq_len, _ = scores.shape
scores = scores.detach().cpu().unbind()
preds = []
for i, length in enumerate(mask.sum(1).tolist()):
s = scores[i][:length + 1, :length + 1]
tree = chuliu_edmonds(s)
roots = torch.where(tree[1:].eq(0))[0] + 1
if not multiroot and len(roots) > 1:
s_root = s[:, 0]
s_best = float('-inf')
s = s.index_fill(1, torch.tensor(0), float('-inf'))
for root in roots:
s[:, 0] = float('-inf')
s[root, 0] = s_root[root]
t = chuliu_edmonds(s)
s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
if s_tree > s_best:
s_best, tree = s_tree, t
preds.append(tree)
return pad(preds, total_length=seq_len).to(mask.device)
def eisner2o(scores, mask):
r"""
Second-order Eisner algorithm for projective decoding.
This is an extension of the first-order one that further incorporates sibling scores into tree scoring.
References:
- Ryan McDonald and Fernando Pereira. 2006.
`Online Learning of Approximate Dependency Parsing Algorithms`_.
Args:
scores (~torch.Tensor, ~torch.Tensor):
            A tuple of two tensors representing the first-order and second-order scores respectively.
The first (``[batch_size, seq_len, seq_len]``) holds scores of all dependent-head pairs.
The second (``[batch_size, seq_len, seq_len, seq_len]``) holds scores of all dependent-head-sibling triples.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting projective parse trees.
Examples:
>>> s_arc = torch.tensor([[[ -2.8092, -7.9104, -0.9414, -5.4360],
[-10.3494, -7.9298, -3.6929, -7.3985],
[ 1.1815, -3.8291, 2.3166, -2.7183],
[ -3.9776, -3.9063, -1.6762, -3.1861]]])
>>> s_sib = torch.tensor([[[[ 0.4719, 0.4154, 1.1333, 0.6946],
[ 1.1252, 1.3043, 2.1128, 1.4621],
[ 0.5974, 0.5635, 1.0115, 0.7550],
[ 1.1174, 1.3794, 2.2567, 1.4043]],
[[-2.1480, -4.1830, -2.5519, -1.8020],
[-1.2496, -1.7859, -0.0665, -0.4938],
[-2.6171, -4.0142, -2.9428, -2.2121],
[-0.5166, -1.0925, 0.5190, 0.1371]],
[[ 0.5827, -1.2499, -0.0648, -0.0497],
[ 1.4695, 0.3522, 1.5614, 1.0236],
[ 0.4647, -0.7996, -0.3801, 0.0046],
[ 1.5611, 0.3875, 1.8285, 1.0766]],
[[-1.3053, -2.9423, -1.5779, -1.2142],
[-0.1908, -0.9699, 0.3085, 0.1061],
[-1.6783, -2.8199, -1.8853, -1.5653],
[ 0.3629, -0.3488, 0.9011, 0.5674]]]])
>>> mask = torch.tensor([[False, True, True, True]])
>>> eisner2o((s_arc, s_sib), mask)
tensor([[0, 2, 0, 2]])
.. _Online Learning of Approximate Dependency Parsing Algorithms:
https://www.aclweb.org/anthology/E06-1011/
"""
# the end position of each sentence in a batch
lens = mask.sum(1)
s_arc, s_sib = scores
batch_size, seq_len, _ = s_arc.shape
# [seq_len, seq_len, batch_size]
s_arc = s_arc.permute(2, 1, 0)
# [seq_len, seq_len, seq_len, batch_size]
s_sib = s_sib.permute(2, 1, 3, 0)
s_i = torch.full_like(s_arc, float('-inf'))
s_s = torch.full_like(s_arc, float('-inf'))
s_c = torch.full_like(s_arc, float('-inf'))
p_i = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
p_s = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
p_c = s_arc.new_zeros(seq_len, seq_len, batch_size).long()
s_c.diagonal().fill_(0)
for w in range(1, seq_len):
# n denotes the number of spans to iterate,
# from span (0, w) to span (n, n+w) given width w
n = seq_len - w
starts = p_i.new_tensor(range(n)).unsqueeze(0)
# I(j->i) = max(I(j->r) + S(j->r, i)), i < r < j |
# C(j->j) + C(i->j-1))
# + s(j->i)
# [n, w, batch_size]
il = stripe(s_i, n, w, (w, 1)) + stripe(s_s, n, w, (1, 0), 0)
il += stripe(s_sib[range(w, n + w), range(n)], n, w, (0, 1))
# [n, 1, batch_size]
il0 = stripe(s_c, n, 1, (w, w)) + stripe(s_c, n, 1, (0, w - 1))
# il0[0] are set to zeros since the scores of the complete spans starting from 0 are always -inf
il[:, -1] = il0.index_fill_(0, lens.new_tensor(0), 0).squeeze(1)
il_span, il_path = il.permute(2, 0, 1).max(-1)
s_i.diagonal(-w).copy_(il_span + s_arc.diagonal(-w))
p_i.diagonal(-w).copy_(il_path + starts + 1)
# I(i->j) = max(I(i->r) + S(i->r, j), i < r < j |
# C(i->i) + C(j->i+1))
# + s(i->j)
# [n, w, batch_size]
ir = stripe(s_i, n, w) + stripe(s_s, n, w, (0, w), 0)
ir += stripe(s_sib[range(n), range(w, n + w)], n, w)
ir[0] = float('-inf')
# [n, 1, batch_size]
ir0 = stripe(s_c, n, 1) + stripe(s_c, n, 1, (w, 1))
ir[:, 0] = ir0.squeeze(1)
ir_span, ir_path = ir.permute(2, 0, 1).max(-1)
s_i.diagonal(w).copy_(ir_span + s_arc.diagonal(w))
p_i.diagonal(w).copy_(ir_path + starts)
# [n, w, batch_size]
slr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1))
slr_span, slr_path = slr.permute(2, 0, 1).max(-1)
# S(j, i) = max(C(i->r) + C(j->r+1)), i <= r < j
s_s.diagonal(-w).copy_(slr_span)
p_s.diagonal(-w).copy_(slr_path + starts)
# S(i, j) = max(C(i->r) + C(j->r+1)), i <= r < j
s_s.diagonal(w).copy_(slr_span)
p_s.diagonal(w).copy_(slr_path + starts)
# C(j->i) = max(C(r->i) + I(j->r)), i <= r < j
cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0))
cl_span, cl_path = cl.permute(2, 0, 1).max(-1)
s_c.diagonal(-w).copy_(cl_span)
p_c.diagonal(-w).copy_(cl_path + starts)
# C(i->j) = max(I(i->r) + C(r->j)), i < r <= j
cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0)
cr_span, cr_path = cr.permute(2, 0, 1).max(-1)
s_c.diagonal(w).copy_(cr_span)
        # prevent multiple words from modifying the root
s_c[0, w][lens.ne(w)] = float('-inf')
p_c.diagonal(w).copy_(cr_path + starts + 1)
def backtrack(p_i, p_s, p_c, heads, i, j, flag):
if i == j:
return
if flag == 'c':
r = p_c[i, j]
backtrack(p_i, p_s, p_c, heads, i, r, 'i')
backtrack(p_i, p_s, p_c, heads, r, j, 'c')
elif flag == 's':
r = p_s[i, j]
i, j = sorted((i, j))
backtrack(p_i, p_s, p_c, heads, i, r, 'c')
backtrack(p_i, p_s, p_c, heads, j, r + 1, 'c')
elif flag == 'i':
r, heads[j] = p_i[i, j], i
if r == i:
r = i + 1 if i < j else i - 1
backtrack(p_i, p_s, p_c, heads, j, r, 'c')
else:
backtrack(p_i, p_s, p_c, heads, i, r, 'i')
backtrack(p_i, p_s, p_c, heads, r, j, 's')
preds = []
p_i = p_i.permute(2, 0, 1).cpu()
p_s = p_s.permute(2, 0, 1).cpu()
p_c = p_c.permute(2, 0, 1).cpu()
for i, length in enumerate(lens.tolist()):
heads = p_c.new_zeros(length + 1, dtype=torch.long)
backtrack(p_i[i], p_s[i], p_c[i], heads, 0, length, 'c')
preds.append(heads.to(mask.device))
return pad(preds, total_length=seq_len).to(mask.device)
def pad(tensors, padding_value=0, total_length=None):
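    """Pads a list of variable-length tensors into one stacked tensor filled with ``padding_value``."""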
size = [len(tensors)] + [max(tensor.size(i) for tensor in tensors)
for i in range(len(tensors[0].size()))]
if total_length is not None:
assert total_length >= size[1]
size[1] = total_length
out_tensor = tensors[0].data.new(*size).fill_(padding_value)
for i, tensor in enumerate(tensors):
out_tensor[i][[slice(0, i) for i in tensor.size()]] = tensor
return out_tensor
def decode_dep(s_arc, mask, tree=False, proj=False):
r"""
Args:
s_arc (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all possible arcs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask for covering the unpadded tokens.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
Returns:
~torch.Tensor, ~torch.Tensor:
Predicted arcs and labels of shape ``[batch_size, seq_len]``.
"""
lens = mask.sum(1)
arc_preds = s_arc.argmax(-1)
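    # mark the sentences whose greedy argmax heads do not form a valid (and optionally projective) tree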
bad = [not istree(seq[1:i + 1], proj) for i, seq in zip(lens.tolist(), arc_preds.tolist())]
if tree and any(bad):
if proj:
alg = eisner
else:
alg = mst
s_arc.diagonal(0, 1, 2)[1:].fill_(float('-inf'))
arc_preds[bad] = alg(s_arc[bad], mask[bad])
return arc_preds
| apache-2.0 | -5,404,664,505,970,615,000 | 37.931669 | 120 | 0.52972 | false |
fingeronthebutton/RIDE | src/robotide/contrib/testrunner/runprofiles.py | 1 | 11956 | # Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''runProfiles.py
This module contains profiles for running robot tests via the
runnerPlugin.
Each class that is a subclass of BaseProfile will appear in a
drop-down list within the plugin. The chosen profile will be used to
build up a command that will be passed the tests to run, as well as
any additional arguments.
'''
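# Illustrative sketch only (not a profile shipped in this module): a minimal
# custom profile just needs a ``name`` and, typically, an overridden
# get_command(), e.g.
#
#     class HypotheticalJybotProfile(PybotProfile):
#         name = "jybot"
#
#         def get_command(self):
#             return "jybot.bat" if os.name == "nt" else "jybot"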
import wx
from wx.lib.filebrowsebutton import FileBrowseButton
import os
from robotide import pluginapi
from robotide.widgets import Label
from robotide.robotapi import DataError, Information
from robotide.utils import overrides, SYSTEM_ENCODING, ArgumentParser
from robotide.contrib.testrunner.usages import USAGE
class BaseProfile(object):
'''Base class for all test runner profiles
At a minimum each profile must set the name attribute, which is
how the profile will appear in the dropdown list.
In case some settings are needed, provide default_settings class attribute
with default values.
This class (BaseProfile) will _not_ appear as one of the choices.
Think of it as an abstract class, if Python 2.5 had such a thing.
'''
# this will be set to the plugin instance at runtime
plugin = None
default_settings = {}
def __init__(self, plugin):
'''plugin is required so that the profiles can save their settings'''
self.plugin = plugin
def get_toolbar(self, parent):
'''Returns a panel with toolbar controls for this profile'''
return wx.Panel(parent, wx.ID_ANY)
def delete_pressed(self):
'''Handle delete key pressing'''
pass
def get_custom_args(self):
'''Return a list of arguments unique to this profile.
        Returned arguments are in the format accepted by Robot Framework's argument
file.
'''
return []
def get_command_prefix(self):
'''Returns a command and any special arguments for this profile'''
return ["pybot.bat" if os.name == "nt" else "pybot"]
def set_setting(self, name, value):
'''Sets a plugin setting
        The setting is automatically prefixed with the profile's name and can be
accessed with direct attribute access. See also __getattr__.
'''
self.plugin.save_setting(self._get_setting_name(name), value, delay=2)
def format_error(self, error, returncode):
return error, self._create_error_log_message(error, returncode)
def _create_error_log_message(self, error, returncode):
return None
def __getattr__(self, name):
"""Provides attribute access to profile's settings
        If, for example, default_settings = {'setting1': ""} is defined,
        then the setting1 value can be accessed as self.setting1.
set_setting is used to store the value.
"""
try:
return getattr(self.plugin, self._get_setting_name(name))
except AttributeError:
try:
return getattr(self.plugin, name)
except AttributeError:
if name in self.default_settings:
return self.default_settings[name]
raise
def _get_setting_name(self, name):
"""Adds profile's name to the setting."""
return "%s_%s" % (self.name.replace(' ', '_'), name)
RF_INSTALLATION_NOT_FOUND = """Robot Framework installation not found.<br>
To run tests, you need to install Robot Framework separately.<br>
See <a href="http://robotframework.org">http://robotframework.org</a> for
installation instructions.
"""
class PybotProfile(BaseProfile):
'''A runner profile which uses pybot
It is assumed that pybot is on the path
'''
name = "pybot"
default_settings = {"arguments": u"",
"include_tags": "",
"exclude_tags": "",
"apply_include_tags": False,
"apply_exclude_tags": False}
def __init__(self, plugin):
BaseProfile.__init__(self, plugin)
self._toolbar = None
def get_command_prefix(self):
'''Returns a command and any special arguments for this profile'''
return [self.get_command()] + self._get_arguments()
def _get_arguments(self):
return self.arguments.split()
def get_command(self):
return "pybot.bat" if os.name == "nt" else "pybot"
def get_custom_args(self):
args = []
if self.apply_include_tags and self.include_tags:
for include in self._get_tags_from_string(self.include_tags):
args.append("--include=%s" % include)
if self.apply_exclude_tags and self.exclude_tags:
for exclude in self._get_tags_from_string(self.exclude_tags):
args.append("--exclude=%s" % exclude)
return args
def _get_tags_from_string(self, tag_string):
tags = []
for tag in tag_string.split(","):
tag = tag.strip().replace(' ', '')
if len(tag) > 0:
tags.append(tag)
return tags
def get_toolbar(self, parent):
if self._toolbar is None:
self._toolbar = self._get_toolbar(parent)
return self._toolbar
def _get_toolbar(self, parent):
panel = wx.Panel(parent, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
for item in self.get_toolbar_items():
sizer.Add(item(panel), 0, wx.ALL | wx.EXPAND)
panel.SetSizerAndFit(sizer)
return panel
def get_toolbar_items(self):
return [self.ArgumentsPanel, self.TagsPanel]
def _create_error_log_message(self, error, returncode):
# bash and zsh use return code 127 and the text `command not found`
        # In Windows, the error is `The system cannot find the file specified`
        if 'not found' in error or returncode == 127 or \
'system cannot find the file specified' in error:
return pluginapi.RideLogMessage(
RF_INSTALLATION_NOT_FOUND, notify_user=True)
return None
@overrides(BaseProfile)
def delete_pressed(self):
focused = wx.Window.FindFocus()
if focused not in [self._arguments, self._include_tags,
self._exclude_tags]:
return
start, end = focused.GetSelection()
focused.Remove(start, max(end, start+1))
def ArgumentsPanel(self, parent):
panel = wx.Panel(parent, wx.ID_ANY)
label = Label(panel, label="Arguments: ")
self._arguments = wx.TextCtrl(
panel, wx.ID_ANY, size=(-1, -1), value=self.arguments)
self._arguments.SetToolTipString(
"Arguments for the test run. Arguments are space separated list.")
self._arguments.Bind(wx.EVT_TEXT, self.OnArgumentsChanged)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(label, 0, wx.ALL | wx.EXPAND)
sizer.Add(self._arguments, 1, wx.ALL | wx.EXPAND)
panel.SetSizerAndFit(sizer)
self._validate_arguments(self.arguments or u'')
return panel
def TagsPanel(self, parent):
'''Create a panel to input include/exclude tags'''
panel = wx.Panel(parent, wx.ID_ANY)
include_cb = self._create_checkbox(panel, self.apply_include_tags,
"Only run tests with these tags")
exclude_cb = self._create_checkbox(panel, self.apply_exclude_tags,
"Skip tests with these tags")
self._include_tags = wx.TextCtrl(panel, wx.ID_ANY, size=(150, -1),
value=self.include_tags)
self._exclude_tags = wx.TextCtrl(panel, wx.ID_ANY, size=(150, -1),
value=self.exclude_tags)
panel.Bind(wx.EVT_CHECKBOX, self.OnIncludeCheckbox, include_cb)
panel.Bind(wx.EVT_CHECKBOX, self.OnExcludeCheckbox, exclude_cb)
self._include_tags.Bind(wx.EVT_TEXT, self.OnIncludeTagsChanged)
self._exclude_tags.Bind(wx.EVT_TEXT, self.OnExcludeTagsChanged)
panelsizer = wx.GridBagSizer(2, 2)
panelsizer.Add(include_cb, (0, 0), flag=wx.EXPAND)
panelsizer.Add(exclude_cb, (0, 1), flag=wx.EXPAND)
panelsizer.Add(self._include_tags, (1, 0), flag=wx.EXPAND)
panelsizer.Add(self._exclude_tags, (1, 1), flag=wx.EXPAND)
panelsizer.AddGrowableCol(0)
panelsizer.AddGrowableCol(1)
panel.SetSizerAndFit(panelsizer)
return panel
def _create_checkbox(self, parent, value, title):
checkbox = wx.CheckBox(parent, wx.ID_ANY, title)
checkbox.SetValue(value)
return checkbox
def OnArgumentsChanged(self, evt):
args = self._arguments.GetValue()
self._validate_arguments(args or u'')
self.set_setting("arguments", args)
def _validate_arguments(self, args):
assert type(args) is unicode
invalid_message = self._get_invalid_message(args)
self._arguments.SetBackgroundColour(
'red' if invalid_message else 'white')
self._arguments.SetForegroundColour(
'white' if invalid_message else 'black')
self._arguments.SetToolTipString(
invalid_message or
'Arguments for the test run. Arguments are space separated list.')
def _get_invalid_message(self, args):
try:
args = args.encode(SYSTEM_ENCODING)
_, invalid = ArgumentParser(USAGE).parse_args(args.split())
if bool(invalid):
return 'Unknown option(s): '+' '.join(invalid)
return None
except DataError, e:
return e.message
except Information:
return 'Does not execute - help or version option given'
def OnExcludeCheckbox(self, evt):
self.set_setting("apply_exclude_tags", evt.IsChecked())
def OnIncludeCheckbox(self, evt):
self.set_setting("apply_include_tags", evt.IsChecked())
def OnIncludeTagsChanged(self, evt):
self.set_setting("include_tags", self._include_tags.GetValue())
def OnExcludeTagsChanged(self, evt):
self.set_setting("exclude_tags", self._exclude_tags.GetValue())
class CustomScriptProfile(PybotProfile):
'''A runner profile which uses script given by the user'''
name = "custom script"
default_settings = dict(PybotProfile.default_settings, runner_script="")
def get_command(self):
return self.runner_script
def get_cwd(self):
return os.path.dirname(self.runner_script)
def get_toolbar_items(self):
return [self.RunScriptPanel, self.ArgumentsPanel, self.TagsPanel]
def _validate_arguments(self, args):
# Can't say anything about custom script argument validity
pass
def _create_error_log_message(self, error, returncode):
return None
def RunScriptPanel(self, parent):
panel = wx.Panel(parent, wx.ID_ANY)
self._script = FileBrowseButton(
panel, labelText="Script to run tests:", size=(-1, -1),
fileMask="*", changeCallback=self.OnCustomScriptChanged)
self._script.SetValue(self.runner_script)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._script, 0, wx.ALL | wx.EXPAND)
panel.SetSizerAndFit(sizer)
return panel
def OnCustomScriptChanged(self, evt):
self.set_setting("runner_script", self._script.GetValue())
| apache-2.0 | 2,019,078,855,946,588,400 | 36.246106 | 79 | 0.632151 | false |
mafrosis/flask-security | flask_security/forms.py | 1 | 9697 | # -*- coding: utf-8 -*-
"""
flask_security.forms
~~~~~~~~~~~~~~~~~~~~
Flask-Security forms module
:copyright: (c) 2012 by Matt Wright.
:copyright: (c) 2017 by CERN.
:license: MIT, see LICENSE for more details.
"""
import inspect
from flask import Markup, current_app, flash, request
from flask_login import current_user
from flask_wtf import FlaskForm as BaseForm
from speaklater import make_lazy_gettext
from wtforms import BooleanField, Field, HiddenField, PasswordField, \
StringField, SubmitField, ValidationError, validators
from .confirmable import requires_confirmation
from .utils import _, _datastore, config_value, get_message, \
localize_callback, url_for_security, validate_redirect_url, \
verify_and_update_password
lazy_gettext = make_lazy_gettext(lambda: localize_callback)
_default_field_labels = {
'email': _('Email Address'),
'password': _('Password'),
'remember_me': _('Remember Me'),
'login': _('Login'),
'register': _('Register'),
'send_confirmation': _('Resend Confirmation Instructions'),
'recover_password': _('Recover Password'),
'reset_password': _('Reset Password'),
'retype_password': _('Retype Password'),
'new_password': _('New Password'),
'change_password': _('Change Password'),
'send_login_link': _('Send Login Link')
}
class ValidatorMixin(object):
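    """Resolves uppercase message keys (e.g. 'EMAIL_NOT_PROVIDED') to the configured messages at validation time."""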
def __call__(self, form, field):
if self.message and self.message.isupper():
self.message = get_message(self.message)[0]
return super(ValidatorMixin, self).__call__(form, field)
class EqualTo(ValidatorMixin, validators.EqualTo):
pass
class Required(ValidatorMixin, validators.DataRequired):
pass
class Email(ValidatorMixin, validators.Email):
pass
class Length(ValidatorMixin, validators.Length):
pass
email_required = Required(message='EMAIL_NOT_PROVIDED')
email_validator = Email(message='INVALID_EMAIL_ADDRESS')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=6, max=128, message='PASSWORD_INVALID_LENGTH')
def get_form_field_label(key):
return lazy_gettext(_default_field_labels.get(key, ''))
def unique_user_email(form, field):
if _datastore.get_user(field.data) is not None:
msg = get_message('EMAIL_ALREADY_ASSOCIATED', email=field.data)[0]
raise ValidationError(msg)
def valid_user_email(form, field):
form.user = _datastore.get_user(field.data)
if form.user is None:
raise ValidationError(get_message('USER_DOES_NOT_EXIST')[0])
class Form(BaseForm):
def __init__(self, *args, **kwargs):
if current_app.testing:
self.TIME_LIMIT = None
super(Form, self).__init__(*args, **kwargs)
class EmailFormMixin():
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator])
class UserEmailFormMixin():
user = None
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, valid_user_email])
class UniqueEmailFormMixin():
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, unique_user_email])
class PasswordFormMixin():
password = PasswordField(
get_form_field_label('password'), validators=[password_required])
class NewPasswordFormMixin():
password = PasswordField(
get_form_field_label('password'),
validators=[password_required, password_length])
class PasswordConfirmFormMixin():
password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('password', message='RETYPE_PASSWORD_MISMATCH'),
password_required])
class NextFormMixin():
next = HiddenField()
def validate_next(self, field):
if field.data and not validate_redirect_url(field.data):
field.data = ''
flash(*get_message('INVALID_REDIRECT'))
raise ValidationError(get_message('INVALID_REDIRECT')[0])
class RegisterFormMixin():
submit = SubmitField(get_form_field_label('register'))
def to_dict(form):
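    """Returns a dict of field values for the form fields that correspond to attributes of the user model."""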
def is_field_and_user_attr(member):
return isinstance(member, Field) and \
hasattr(_datastore.user_model, member.name)
fields = inspect.getmembers(form, is_field_and_user_attr)
return dict((key, value.data) for key, value in fields)
class SendConfirmationForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('send_confirmation'))
def __init__(self, *args, **kwargs):
super(SendConfirmationForm, self).__init__(*args, **kwargs)
if request.method == 'GET':
self.email.data = request.args.get('email', None)
def validate(self):
if not super(SendConfirmationForm, self).validate():
return False
if self.user.confirmed_at is not None:
self.email.errors.append(get_message('ALREADY_CONFIRMED')[0])
return False
return True
class ForgotPasswordForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('recover_password'))
def validate(self):
if not super(ForgotPasswordForm, self).validate():
return False
if requires_confirmation(self.user):
self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
return False
return True
class PasswordlessLoginForm(Form, UserEmailFormMixin):
"""The passwordless login form"""
submit = SubmitField(get_form_field_label('send_login_link'))
def __init__(self, *args, **kwargs):
super(PasswordlessLoginForm, self).__init__(*args, **kwargs)
def validate(self):
if not super(PasswordlessLoginForm, self).validate():
return False
if not self.user.is_active:
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class LoginForm(Form, NextFormMixin):
"""The default login form"""
email = StringField(get_form_field_label('email'),
validators=[Required(message='EMAIL_NOT_PROVIDED')])
password = PasswordField(get_form_field_label('password'),
validators=[password_required])
remember = BooleanField(get_form_field_label('remember_me'))
submit = SubmitField(get_form_field_label('login'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
self.remember.default = config_value('DEFAULT_REMEMBER_ME')
if current_app.extensions['security'].recoverable and \
not self.password.description:
html = Markup('<a href="{url}">{message}</a>'.format(
url=url_for_security("forgot_password"),
message=get_message("FORGOT_PASSWORD")[0],
))
self.password.description = html
def validate(self):
if not super(LoginForm, self).validate():
return False
self.user = _datastore.get_user(self.email.data)
if self.user is None:
self.email.errors.append(get_message('USER_DOES_NOT_EXIST')[0])
return False
if not self.user.password:
self.password.errors.append(get_message('PASSWORD_NOT_SET')[0])
return False
if not verify_and_update_password(self.password.data, self.user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if requires_confirmation(self.user):
self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
return False
if not self.user.is_active:
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class ConfirmRegisterForm(Form, RegisterFormMixin,
UniqueEmailFormMixin, NewPasswordFormMixin):
pass
class RegisterForm(ConfirmRegisterForm, PasswordConfirmFormMixin,
NextFormMixin):
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
class ResetPasswordForm(Form, NewPasswordFormMixin, PasswordConfirmFormMixin):
"""The default reset password form"""
submit = SubmitField(get_form_field_label('reset_password'))
class ChangePasswordForm(Form, PasswordFormMixin):
"""The default change password form"""
new_password = PasswordField(
get_form_field_label('new_password'),
validators=[password_required, password_length])
new_password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('new_password',
message='RETYPE_PASSWORD_MISMATCH'),
password_required])
submit = SubmitField(get_form_field_label('change_password'))
def validate(self):
if not super(ChangePasswordForm, self).validate():
return False
if not verify_and_update_password(self.password.data, current_user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if self.password.data == self.new_password.data:
self.password.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
return False
return True
| mit | -6,662,313,277,829,583,000 | 31.649832 | 79 | 0.64587 | false |
mattsmart/biomodels | oncogenesis_dynamics/stability_diagram.py | 1 | 14631 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from os import sep
from constants import PARAMS_ID, PARAMS_ID_INV, STATES_ID_INV, OUTPUT_DIR, Z_TO_COLOUR_BISTABLE_WIDE, Z_TO_COLOUR_ORIG
from data_io import write_matrix_data_and_idx_vals, read_matrix_data_and_idx_vals, read_params
from formulae import is_stable, fp_location_general, get_physical_fp_stable_and_not, get_fp_stable_and_not
from params import Params
from plotting import plot_table_params
# TODO: have ONLY 1 plotting script with datatype flags (e.g. fp count flag, stability data flag, other...)
def get_stability_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, flag_write=True):
# assumes flow=0 and no feedback; uses is_stable with fp=[0,0,N]
#TODO: maybe solve a1 and a0, or just compute and show signs, instead
#TODO: also show a1 and a0 solutions never intercept (guess/check)
#TODO: if flow!=0, need to characterize the shifted "[0,0,N]" fp
#TODO: how to check stability in feedback case
#TODO: true/false on stability of fp is one visualization but maybe det J(fp) = order parameter?
    assert param_1_name in PARAMS_ID_INV.keys() and param_2_name in PARAMS_ID_INV.keys()
#assert [params_general.params[PARAMS_ID_INV[key]] for key in ['v_x','v_y','v_z']] == [0.0, 0.0, 0.0]
assert [params_general.get(key) for key in ['v_x','v_y','v_z']] == [0.0, 0.0, 0.0]
fp_stationary = [0.0, 0.0, params_general[PARAMS_ID_INV["N"]]]
stab_array = np.zeros((len(param_1_range), len(param_2_range)), dtype=bool)
for i, p1 in enumerate(param_1_range):
for j, p2 in enumerate(param_2_range):
param_mod_dict = {param_1_name:p1, param_2_name: p2}
params_step = params_general.mod_copy(param_mod_dict)
#stab_array[i,j] = is_stable(params_step, fp_stationary, method="algebraic_3d")
stab_array[i, j] = is_stable(params_step, fp_stationary[0:2], method="numeric_2d")
if flag_write:
write_matrix_data_and_idx_vals(stab_array, param_1_range, param_2_range, "fpcount2d", param_1_name, param_2_name, output_dir=OUTPUT_DIR)
return stab_array
def plot_stability_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, flag_show=False):
stability_data_2d = get_stability_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range)
plt.imshow(stability_data_2d, cmap='Greys', interpolation="none", origin='lower', aspect='auto',
extent=[param_2_range[0], param_2_range[-1], param_1_range[0], param_1_range[-1]])
ax = plt.gca()
ax.grid(which='major', axis='both', linestyle='-')
ax.set_xlabel(param_2_name)
ax.set_ylabel(param_1_name)
plt.title("Stability of fp [0,0,N] (black=stable), %s vs %s" % (param_1_name, param_2_name))
# create table of params
plot_table_params(ax, params_general, loc='best', bbox=(1.2, 0.2, 0.1, 0.75))
#plt.subplots_adjust(left=0.2, bottom=0.2)
plt.savefig(OUTPUT_DIR + sep + 'stability_data_2d_%s_%s.png' % (param_1_name, param_2_name), bbox_inches='tight')
if flag_show:
plt.show()
return plt.gca()
def get_gap_dist(params, axis="z", flag_simple=True):
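    # Summarizes the stable physical fixed points along `axis`: with one FP,
    # return its coordinate (or its N-normalized complement when
    # flag_simple=False); with two FPs, return the -1.0 sentinel (or their
    # normalized combined distance from N); more than two FPs warns and
    # returns -1.0.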
N = params.N
fp_list = get_physical_fp_stable_and_not(params)[0]
if len(fp_list) > 2:
print "WARNING: %d phys/stable fixed points at these params:" % len(fp_list)
print params.printer()
print "FPs:", fp_list
params.write(OUTPUT_DIR, "broken_params.csv")
val = -1.0
elif len(fp_list) == 1:
#return fp_list[0][STATES_ID_INV[axis]]
#return N - fp_list[0][STATES_ID_INV[axis]]
#return N
if flag_simple:
val = fp_list[0][STATES_ID_INV[axis]]
else:
val = (N - fp_list[0][STATES_ID_INV[axis]]) / (N)
else:
if flag_simple:
val = -1.0 # should be ~ 1% of N or -0.01 if normalized
#val = np.abs(fp_list[0][STATES_ID_INV[axis]] - fp_list[1][STATES_ID_INV[axis]])
else:
val = (N - (fp_list[0][STATES_ID_INV[axis]] + fp_list[1][STATES_ID_INV[axis]])) / (N)
#val = np.abs(fp_list[0][STATES_ID_INV[axis]] - fp_list[1][STATES_ID_INV[axis]]) # gap in z-coordinate
return val
def get_gap_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, axis_gap="z", figname_mod="", flag_write=True):
# gap between low-z and high-z FPs
    assert param_1_name in PARAMS_ID_INV.keys() and param_2_name in PARAMS_ID_INV.keys()
assert [params_general.get(key) for key in ['v_x','v_y','v_z']] == [0.0, 0.0, 0.0]
gap_array = np.zeros((len(param_1_range), len(param_2_range)))
for i, p1 in enumerate(param_1_range):
for j, p2 in enumerate(param_2_range):
param_mod_dict = {param_1_name:p1, param_2_name: p2}
params_step = params_general.mod_copy(param_mod_dict)
gap_array[i, j] = get_gap_dist(params_step, axis=axis_gap)
print i, j, p1, p2
if flag_write:
write_matrix_data_and_idx_vals(gap_array, param_1_range, param_2_range, "gap2d", param_1_name, param_2_name, output_dir=OUTPUT_DIR)
if figname_mod is not None:
plot_gap_data_2d(gap_array, params_general, param_1_name, param_1_range, param_2_name,
param_2_range, axis_gap=axis_gap, figname_mod=figname_mod)
return gap_array
def plot_gap_data_2d(gap_data_2d, params_general, param_1_name, param_1_range, param_2_name, param_2_range,
axis_gap="z", figname_mod="", flag_show=True, colours=Z_TO_COLOUR_BISTABLE_WIDE):
fs = 12
# custom cmap for gap diagram
if params_general.feedback == 'constant':
colours = Z_TO_COLOUR_ORIG
xyz_cmap_gradient = LinearSegmentedColormap.from_list('xyz_cmap_gradient', colours, N=100)
# plot image
plt.imshow(gap_data_2d, cmap=xyz_cmap_gradient, interpolation="none", origin='lower', aspect='auto',
extent=[param_2_range[0], param_2_range[-1], param_1_range[0], param_1_range[-1]])
ax = plt.gca()
ax.grid(which='major', axis='both', linestyle='-')
plt.title("Gap in %s between FPs, vary %s, %s" % (axis_gap, param_1_name, param_2_name))
ax.set_xlabel(param_2_name, fontsize=fs)
ax.set_ylabel(param_1_name, fontsize=fs)
# create table of params
plot_table_params(ax, params_general, loc='best', bbox=(1.2, 0.2, 0.1, 0.75))
#plt.subplots_adjust(left=0.2, bottom=0.2)
# Now adding the colorbar
cbar = plt.colorbar(orientation='horizontal')
"""
plt.locator_params(axis='x', nbins=6)
plt.locator_params(axis='y', nbins=6)
plt.tick_params(axis='both', which='major', labelsize=16)
cbar.ax.tick_params(labelsize=16)
"""
plt.savefig(OUTPUT_DIR + sep + 'gap_data_2d_%s_%s_%s.pdf' % (param_1_name, param_2_name, figname_mod), bbox_inches='tight')
if flag_show:
plt.show()
return plt.gca()
def get_jump_dist(params_orig, param_1_name, param_2_name, param_1_delta=0.01, param_2_delta=0.01, axis="z"):
values_mod = {param_1_name: params_orig.get(param_1_name) + param_1_delta,
param_2_name: params_orig.get(param_2_name) + param_2_delta}
params_shift = params_orig.mod_copy(values_mod)
fp_orig_list = get_physical_fp_stable_and_not(params_orig)[0]
fp_shift_list = get_physical_fp_stable_and_not(params_shift)[0]
assert len(fp_orig_list) == 1
assert len(fp_shift_list) == 1
axis_idx = STATES_ID_INV[axis]
return fp_shift_list[0][axis_idx] - fp_orig_list[0][axis_idx]
def get_jump_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, axis_jump, figname_mod=None):
    assert param_1_name in PARAMS_ID_INV.keys() and param_2_name in PARAMS_ID_INV.keys()
assert [params_general.params_dict[x] for x in ['v_x', 'v_y', 'v_z']] == [0.0, 0.0, 0.0] # currently hard-code non-flow trivial FP location of [0,0,N]
jump_array = np.zeros((len(param_1_range), len(param_2_range)))
for i, p1 in enumerate(param_1_range):
for j, p2 in enumerate(param_2_range):
param_mod_dict = {param_1_name:p1, param_2_name: p2}
params_step = params_general.mod_copy(param_mod_dict)
jump_array[i, j] = get_jump_dist(params_step, param_1_name, param_2_name, axis=axis_jump)
print i, j, p1, p2
return jump_array
def plot_jump_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, axis_jump):
jump_data_2d = get_jump_data_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range, axis_jump)
plt.imshow(jump_data_2d, cmap='seismic', interpolation="none", origin='lower', aspect='auto',
extent=[param_2_range[0], param_2_range[-1], param_1_range[0], param_1_range[-1]])
ax = plt.gca()
ax.grid(which='major', axis='both', linestyle='-')
ax.set_xlabel(param_2_name)
ax.set_ylabel(param_1_name)
plt.title("Jump in %s when perturbing %s, %s forward" % (axis_jump, param_1_name, param_2_name))
# create table of params
plot_table_params(ax, params_general, loc='best', bbox=(1.2, 0.2, 0.1, 0.75))
#plt.subplots_adjust(left=0.2, bottom=0.2)
# Now adding the colorbar
plt.colorbar(orientation='horizontal')
plt.savefig(OUTPUT_DIR + sep + 'jump_data_2d_%s_%s.png' % (param_1_name, param_2_name), bbox_inches='tight')
plt.show()
return plt.gca()
def get_fp_count_2d(params_general, param_1_name, param_1_range, param_2_name, param_2_range,
flag_stable=True, flag_phys=True, flag_write=True, figname_mod=None):
if flag_stable:
fp_subidx = 0
filemod = "Stable"
else:
fp_subidx = 1
filemod = "Unstable"
if flag_phys:
fpcollector = get_physical_fp_stable_and_not
filestr = "fpPhys%sCount2d" % filemod
else:
fpcollector = get_fp_stable_and_not
filestr = "fp%sCount2d" % filemod
    assert param_1_name in PARAMS_ID_INV.keys() and param_2_name in PARAMS_ID_INV.keys()
fp_count_array = np.zeros((len(param_1_range), len(param_2_range)))
for i, p1 in enumerate(param_1_range):
for j, p2 in enumerate(param_2_range):
param_mod_dict = {param_1_name:p1, param_2_name: p2}
params_step = params_general.mod_copy(param_mod_dict)
fp_list = fpcollector(params_step)[fp_subidx]
fp_count_array[i, j] = len(fp_list)
print i, j, p1, p2
if flag_write:
write_matrix_data_and_idx_vals(fp_count_array, param_1_range, param_2_range, filestr, param_1_name, param_2_name, output_dir=OUTPUT_DIR)
if figname_mod is not None:
plot_fp_count_2d(fp_count_array, params_general, param_1_name, param_1_range, param_2_name,
param_2_range, figname_mod=figname_mod, flag_phys=flag_phys)
return fp_count_array
def plot_fp_count_2d(fp_count_array, params_general, param_1_name, param_1_range, param_2_name,
param_2_range, figname_mod="", flag_stable=True, flag_phys=True, flag_show=False):
stable_str = 'unstable'
if flag_stable:
stable_str = 'stable'
if flag_phys:
plt_title = "Physical and %s FP count (vary %s, %s) %dx%d" % (stable_str, param_1_name, param_2_name, len(fp_count_array), len(fp_count_array[0]))
filestr = 'physfp_count_2d_%s_%s_%s_%s.png' % (stable_str, param_1_name, param_2_name, figname_mod)
else:
plt_title = "%s FP count (vary %s, %s) %dx%d" % (stable_str, param_1_name, param_2_name, len(fp_count_array), len(fp_count_array[0]))
filestr = 'fp_count_2d_%s_%s_%s_%s.png' % (stable_str, param_1_name, param_2_name, figname_mod)
plt.imshow(fp_count_array, cmap='seismic', interpolation="none", origin='lower', aspect='auto',
extent=[param_2_range[0], param_2_range[-1], param_1_range[0], param_1_range[-1]])
ax = plt.gca()
ax.grid(which='major', axis='both', linestyle='-')
ax.set_xlabel(param_2_name)
ax.set_ylabel(param_1_name)
plt.title(plt_title)
# create table of params
plot_table_params(ax, params_general, loc='best', bbox=(1.2, 0.2, 0.1, 0.75))
#plt.subplots_adjust(left=0.2, bottom=0.2)
# Now adding the colorbar
plt.colorbar(orientation='horizontal')
plt.savefig(OUTPUT_DIR + sep + filestr, bbox_inches='tight')
if flag_show:
plt.show()
plt.close('all')
return plt.gca()
if __name__ == "__main__":
run_generate = True
run_load = False
# SCRIPT PARAMETERS
num_steps = 100000 # default 100000
ensemble = 5 # default 100
# DYNAMICS PARAMETERS
system = "feedback_z" # "default", "feedback_z", "feedback_yz", "feedback_mu_XZ_model", "feedback_XYZZprime"
feedback = "hill" # "constant", "hill", "step", "pwlinear"
mu = 1e-3
params_dict = {
'alpha_plus': 0.2,
'alpha_minus': 0.5, # 0.5
'mu': 0.001, # 0.01
'a': 1.0,
'b': 0.8,
'c': 0.6, # 1.2
'N': 100.0, # 100.0
'v_x': 0.0,
'v_y': 0.0,
'v_z': 0.0,
'mu_base': 0.0,
'c2': 0.0,
'v_z2': 0.0
}
params = Params(params_dict, system, feedback=feedback)
param_1_name = "mu"
param_1_start = 0.0
param_1_stop = 0.01
param_1_steps = 40
param_1_range = np.linspace(param_1_start, param_1_stop, param_1_steps)
param_2_name = "c"
param_2_start = 0.8 # 1.1 #0.7
param_2_stop = 0.9 # 1.3 #0.95
param_2_steps = 50
param_2_range = np.linspace(param_2_start, param_2_stop, param_2_steps)
# generate and plot data
if run_generate:
fp_data = get_fp_count_2d(params, param_1_name, param_1_range, param_2_name, param_2_range)
plot_fp_count_2d(fp_data, params, param_1_name, param_1_range, param_2_name, param_2_range, figname_mod="default")
    # load data
if run_load:
row_name = 'c' # aka param 2 is row
col_name = 'b' # aka param 1 is col
datapath = OUTPUT_DIR + sep + "gapdist2d_full.txt"
rowpath = OUTPUT_DIR + sep + "gapdist2d_full_%s.txt" % row_name
colpath = OUTPUT_DIR + sep + "gapdist2d_full_%s.txt" % col_name
paramsname = "gapdist2d_full_params.csv"
gap_data_2d, param_2_range, param_1_range = read_matrix_data_and_idx_vals(datapath, rowpath, colpath)
param_1_name = col_name
param_2_name = row_name
params_general = read_params(OUTPUT_DIR, paramsname)
print params_general
plot_gap_data_2d(gap_data_2d, params_general, param_1_name, param_1_range, param_2_name, param_2_range,
axis_gap="z", figname_mod="", flag_show=True)
| mit | -1,257,420,118,555,779,300 | 46.970492 | 155 | 0.623197 | false |
bennymartinson/Oort | docs/quickstart/3_timing.py | 1 | 3826 | """ Quickstart guide 3: Timing"""
from oort import *
control_rate(15000)
print_off()
# Oort offers an alternative, wait-based approach to timing inspired by,
# but departing from languages like ChucK and Common Music. Oort's schedule
# module uses a system of threads and locks which reorganizes the operation
# of your code into a chronological sequence. This simulates threads
# running and waiting in real-time, but it executes in an instant!
# This is done using an internal moment cursor, which holds the time that is
# "now" in the script. Oort instruments automatically read from this cursor
# if no outskip is specified. To get the moment cursor at any time, call
# the now() function:
print "time at beginning:", now()
# to wait, just call wait(dur), where dur is the duration to wait in
# seconds.
# Having to order each event sequentially would not be very fun or useful
# if you have multiple ideas going on at once. To solve this, Oort lets you
# sprout multiple concurrent processes; that is, functions whose timers
# operate independently of the timers outside of their local scope. When
# a process is sprouted, the time outside the process remains unchanged.
# Creating and sprouting processes is easy. Just define a function, then
# call sprout() on the function and pass in its parameters. For example:
def loop(dur, pan=0.5):
w.dur = dur * 2
while True: # repeat forever, unless function returns
for pitch in 60, 64, 66, 69, 70, 67, 62: # cycle through 7 pitches
w.pitch = cpsmidi(pitch)
w.play()
wait(dur)
if not playing: # We'll come back to this
return
# setting some variables:
playing = True
w = WAVETABLE()
w.wavetable = maketable('wave', 1000,'tri')
w.amp = 10000 * maketable('expbrk', 1000, 1,999,0)
# Now you can sprout this function as many times as you'd like, to get some
# complexity:
sprout(loop, 0.2, 0)
sprout(loop, dur=0.3, pan=1)
sprout(loop, dur=0.4, pan=0.3)
sprout(loop, dur=0.5, pan=0.7)
# Even though we've sprouted these processes which do a lot of waiting, our
# time outside these processes is still 0. See:
print 'Time after sprouting loops:', now()
# Now, the coolest thing about working with code that is processed
# chronologically is you can now control different musical aspects or ideas
# in different processes. In ordinary procedural code, setting a value down
# here wouldn't affect what's going on in the code above. But since the
# code is reorganized chronologically by Oort, any variable we set here at
# some time t will be available everywhere else in the code that is scheduled
# after that time t.
# Here's an example. At 4 seconds in, let's change our WAVETABLE object's
# internal wavetable:
def change_wavetable():
wait(4)
w.wavetable = maketable('wave', 1000, 'buzz')
sprout(change_wavetable)
# Now, let's change the bus_config at t=10 seconds to route our wavetable
# into some GVERB. Again, notice that even though this is happening in a
# completely different part of the code, every wavetable after 10 seconds
# is routed into our reverb, since those are processed after the
# bus_config commands below.
def apply_spectacle():
wait(10)
bus_config('WAVETABLE', 'aux 0-1 out')
bus_config('GVERB', 'aux 0-1 in', 'out 0-1')
GVERB(dur=10, amp=0.5, roomsize=50, rvbtime=5, damping=0.5,
bandwidth=0.5, drylevel=-10, earlyreflect=-11, rvbtail=-9,
ringdown=5)
sprout(apply_spectacle)
# You may have noticed that in the "loop" function above, the code is on an
# endless loop until the variable "playing" becomes False. We'd better end
# the loop at some point so your CPU doesn't melt.. Let's wait 20 seconds,
# then stop the function above.
wait(20)
playing = False | gpl-3.0 | -6,791,078,751,666,071,000 | 37.656566 | 77 | 0.7138 | false |
tensorflow/ecosystem | distribution_strategy/tf_std_server.py | 1 | 1269 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a standard tensorflow server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def main(unused_argv):
# Contrib ops are lazily loaded. So we touch one contrib module to load them
# immediately.
to_import_contrib_ops = tf.contrib.resampler
# Load you custom ops here before starting the standard TensorFlow server.
# Start and join the standard TensorFlow server.
tf.contrib.distribute.run_standard_tensorflow_server().join()
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | 6,705,573,256,972,178,000 | 35.257143 | 80 | 0.700552 | false |
COL-IU/XLSearch | library/filterByFDR.py | 1 | 3141 |
def filterByFDR(tophits, cutoff):
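	# Sort candidate cross-link matches by score (descending) and track cumulative counts of
	# intra-protein, inter-protein, target-decoy and decoy-decoy hits; the FDR at each rank is
	# estimated as (target-decoy - decoy-decoy) / target-target, computed separately for intra-
	# and inter-protein links, and matches are kept up to the last rank within the cutoff.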
tophits = sorted(tophits, key = lambda x : x[4], reverse = True)
intraCumCount = []
interCumCount = []
tardecCumCount = []
decdecCumCount = []
intraCount = 0
interCount = 0
tardecCount = 0
decdecCount = 0
xlType = []
for i in range(len(tophits)):
pro1 = tophits[i][2][0]
pro2 = tophits[i][2][1]
isTar = [[], []]
isDec = [[], []]
for part in pro1:
if 'reverse' in part:
isDec[0].append(True)
isTar[0].append(False)
else:
isDec[0].append(False)
isTar[0].append(True)
for part in pro2:
if 'reverse' in part:
isDec[1].append(True)
isTar[1].append(False)
else:
isDec[1].append(False)
isTar[1].append(True)
if any(isTar[0]) and any(isTar[1]):
if len(set(pro1).intersection(set(pro2))) > 0:
intraCount += 1
xl = 'intraxlink'
else:
interCount += 1
xl = 'interxlink'
elif (any(isTar[0]) and all(isDec[1])) or (all(isDec[0]) and any(isTar[1])):
tardecCount += 1
xl = 'target-decoy'
elif all(isDec[0]) and all(isDec[1]):
decdecCount += 1
xl = 'decoy-decoy'
else:
			print 'Warning: unexpected target/decoy combination'
intraCumCount.append(intraCount)
interCumCount.append(interCount)
tardecCumCount.append(tardecCount)
decdecCumCount.append(decdecCount)
xlType.append(xl)
fdrIntra = []
for i in range(len(tophits)):
if intraCumCount[i] != 0:
fdr = float(tardecCumCount[i] - decdecCumCount[i]) / intraCumCount[i]
fdrIntra.append([fdr, i])
fdrInter = []
for i in range(len(tophits)):
if interCumCount[i] != 0:
fdr = float(tardecCumCount[i] - decdecCumCount[i]) / interCumCount[i]
fdrInter.append([fdr, i])
fdrIntra = filter(lambda x : x[0] <= cutoff, fdrIntra)
fdrInter = filter(lambda x : x[0] <= cutoff, fdrInter)
maxIndexIntra = fdrIntra[-1][1] if len(fdrIntra) > 0 else -1
maxIndexInter = fdrInter[-1][1] if len(fdrInter) > 0 else -1
INTRA = []
for i in range(len(tophits)):
if xlType[i] == 'intraxlink' and i <= maxIndexIntra:
INTRA.append(tophits[i])
INTER = []
for i in range(len(tophits)):
if xlType[i] == 'interxlink' and i <= maxIndexInter:
INTER.append(tophits[i])
uniqueIntra = set()
f = open('intra' + str(cutoff), 'w')
for i in range(len(INTRA)):
pep = [INTRA[i][0][0], INTRA[i][0][1]]
pro = [','.join(INTRA[i][2][0]), ','.join(INTRA[i][2][1])]
pos = [INTRA[i][1][0], INTRA[i][1][1]]
score = INTRA[i][4]
ch = INTRA[i][3]
scan = INTRA[i][-1]
f.write('%d\t%s\t%s\t%d\t%d\t%s\t%s\t%d\t%f\t%s\n' % (i + 1, pep[0], pep[1], pos[0] + 1, pos[1] + 1, pro[0], pro[1], ch, score, scan))
uniqueIntra.add('_'.join(pep))
f.close()
uniqueInter = set()
f = open('inter' + str(cutoff), 'w')
for i in range(len(INTER)):
pep = [INTER[i][0][0], INTER[i][0][1]]
pro = [','.join(INTER[i][2][0]), ','.join(INTER[i][2][1])]
pos = [INTER[i][1][0], INTER[i][1][1]]
score = INTER[i][4]
ch = INTER[i][3]
scan = INTER[i][-1]
f.write('%d\t%s\t%s\t%d\t%d\t%s\t%s\t%d\t%f\t%s\n' % (i + 1, pep[0], pep[1], pos[0] + 1, pos[1] + 1, pro[0], pro[1], ch, score, scan))
uniqueInter.add('_'.join(pep))
f.close()
return [INTRA, uniqueIntra, INTER, uniqueInter]
| mit | -7,691,671,662,447,833,000 | 25.846154 | 136 | 0.596944 | false |
cloudify-cosmo/cloudify-manager | rest-service/manager_rest/rest/resources_v3_1/filters.py | 1 | 8528 | from flask import request
from cloudify.models_states import VisibilityState
from manager_rest import manager_exceptions
from manager_rest.security import SecuredResource
from manager_rest.constants import RESERVED_PREFIX
from manager_rest.utils import get_formatted_timestamp
from manager_rest.rest import rest_decorators, rest_utils
from manager_rest.security.authorization import authorize
from manager_rest.storage import models, get_storage_manager
from manager_rest.resource_manager import get_resource_manager
from manager_rest.rest.filters_utils import create_filter_rules_list
class BlueprintsFilters(SecuredResource):
@authorize('filter_list')
@rest_decorators.marshal_with(models.BlueprintsFilter)
@rest_decorators.paginate
@rest_decorators.sortable(models.BlueprintsFilter)
@rest_decorators.all_tenants
@rest_decorators.search('id')
def get(self, _include=None, pagination=None, sort=None,
all_tenants=None, search=None):
"""List blueprints filters"""
return list_resource_filters(models.BlueprintsFilter, _include,
pagination, sort, all_tenants, search)
class DeploymentsFilters(SecuredResource):
@authorize('filter_list')
@rest_decorators.marshal_with(models.DeploymentsFilter)
@rest_decorators.paginate
@rest_decorators.sortable(models.DeploymentsFilter)
@rest_decorators.all_tenants
@rest_decorators.search('id')
def get(self, _include=None, pagination=None, sort=None,
all_tenants=None, search=None):
"""List deployments filters"""
return list_resource_filters(models.DeploymentsFilter, _include,
pagination, sort, all_tenants, search)
def list_resource_filters(filters_model, _include=None, pagination=None,
sort=None, all_tenants=None, search=None):
get_all_results = rest_utils.verify_and_convert_bool(
'_get_all_results',
request.args.get('_get_all_results', False)
)
result = get_storage_manager().list(
filters_model,
include=_include,
substr_filters=search,
pagination=pagination,
sort=sort,
all_tenants=all_tenants,
get_all_results=get_all_results,
)
return result
class FiltersId(SecuredResource):
def put(self, filters_model, filter_id, filtered_resource):
"""Create a filter"""
rest_utils.validate_inputs({'filter_id': filter_id})
if filter_id.lower().startswith(RESERVED_PREFIX):
raise manager_exceptions.BadParametersError(
f'All filters with a `{RESERVED_PREFIX}` prefix are reserved '
f'for internal use.')
request_dict = rest_utils.get_json_and_verify_params(
{'filter_rules': {'type': list}})
filter_rules = create_filter_rules_list(request_dict['filter_rules'],
filtered_resource)
visibility = rest_utils.get_visibility_parameter(
optional=True, valid_values=VisibilityState.STATES)
now = get_formatted_timestamp()
new_filter = filters_model(
id=filter_id,
value=filter_rules,
created_at=now,
updated_at=now,
visibility=visibility
)
return get_storage_manager().put(new_filter)
def get(self, filters_model, filter_id, _include=None):
"""
Get a filter by ID
"""
rest_utils.validate_inputs({'filter_id': filter_id})
return get_storage_manager().get(
filters_model, filter_id, include=_include)
def delete(self, filters_model, filter_id):
"""
Delete a filter by ID
"""
rest_utils.validate_inputs({'filter_id': filter_id})
storage_manager = get_storage_manager()
filter_elem = storage_manager.get(filters_model, filter_id)
_verify_not_a_system_filter(filter_elem, 'delete')
storage_manager.delete(filter_elem, validate_global=True)
return None, 204
def patch(self, filters_model, filter_id, filtered_resource):
"""Update a filter by its ID
This function updates the filter rules and visibility
"""
rest_utils.validate_inputs({'filter_id': filter_id})
if not request.json:
raise manager_exceptions.IllegalActionError(
'Update a filter request must include at least one parameter '
'to update')
request_dict = rest_utils.get_json_and_verify_params(
{'filter_rules': {'type': list, 'optional': True}})
filter_rules = request_dict.get('filter_rules')
visibility = rest_utils.get_visibility_parameter(
optional=True, valid_values=VisibilityState.STATES)
storage_manager = get_storage_manager()
filter_elem = storage_manager.get(filters_model, filter_id)
_verify_not_a_system_filter(filter_elem, 'update')
if visibility:
get_resource_manager().validate_visibility_value(
filters_model, filter_elem, visibility)
filter_elem.visibility = visibility
if filter_rules:
new_filter_rules = create_filter_rules_list(filter_rules,
filtered_resource)
new_attrs_filter_rules = _get_filter_rules_by_type(
new_filter_rules, 'attribute')
new_labels_filter_rules = _get_filter_rules_by_type(
new_filter_rules, 'label')
if new_attrs_filter_rules:
if new_labels_filter_rules: # Both need to be updated
filter_elem.value = new_filter_rules
else: # Only labels filter rules should be saved
filter_elem.value = (filter_elem.labels_filter_rules +
new_filter_rules)
elif new_labels_filter_rules:
# Only attributes filter rules should be saved
filter_elem.value = (filter_elem.attrs_filter_rules +
new_filter_rules)
else: # Should not get here
raise manager_exceptions.BadParametersError(
'Unknown filter rules type')
filter_elem.updated_at = get_formatted_timestamp()
return storage_manager.update(filter_elem)
def _get_filter_rules_by_type(filter_rules_list, filter_rule_type):
return [filter_rule for filter_rule in
filter_rules_list if filter_rule['type'] == filter_rule_type]
def _verify_not_a_system_filter(filter_elem, action):
if filter_elem.is_system_filter:
raise manager_exceptions.IllegalActionError(
f'Cannot {action} a system filter')
class BlueprintsFiltersId(FiltersId):
@authorize('filter_create')
@rest_decorators.marshal_with(models.BlueprintsFilter)
def put(self, filter_id):
return super().put(models.BlueprintsFilter, filter_id,
models.Blueprint)
@authorize('filter_get')
@rest_decorators.marshal_with(models.BlueprintsFilter)
def get(self, filter_id, _include=None):
return super().get(models.BlueprintsFilter, filter_id, _include)
@authorize('filter_update')
@rest_decorators.marshal_with(models.BlueprintsFilter)
def patch(self, filter_id):
return super().patch(models.BlueprintsFilter, filter_id,
models.Blueprint)
@authorize('filter_delete')
def delete(self, filter_id):
return super().delete(models.BlueprintsFilter, filter_id)
class DeploymentsFiltersId(FiltersId):
@authorize('filter_create')
@rest_decorators.marshal_with(models.DeploymentsFilter)
def put(self, filter_id):
return super().put(models.DeploymentsFilter, filter_id,
models.Deployment)
@authorize('filter_get')
@rest_decorators.marshal_with(models.DeploymentsFilter)
def get(self, filter_id, _include=None):
return super().get(models.DeploymentsFilter, filter_id, _include)
@authorize('filter_update')
@rest_decorators.marshal_with(models.DeploymentsFilter)
def patch(self, filter_id):
return super().patch(models.DeploymentsFilter, filter_id,
models.Deployment)
@authorize('filter_delete')
def delete(self, filter_id):
return super().delete(models.DeploymentsFilter, filter_id)
| apache-2.0 | -4,878,345,227,776,293,000 | 38.299539 | 78 | 0.634615 | false |
lamter/slaveo | scout/base.py | 1 | 10394 | # coding: utf-8
import logging
import os
import datetime as dt
from collections import OrderedDict
import pandas as pd
import tushare as ts
import numpy as np
from mymath import *
class BaseScout:
"""
    Base strategy instance
    1. If the monitored instrument is itself tradable (e.g. a stock or futures contract), trade that instrument directly.
    2. If the monitored instrument is an index, trade the corresponding security instead.
    Initialization
    >>> setting = {"path": "scout_data", "open_indexes": "ma"} # configuration parameters
    >>> scout = BaseScout(**setting) # create the instance
    >>> scout.add_underlying("000025") # add to the whitelist
    >>> scout.update_quotation(price) # update quotes in a loop
    >>> scout.get_buy_order(asset_balance) # get the pending buy orders
    >>> scout.record_buy(code, amount, exc_value) # record a completed purchase
"""
    # Minimum number of lots when opening a position
    MIN_OPEN_HAND = 3
    # Maximum fraction of total assets for a single opening position
    MAX_OPEN_RATE = p2f("50%")
    # Indicator types
    INDEXES_TYPE_MA = 'ma'
    INDEXES_TYPE_FPR = 'fpr' # drawdown of floating profit
def __init__(self, get_now, debug=False, log_handler=None, path=None, open_indexes=None, open_rate=p2f("2%"),
open_close_indexes=None,
open_close_rate=p2f("-5%")):
"""
:param path:
:param open_indexes:
:return:
"""
        # Data validation; useful in testing, disable in production to speed things up
        self.debug = debug
        # A callable returning the current timestamp must be supplied
        self.get_now = get_now
        # Log handler
        self.log = log_handler or logging.getLogger()
        # Storage location for strategy files; must be an absolute path
        self.path = path or os.path.join(os.getcwd(), "scout_data")
        # Position-opening indicator
        self.open_indexes = open_indexes or self.INDEXES_TYPE_MA
        # Fraction of assets used when opening a position
        self.open_rate = open_rate
        # Liquidation indicator for the initial position
        self.open_close_indexes = open_close_indexes or self.INDEXES_TYPE_FPR
        # Drawdown ratio that triggers liquidation of the initial position
        self.open_close_rate = open_close_rate
self.codes_columns = OrderedDict([
            # "index", # the instrument whose price is monitored, e.g. a stock or an index
            ("code", None), # the instrument actually traded: the stock itself, or the fund tracking the index
            ("open_ma", None), # moving average used as the opening indicator
            ("close", None), # yesterday's closing price
            ("open_position", False), # True while in the position-opening state
            ("times", 0), # number of times the position has been added to
            ("exc_times", 0), # expected number of additions
            ("amount", 0), # quantity held
            ("balance", 0), # cumulative capital invested
            ("exc_value", 0), # expected amount for the next addition
            ("av_price", 0), # average opening price
            ("close_fp", 0), # liquidation floating profit; may be negative; liquidate once floating profit drops below this value
            ("hand_unit", 100), # number of shares per lot
])
self.codes = pd.DataFrame(columns=self.codes_columns, dtype="int")
        # Quotes
self.quotation_columns = ["name", "ask1_volume", "ask1", "bid1", "bid1_volume"]
self.quotation = pd.DataFrame(columns=self.quotation_columns)
def get_now(self):
"""
        The timestamp getter; it must be overridden with a real implementation.
        :return: datetime.datetime()
        """
        raise ValueError("This function needs to be redefined!")
def add_underlying(self, codes):
"""
        Add underlying securities, i.e. the whitelist.
        New whitelist entries can be added at any time.
        :param codes: list ["000001", "000002"] OR "000001, 000002"
:return:
"""
if isinstance(codes, str):
codes = [c.strip() for c in codes.split(',')]
        # Stock underlyings
data = self.codes_columns.copy()
data.update({
"code": codes,
})
self.codes = self.codes.append(
pd.DataFrame(
data,
index=codes,
),
)
        # Data types
self.codes.times = self.codes.times.astype(np.int16)
self.codes.exc_times = self.codes.exc_times.astype(np.int16)
self.codes.amount = self.codes.amount.astype(np.int32)
        # Reset the opening indicator
self.reset_open_indexes()
def update_quotation(self, price):
"""
        Update quotes.
        :param price: real-time quotes
:return:
"""
        # Refresh quotes
        self.refresh_quotation(price)
        # Instruments that meet the opening condition
        self.touch_open_indexes()
        # Compute opening prices
self.cal_open_position()
def refresh_quotation(self, price):
"""
        :param price: pd.DataFrame() whose columns must already match self.quotation_columns before being passed in
:return:
"""
assert isinstance(self.quotation, pd.DataFrame)
if self.debug:
assert isinstance(price, pd.DataFrame)
if list(price.columns.values) != list(self.quotation.columns.values):
                raise ValueError("quotation columns do not match!")
self.quotation = price.copy()
def reset_open_indexes(self):
"""
        Reset the opening indicator.
        Can be reset at any time; normally it is reset once before the market opens.
:return:
"""
yesterday = self.get_now() - dt.timedelta(days=1)
yes_str = yesterday.strftime("%Y-%m-%d")
        # Fetch historical data from tushare
        his = ts.get_hists(list(self.codes.index), start=yes_str, end=yes_str, pause=1)
        his = his[["code", "ma5", "close"]].set_index("code")
        if self.open_indexes == self.INDEXES_TYPE_MA:
            # Use the moving average as the opening indicator
            self.codes.open_ma = his["ma5"]
            self.codes.close = his["close"]
        else:
            raise ValueError("Unknown opening indicator type %s" % self.open_indexes)
def touch_open_indexes(self):
"""
        Select the instruments that meet the opening condition.
:return:
"""
quo = self.quotation
assert isinstance(quo, pd.DataFrame)
if self.open_indexes == self.INDEXES_TYPE_MA:
            # Merge the data
            ma = pd.merge(self.codes[["open_ma", "close"]], quo[["bid1"]], left_index=True, right_index=True)
            # Yesterday's close < MA, and best bid > MA
            ma["open_position"] = ma.bid1 > ma.open_ma
            self.codes.open_position = ma.open_position
        if self.debug:
            open_num = ma.open_position.value_counts()[True]
            self.log.debug("%s instruments meet the opening condition" % open_num)
def cal_open_position(self):
"""
        Compute new position openings.
        :return:
        """
        # Only run the opening logic for instruments that hold no position at all
open_pos = self.codes[(self.codes.times == 0) & (self.codes.open_position == True)]
for code in open_pos.index:
self.codes.loc[code, "exc_times"] = 1
def get_buy_order(self, asset_balance):
"""
        Instruments that need to be bought.
        :param asset_balance: total assets
:return:
"""
codes = self.codes[self.codes.times != self.codes.exc_times]
codes["change"] = codes.exc_times - codes.times
        # Instruments to buy
        buy_codes = codes[codes.change >= 1]
        # Join with the quotes
        buy_codes = pd.merge(buy_codes, self.quotation, left_index=True, right_index=True)
        # Buy the larger positions first; build a priority list
        # First take the instruments with higher target positions
        buy_priority_index = buy_codes[buy_codes.exc_times > 1]
        if buy_priority_index.shape[0] > 0:
            # TODO: handle the higher-position instruments first
            pass
        else:
            # New openings only: open at a fixed fraction of total assets, e.g. 2%, with a minimum of 3 (self.MIN_OPEN_HAND) lots
            exc_value = asset_balance * self.open_rate
            exc_amount = exc_value / buy_codes.bid1
            exc_hand = exc_amount / buy_codes.hand_unit
            exc_hand = exc_hand.apply(lambda x: max(x, self.MIN_OPEN_HAND))
            exc_amount = exc_hand * buy_codes.hand_unit
            buy_codes.exc_value = exc_amount * buy_codes.bid1
        # The maximum opening size must not exceed 50% of total assets
buy_codes = buy_codes[buy_codes.exc_value <= self.MAX_OPEN_RATE * asset_balance]
return buy_codes
def record_buy(self, code, amount, exc_value):
"""
        Record a completed buy.
:param code:
:param amount:
:param exc_value:
:return:
"""
loc = self.codes.loc
        # Regardless of the amount bought, increment the buy count by 1
        loc[code, "times"] += 1
        # Record the bought amount
        loc[code, "amount"] += amount
        # Record the invested capital
        loc[code, "balance"] += exc_value
        # Update the average cost price
        loc[code, "av_price"] = loc[code, "balance"] / loc[code, "amount"]
        # Set the liquidation floating profit
        if loc[code, "times"] == 1:
            # Stop-loss floating profit for the initial position
if self.open_close_indexes == self.INDEXES_TYPE_FPR:
loc[code, "close_fp"] = - loc[code, "balance"] * self.open_close_rate
def get_sell_order(self):
"""
        Instruments that need to be sold.
:return:
"""
codes = self.codes[self.codes.times != self.codes.exc_times]
codes["change"] = codes.exc_times - codes.times
sell_codes = codes[codes.change < 0]
        # Join with the quotes
        sell_codes = pd.merge(sell_codes, self.quotation, left_index=True, right_index=True)
        # Sell the larger positions first; build a priority list
        sell_priority_index = sell_codes[sell_codes.exc_times < -1].times.argsort()[::-1]
        # Sort
sell_codes.take(sell_priority_index)
return sell_codes
def record_sell(self, code, amount, exc_value):
"""
:param code:
:param amount:
:param exc_value:
:return:
"""
loc = self.codes.loc
        # Regardless of the amount sold, sync the recorded count to the expected count
        loc[code, "times"] = loc[code, "exc_times"]
        # Record the remaining amount
        loc[code, "amount"] = max(0, loc[code, "amount"] - amount)
        # Record the remaining invested capital
        loc[code, "balance"] = max(0, loc[code, "balance"] - exc_value)
        # Update the average cost price
        if loc[code, "amount"] != 0:
            loc[code, "av_price"] = loc[code, "balance"] / loc[code, "amount"]
        # Reset the liquidation floating profit
loc[code, "close_fp"] = self.codes_columns["close_fp"]
| gpl-3.0 | -2,856,707,596,434,818,000 | 27.137072 | 113 | 0.531001 | false |
XVMX/vmbot | vmbot/__main__.py | 1 | 1292 | # coding: utf-8
# Copyright (C) 2010 Arthur Furlan <[email protected]>
# Copyright (c) 2013 Sascha Jüngling <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# On Debian systems, you can find the full text of the license in
# /usr/share/common-licenses/GPL-3
from __future__ import absolute_import, division, unicode_literals, print_function
import logging
from logging.handlers import TimedRotatingFileHandler
from . import VMBot
from .helpers.logging import setup_logging
import config
if __name__ == "__main__":
logger = setup_logging(TimedRotatingFileHandler("vmbot.log", when='d', interval=7,
backupCount=3, encoding="utf-8"))
jbc = config.JABBER
morgooglie = VMBot(jbc['username'], jbc['password'], jbc['res'])
for room in jbc['chatrooms']:
morgooglie.muc_join_room(room, jbc['nickname'])
try:
morgooglie.serve_forever()
except Exception:
logger.exception("An error happened in the main loop:")
morgooglie.shutdown()
logging.shutdown()
| gpl-3.0 | 5,210,573,513,656,405,000 | 32.076923 | 86 | 0.686822 | false |
RickMohr/nyc-trees | src/nyc_trees/apps/survey/urls/blockface.py | 1 | 2285 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url
from apps.survey.routes import (reserve_blockface_page, cancel_reservation,
reserve_blockfaces, reservations,
reserved_blockface_popup,
reservations_instructions,
blockface_reservations_confirmation_page,
blockface, progress_page,
progress_page_blockface_popup,
printable_reservations_map,
reservations_map_pdf_poll)
# These URLs have the prefix 'blockface/'
urlpatterns = patterns(
'',
url(r'^progress/$', progress_page, name='progress_page'),
url(r'^$', reservations, name='reservations'),
url(r'^printable-map/$', printable_reservations_map,
name='printable_reservations_map'),
url(r'^reserve/$', reserve_blockface_page, name='reserve_blockface_page'),
url(r'^reservations-instructions/$', reservations_instructions,
name='reservations_instructions'),
url(r'^(?P<blockface_id>\d+)/cancel-reservation/$', cancel_reservation,
name='cancel_reservation'),
# Note: changes here must be kept in sync with
# src/nyc_trees/js/src/reservationPage.js
url(r'^(?P<blockface_id>\d+)/reservation-popup/$',
reserved_blockface_popup, name='reserved_blockface_popup'),
# Note: changes here must be kept in sync with
# src/nyc_trees/js/src/progressPage.js
url(r'^(?P<blockface_id>\d+)/progress-page-blockface-popup/$',
progress_page_blockface_popup, name='progress_blockface_popup'),
url(r'^checkout/$', reserve_blockfaces, name='reserve_blockfaces'),
url(r'^checkout-confirmation/$', blockface_reservations_confirmation_page,
name='blockface_reservations_confirmation_page'),
# Note: this must be kept in sync with the hardcoded url in
# js/src/reservationPage.js
url(r'^(?P<blockface_id>\d+)/$', blockface,
name='blockface'),
url(r'^map-poll/$', reservations_map_pdf_poll,
name='reservations_map_pdf_poll'),
)
| apache-2.0 | -299,436,547,300,556,100 | 37.728814 | 78 | 0.621444 | false |
fritzo/distributions | setup.py | 1 | 6121 | # Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
import numpy
if len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel':
# bdist_wheel needs setuptools
import setuptools
assert setuptools # satisfy pyflakes
from distutils.core import setup, Extension
from distutils.version import LooseVersion
try:
from Cython.Build import cythonize
from cython import __version__ as cython_version
min_cython_version = '0.20.1'
if LooseVersion(cython_version) < LooseVersion(min_cython_version):
raise ValueError(
'cython support requires cython>={}'.format(min_cython_version))
cython = True
except ImportError:
cython = False
clang = False
if sys.platform.lower().startswith('darwin'):
clang = True
if os.environ.get('CC', '').startswith('gcc'):
clang = False
if os.environ.get('CXX', '').startswith('g++'):
clang = False
include_dirs = ['include', 'distributions']
include_dirs.append(numpy.get_include())
if 'EXTRA_INCLUDE_PATH' in os.environ:
include_dirs.append(os.environ['EXTRA_INCLUDE_PATH'])
extra_compile_args = [
'-DDIST_DEBUG_LEVEL=3',
'-DDIST_THROW_ON_ERROR=1',
'-Wno-unused-function',
'-Wno-unused-variable',
]
if clang:
extra_compile_args.extend([
'-mmacosx-version-min=10.7', # for anaconda
'-std=c++0x',
'-stdlib=libc++',
'-Wno-deprecated-register',
'-Wno-#warnings', # avoid #warning "Using deprecated NumPy API,..."
])
else:
extra_compile_args.extend([
'-std=c++0x',
'-Wall',
'-Werror',
'-Wno-error=cpp', # avoid #warning "Using deprecated NumPy API,..."
'-Wno-sign-compare',
'-Wno-strict-aliasing',
'-O3',
'-ffast-math',
'-funsafe-math-optimizations',
# '-fno-trapping-math',
# '-ffinite-math-only',
# '-fvect-cost-model',
'-mfpmath=sse',
'-msse4.1',
# '-mavx',
# '-mrecip',
# '-march=native',
# '-DNPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION',
])
use_protobuf = 'DISTRIBUTIONS_USE_PROTOBUF' in os.environ
def make_extension(name):
module = 'distributions.' + name
sources = [
'{}.{}'.format(module.replace('.', '/'), 'pyx' if cython else 'cpp')
]
libraries = ['m']
if use_protobuf:
libraries.append('protobuf')
if name.startswith('lp'):
libraries = ['distributions_shared'] + libraries
return Extension(
module,
sources=sources,
language='c++',
include_dirs=include_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
)
def make_extensions(names):
return [make_extension(name) for name in names]
hp_extensions = make_extensions([
'has_cython',
'rng_cc',
'global_rng',
'_eigen',
'hp.special',
'hp.random',
'hp.models.dd',
'hp.models.gp',
'hp.models.nich',
])
lp_extensions = make_extensions([
'lp.special',
'lp.random',
'lp.vector',
'lp.models.bb',
'lp.models._bb',
'lp.models.dd',
'lp.models._dd',
'lp.models.dpd',
'lp.models._dpd',
'lp.models.gp',
'lp.models._gp',
'lp.models.bnb',
'lp.models._bnb',
'lp.models.nich',
'lp.models._nich',
'lp.models.niw',
'lp.models._niw',
'lp.clustering',
'lp.mixture',
])
if cython:
ext_modules = cythonize(hp_extensions + lp_extensions)
else:
ext_modules = hp_extensions + lp_extensions
version = None
with open(os.path.join('distributions', '__init__.py')) as f:
for line in f:
if re.match("__version__ = '\S+'$", line):
version = line.split()[-1].strip("'")
assert version, 'could not determine version'
with open('README.md') as f:
long_description = f.read()
config = {
'version': version,
'name': 'distributions',
'description': 'Primitives for Bayesian MCMC inference',
'long_description': long_description,
'url': 'https://github.com/posterior/distributions',
'author': 'Jonathan Glidden, Eric Jonas, Fritz Obermeyer, Cap Petschulat',
'maintainer': 'Fritz Obermeyer',
'maintainer_email': '[email protected]',
'license': 'Revised BSD',
'packages': [
'distributions',
'distributions.dbg',
'distributions.dbg.models',
'distributions.hp',
'distributions.hp.models',
'distributions.lp',
'distributions.lp.models',
'distributions.io',
'distributions.tests',
'distributions.vendor',
],
'ext_modules': ext_modules,
}
setup(**config)
| bsd-3-clause | 7,079,599,730,979,026,000 | 27.872642 | 78 | 0.643196 | false |
morta-code/YAX | yax/YAXReader.py | 1 | 10807 | import re
import inspect
from .condition import Condition
import warnings
__author__ = 'Móréh, Tamás'
# Type of compiled regexes
RE = type(re.compile(""))
def element_to_string(element, encoding="unicode", method="xml", **kwargs):
return YAXReader.etree.tostring(element, encoding=encoding, method=method, **kwargs)
def element_to_cmplx_dict(element):
# {tag: "", attrib: {}, text: "", children: {}, childlist: []}
d = dict()
d["tag"] = element.tag
d["attrib"] = element.attrib
d["text"] = element.text
chd = {}
chl = []
for child in list(element):
cd = element_to_cmplx_dict(child)
chl.append(cd)
chd[cd["tag"]] = cd
d["children"] = chd
d["childlist"] = chl
return d
def element_to_json_dict(element, attrib_prefix="-", text_prefix="#"):
tag = element.tag
text = [element.text.strip() if element.text is not None else "", ]
d = dict()
for a, v in element.attrib.items():
if d.get(attrib_prefix + a):
c = d[attrib_prefix + a]
if isinstance(c, list):
c.append(v)
else:
d[attrib_prefix + a] = [c, v]
else:
d[attrib_prefix + a] = v
for child in list(element):
text.append(child.tail.strip() if child.tail is not None else "")
ch = element_to_json_dict(child)
if d.get(child.tag):
c = d[child.tag]
if isinstance(c, list):
c.append(ch[child.tag])
else:
d[child.tag] = [c, ch[child.tag]]
else:
d[child.tag] = ch[child.tag]
# clean text
t2 = []
for t in text:
if t:
t2.append(t)
text = t2
if len(text) == 1:
text = text[0]
# add text if exists
if len(d) == 0:
d = text
elif text:
d[text_prefix + "text"] = text
return {tag: d}
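# Illustrative example (hypothetical input): an element parsed from
# '<a x="1">hi<b>t</b></a>' is converted by element_to_json_dict into
# {'a': {'-x': '1', 'b': 't', '#text': 'hi'}} with the default prefixes.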
def element_to_element(e):
return e
class CallbackRunner:
ETREE = 1
STRING = 2
DICT = 3
JSON_DICT = 4
ATTRIB_PREFIX = "-"
TEXT_PREFIX = "#"
@staticmethod
def _default(*args):
pass
CONVERT_DICT = {ETREE: element_to_element,
STRING: element_to_string,
DICT: element_to_cmplx_dict,
JSON_DICT: element_to_json_dict}
def __init__(self, t: int, attrib_prefix='-', text_prefix='#', condition: Condition=None):
self.condition = condition
self._callback = CallbackRunner._default
self._type = t
CallbackRunner.ATTRIB_PREFIX = attrib_prefix
CallbackRunner.TEXT_PREFIX = text_prefix
try:
self._convert = CallbackRunner.CONVERT_DICT[t]
except KeyError as e:
e.args = ("CallbackRunner type must be one of CallbackRunner.ETREE, " +
"CallbackRunner.STRING, CallbackRunner.JSON_DICT and " +
"CallbackRunner.DICT!",)
raise
    def inverted(self) -> "CallbackRunner":
warnings.warn("This feature is waiting for a better implementation", FutureWarning)
self.condition.inverse()
return self
    # TODO: implement the write-back here
def calls(self, callback):
if not callable(callback):
raise Exception("The callback argument must be callable!")
ins = inspect.getfullargspec(callback)
if len(ins.args) < 2 and ins.varargs is None:
raise Exception("The callback funciton must can accept at least 2 arguments!\n" +
"First: The element itself, Second: the line number.")
self._callback = callback
def __call__(self, element, line: int=0):
self._callback(self._convert(element), line)
class YAXReader:
    etree = None # TODO: make this an instance-level attribute (works as-is otherwise)
def __init__(self, stream=None, use_lxml=False):
self._cnds = []
self.stream = stream
if use_lxml:
try:
import lxml.etree as etree
Condition.LXML = True
except ImportError:
import xml.etree.ElementTree as etree
Condition.LXML = False
else:
import xml.etree.ElementTree as etree
Condition.LXML = False
YAXReader.etree = etree
@staticmethod
def lxml_in_use():
return Condition.LXML
def start(self, chunk_size=8192):
if not self.stream:
raise Exception("Input stream is not initialized.")
elif self.stream.closed:
raise Exception("The input stream is closed.")
if Condition.LXML:
parser = YAXReader.etree.XMLPullParser(events=('end',))
prev_parent = None
prev_element = None
keep = False
chunk = self.stream.read(chunk_size)
while chunk:
parser.feed(chunk)
for action, element in parser.read_events():
if not keep and prev_parent is not None:
prev_parent.remove(prev_element)
keep = False
for cond, cb_runner in self._cnds:
if cond.check(element):
cb_runner(element)
if not keep and cond.keep(element):
keep = True
prev_parent = element.getparent()
prev_element = element
chunk = self.stream.read(chunk_size)
else:
parser = YAXReader.etree.XMLPullParser(events=('end', 'start'))
parents = []
chunk = self.stream.read(chunk_size)
while chunk:
parser.feed(chunk)
for action, element in parser.read_events():
if action == 'start':
parents.append(element)
else:
parents.pop()
keep = False # Do not keep anything by default.
for cond, cb_runner in self._cnds: # For all conditions.
if cond.check(element, parents):
cb_runner(element)
if not keep and cond.keep(element, parents):
keep = True
if not keep and len(parents) > 0:
parents[-1].remove(element)
chunk = self.stream.read(chunk_size)
self.stream.close()
def find(self, tag=None, attrib: dict=None, text=None,
parent=None, children=None, keep_children=None) -> CallbackRunner:
tup = (Condition(tag, attrib, text, parent, children, keep_children),
CallbackRunner(CallbackRunner.ETREE))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def match(self, cond: Condition) -> CallbackRunner:
tup = (cond, CallbackRunner(CallbackRunner.ETREE))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
# TODO remove deprecated funcs
def find_as_element(self, tag=None, attrib: dict=None, text=None,
parent=None, children=None, keep_children=None) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (Condition(tag, attrib, text, parent, children, keep_children),
CallbackRunner(CallbackRunner.ETREE))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def find_as_str(self, tag=None, attrib: dict=None, text=None,
parent=None, children=None, keep_children=None) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (Condition(tag, attrib, text, parent, children, keep_children),
CallbackRunner(CallbackRunner.STRING))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def find_as_dict(self, tag=None, attrib: dict=None, text=None,
parent=None, children=None, keep_children=None) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (Condition(tag, attrib, text, parent, children, keep_children),
CallbackRunner(CallbackRunner.DICT))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def find_as_json_dict(self, tag=None, attrib: dict=None, text=None,
parent=None, children=None, keep_children=None) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (Condition(tag, attrib, text, parent, children, keep_children),
CallbackRunner(CallbackRunner.JSON_DICT))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def match_as_element(self, cond: Condition) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (cond, CallbackRunner(CallbackRunner.ETREE))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def match_as_str(self, cond: Condition) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (cond, CallbackRunner(CallbackRunner.STRING))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def match_as_dict(self, cond: Condition) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (cond, CallbackRunner(CallbackRunner.DICT))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
def match_as_json_dict(self, cond: Condition) -> CallbackRunner:
warnings.warn("Deprecated: this method will be removed in version 2.0.\n"
"Use the new converter methods.", DeprecationWarning)
tup = (cond, CallbackRunner(CallbackRunner.JSON_DICT))
self._cnds.append(tup)
tup[1].condition = tup[0]
return tup[1]
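# Minimal usage sketch (illustrative; assumes `source` is an already-open XML file object):
#     reader = YAXReader(source)
#     reader.find(tag="item").calls(lambda elem, line: print(elem.tag))
#     reader.start()
# Each registered callback receives the converted element and a line number.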
| gpl-3.0 | -1,334,978,849,130,943,200 | 36.741259 | 98 | 0.560311 | false |
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/thread_util.py | 1 | 4017 | # Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for multi-threading support."""
import threading
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from datadog_checks.tokumx.vendor.pymongo.monotonic import time as _time
from datadog_checks.tokumx.vendor.pymongo.errors import ExceededMaxWaiters
### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire
class Semaphore:
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = threading.Condition(threading.Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
self._cond.acquire()
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
rc = True
self._cond.release()
return rc
__enter__ = acquire
def release(self):
self._cond.acquire()
self._value = self._value + 1
self._cond.notify()
self._cond.release()
def __exit__(self, t, v, tb):
self.release()
@property
def counter(self):
return self._value
class BoundedSemaphore(Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return Semaphore.release(self)
### End backport from CPython 3.2
class DummySemaphore(object):
def __init__(self, value=None):
pass
def acquire(self, blocking=True, timeout=None):
return True
def release(self):
pass
class MaxWaitersBoundedSemaphore(object):
def __init__(self, semaphore_class, value=1, max_waiters=1):
self.waiter_semaphore = semaphore_class(max_waiters)
self.semaphore = semaphore_class(value)
def acquire(self, blocking=True, timeout=None):
if not self.waiter_semaphore.acquire(False):
raise ExceededMaxWaiters()
try:
return self.semaphore.acquire(blocking, timeout)
finally:
self.waiter_semaphore.release()
def __getattr__(self, name):
return getattr(self.semaphore, name)
class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore):
def __init__(self, value=1, max_waiters=1):
MaxWaitersBoundedSemaphore.__init__(
self, BoundedSemaphore, value, max_waiters)
def create_semaphore(max_size, max_waiters):
if max_size is None:
return DummySemaphore()
else:
if max_waiters is None:
return BoundedSemaphore(max_size)
else:
return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters)
| bsd-3-clause | 4,264,808,422,550,712,300 | 29.664122 | 78 | 0.630819 | false |
TeamSWAP/swap | src/overlays/raid_mechanics/tfb.py | 1 | 2196 | #
# Copyright 2013 TeamSWAP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wx
import config
import log_analyzer
import raid
import util
from overlays.base_list import BaseListOverlay
from logging import prnt
class TFBOp9Colors(BaseListOverlay):
def __init__(self):
BaseListOverlay.__init__(self, title="Operator IX", size=(300, 150))
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnClose)
analyzer = log_analyzer.get()
analyzer.registerFrame(self)
self.onAnalyzerTick(analyzer)
def createUI(self):
BaseListOverlay.createUI(self)
self.setColumns(["ColoredRect", "String", "String"], [0, 1, 1], [BaseListOverlay.LEFT, BaseListOverlay.LEFT, BaseListOverlay.RIGHT])
def OnClose(self, event):
if event.GetEventObject() == self:
log_analyzer.get().unregisterFrame(self)
def onAnalyzerTick(self, analyzer):
self.beginBatch()
self.clearList()
playersPerColor = {
1: [], 2: [], 3: [], 4: []
}
for player in raid.playerData:
name = player['name'][1:]
orb = player['tfbOrb']
if orb:
playersPerColor[orb].append(name)
self.addRow([None, "Next Deletion", "Shields"])
index = 0
for colorId in ['blue', 'orange', 'purple', 'yellow']:
index += 1
count = 0
nextDeletion = ""
for player in playersPerColor[index]:
if not nextDeletion:
nextDeletion = player
count += 1
self.addRow([colorId, nextDeletion, str(count)])
self.endBatch()
self.panel.Layout()
| apache-2.0 | 6,516,623,201,248,352,000 | 28.675676 | 140 | 0.623862 | false |
noironetworks/apic-ml2-driver | apic_ml2/neutron/db/migration/alembic_migrations/versions/5d1c1f1d1282_rename_tenant_id.py | 1 | 1198 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Rename tenant_id to project_id
Revision ID: 5d1c1f1d1282
Revises: 60741a4735ca
Create Date: 2017-04-04 15:05:45.523877
"""
# revision identifiers, used by Alembic.
revision = '5d1c1f1d1282'
down_revision = '60741a4735ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('cisco_ml2_apic_contracts', 'tenant_id',
new_column_name='project_id',
existing_type=sa.String(255))
def downgrade():
op.alter_column('cisco_ml2_apic_contracts', 'project_id',
new_column_name='tenant_id',
existing_type=sa.String(255))
| apache-2.0 | 8,279,858,757,475,654,000 | 30.526316 | 78 | 0.680301 | false |
Urinx/Project_Euler_Answers | 123.py | 1 | 1544 | #!/usr/bin/env python
#coding:utf-8
"""
Prime square remainders
Let p_n be the nth prime: 2, 3, 5, 7, 11, ..., and let r be the remainder when (p_n - 1)^n + (p_n + 1)^n is divided by p_n^2.
For example, when n = 3, p_3 = 5, and 4^3 + 6^3 = 280 ≡ 5 mod 25.
The least value of n for which the remainder first exceeds 10^9 is 7037.
Find the least value of n for which the remainder first exceeds 10^10.
"""
LIMIT_PRIME = 250000
primes = [0] # Pad with a dummy item, to make primes[n] return the right thing
def calculate_primes():
prime_table = [1]*LIMIT_PRIME # table of largest factor
i = 2
while i < (LIMIT_PRIME/2):
if prime_table[i] == 1:
primes.append(i)
j = i*2
while j < LIMIT_PRIME:
prime_table[j] = i
j += i
i += 1
while i < LIMIT_PRIME:
if prime_table[i] == 1:
primes.append(i)
i += 1
del prime_table
def rem_p(n):
p = primes[n]
p_sq = p**2
(pp, pm) = (1,1)
for i in range(n):
pp *= (p+1)
pm *= (p-1)
pp %= p_sq
pm %= p_sq
r = pp + pm
r = r % p_sq
return r
def answer():
calculate_primes()
print "Found {0} primes".format(len(primes))
	for i in range(2,len(primes)):
		res = rem_p(i)
		if res > 10**10:
			print "Answer =", i
			break
	else:
		print "Finished without finding result"
import time
tStart=time.time()
answer()
print 'run time=',time.time()-tStart
# 21035
# run time= 68.5072770119 | gpl-2.0 | 641,334,964,325,962,500 | 23.460317 | 114 | 0.545455 | false |
costadorione/purestream | servers/idowatch.py | 1 | 2261 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# Connector for idowatch
# http://www.mimediacenter.info/foro/viewforum.php?f=36
#------------------------------------------------------------
import re
from core import jsunpack
from core import logger
from core import scrapertools
def test_video_exists( page_url ):
logger.info("streamondemand.servers.idowatch test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page( page_url )
    if "File Not Found" in data: return False, "[Idowatch] The file does not exist or has been deleted"
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("streamondemand.servers.idowatch get_video_url(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
try:
mediaurl = scrapertools.find_single_match(data, ',{file:(?:\s+|)"([^"]+)"')
except:
matches = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
matchjs = jsunpack.unpack(matches).replace("\\","")
mediaurl = scrapertools.find_single_match(matchjs, ',{file:(?:\s+|)"([^"]+)"')
video_urls = []
video_urls.append( [ scrapertools.get_filename_from_url(mediaurl)[-4:]+" [idowatch]", mediaurl])
for video_url in video_urls:
logger.info("streamondemand.servers.idowatch %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this hoster's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://idowatch.net/m5k9s1g7il01.html
patronvideos = 'idowatch.net/(?:embed-|)([a-z0-9]+)'
logger.info("streamondemand.servers.idowatch find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[idowatch]"
url = "http://idowatch.net/%s.html" % match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'idowatch' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| gpl-3.0 | -2,722,581,283,899,343,000 | 35.451613 | 130 | 0.60531 | false |
scavallero/mydomus | mydomus.py | 1 | 3442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MyDomus
# Home Domotic Service
# Copyright (c) 2016 Salvatore Cavallero ([email protected])
# https://github.com/scavallero/mydomus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import argparse
import json
import threading
import httpapp
import sensor
import scheduler
import logging
import logging.handlers
import dbutil
import auth
## Self defined summary log handler ##
class SummaryHandler(logging.Handler):
def __init__(self,bufferlen=100):
logging.Handler.__init__(self)
self.bufferlen = bufferlen
self.buffered_entry = []
def emit(self, record):
log_entry = self.format(record)
self.buffered_entry.append(log_entry)
if len(self.buffered_entry) > self.bufferlen:
del self.buffered_entry[0]
## SETUP DEFAULTS AND LOGGER ##
CONFIG_DIR = "."
LOG_LEVEL = logging.DEBUG
VERSION = "1.0"
parser = argparse.ArgumentParser(description="Mydomus")
parser.add_argument("-c", "--confdir", help="config directory (default '" + CONFIG_DIR + "')")
args = parser.parse_args()
if args.confdir:
CONFIG_DIR = args.confdir
# Reads Config
with open(CONFIG_DIR+'/mydomus.conf') as data_file:
config = json.load(data_file)
LOG_FILENAME = config["LogFileName"]
SERVER_ADDR = config["ServerAddress"]
SERVER_PORT = config["ServerPort"]
logger = logging.getLogger("Mydomus")
logger.setLevel(LOG_LEVEL)
handler_logfile = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight", backupCount=30)
handler_stream = logging.StreamHandler()
handler_summary = SummaryHandler()
handler_summary.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(module)-10s %(message)s')
handler_logfile.setFormatter(formatter)
handler_stream.setFormatter(formatter)
handler_summary.setFormatter(formatter)
logger.addHandler(handler_logfile)
logger.addHandler(handler_stream)
logger.addHandler(handler_summary)
if "RedirectOutput" in config.keys():
if config["RedirectOutput"]:
sys.stderr = open('/var/log/mydomus/stderr.log', 'w')
        sys.stdout = open('/var/log/mydomus/stdout.log', 'w')
### ADDED API ###
@httpapp.addurl('/')
def root(p,m):
msg = {}
msg["status"]="ok",
msg["version"]=VERSION
msg["log"] = handler_summary.buffered_entry
return json.dumps(msg, sort_keys=True, indent=4)
if __name__ == "__main__":
logger.info("Mydomus service started")
auth.load()
db = dbutil.dbutil(config)
db.InitDB()
t1 = threading.Thread(target=sensor.run,args=(config,))
t1.start()
t2 = threading.Thread(target=scheduler.run,args=(config,))
t2.start()
httpapp.run(port=SERVER_PORT,log_handler=logger)
logger.info("*** EXIT ***")
| gpl-3.0 | 3,706,829,008,748,207,000 | 28.169492 | 106 | 0.693492 | false |
ak212/python-hockey-rss | retry_decorator.py | 1 | 1529 | from functools import wraps
import time
__author__ = "Aaron Koeppel"
__version__ = 1.0
def retry(ExceptionToCheck, logger, tries=4, delay=3, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def decoRetry(f):
@wraps(f)
def fRetry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return fRetry # true decorator
return decoRetry
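# Example usage (illustrative only; assumes `log` is a configured logging.Logger):
#    @retry(IOError, log, tries=3, delay=1, backoff=2)
#    def fetch_feed(url):
#       return urllib2.urlopen(url).read()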
| mit | -6,569,519,518,677,940,000 | 30.854167 | 76 | 0.608895 | false |
phac-nml/irida-miseq-uploader | API/directorymonitor.py | 1 | 4383 | import os
import logging
import time
import threading
from wx.lib.pubsub import pub
from API.pubsub import send_message
from API.directoryscanner import find_runs_in_directory
toMonitor = True
TIMEBETWEENMONITOR = 120
class DirectoryMonitorTopics(object):
"""Topics for monitoring directories for new runs."""
new_run_observed = "new_run_observed"
finished_discovering_run = "finished_discovering_run"
shut_down_directory_monitor = "shut_down_directory_monitor"
start_up_directory_monitor = "start_up_directory_monitor"
finished_uploading_run = "finished_uploading_run"
class RunMonitor(threading.Thread):
"""A convenience thread wrapper for monitoring a directory for runs"""
def __init__(self, directory, cond, name="RunMonitorThread"):
"""Initialize a `RunMonitor`"""
self._directory = directory
self._condition = cond
super(RunMonitor, self).__init__(name=name)
def run(self):
"""Initiate directory monitor. The monitor checks the default
directory every 2 minutes
"""
monitor_directory(self._directory, self._condition)
def join(self, timeout=None):
"""Kill the thread"""
global toMonitor
logging.info("going to kill monitoring")
toMonitor = False
threading.Thread.join(self, timeout)
def on_created(directory, cond):
"""When a CompletedJobInfo.xml file is found without a .miseqUploaderInfo file,
an automatic upload is triggered
"""
logging.info("Observed new run in {}, telling the UI to start uploading it.".format(directory))
directory = os.path.dirname(directory)
# tell the UI to clean itself up before observing new runs
send_message(DirectoryMonitorTopics.new_run_observed)
if toMonitor:
find_runs_in_directory(directory)
# check if monitoring is still "on" after returning from find_runs_in_directory()
if toMonitor:
send_message(DirectoryMonitorTopics.finished_discovering_run)
# using locks to prevent the monitor from running while an upload is happening.
cond.acquire()
cond.wait()
cond.release()
send_message(DirectoryMonitorTopics.finished_uploading_run)
def monitor_directory(directory, cond):
"""Calls the function searches the default directory every 2 minutes unless
monitoring is no longer required
"""
global toMonitor
logging.info("Getting ready to monitor directory {}".format(directory))
pub.subscribe(stop_monitoring, DirectoryMonitorTopics.shut_down_directory_monitor)
pub.subscribe(start_monitoring, DirectoryMonitorTopics.start_up_directory_monitor)
time.sleep(10)
while toMonitor:
search_for_upload(directory, cond)
i = 0
while toMonitor and i < TIMEBETWEENMONITOR:
time.sleep(10)
i = i+10
def search_for_upload(directory, cond):
"""loop through subdirectories of the default directory looking for CompletedJobInfo.xml without
.miseqUploaderInfo files.
"""
global toMonitor
if not os.access(directory, os.W_OK):
logging.warning("Could not access directory while monitoring for samples, directory is not writeable {}".format(directory))
return
root = next(os.walk(directory))[0]
dirs = next(os.walk(directory))[1]
for name in dirs:
check_for_comp_job = os.path.join(root, name, "CompletedJobInfo.xml")
check_for_miseq = os.path.join(root, name, ".miseqUploaderInfo")
if os.path.isfile(check_for_comp_job):
if not os.path.isfile(check_for_miseq):
path_to_upload = check_for_comp_job
if toMonitor:
on_created(path_to_upload, cond)
# After upload, start back at the start of directories
return
# Check each step of loop if monitoring is still required
if not toMonitor:
return
return
def stop_monitoring():
"""Stop directory monitoring by setting toMonitor to False"""
global toMonitor
if toMonitor:
logging.info("Halting monitoring on directory.")
toMonitor = False
def start_monitoring():
"""Restart directory monitoring by setting toMonitor to True"""
global toMonitor
if not toMonitor:
logging.info("Restarting monitor on directory")
toMonitor = True
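# Minimal wiring sketch, not part of the uploader: the directory path below is
# hypothetical; it only shows how RunMonitor pairs a run directory with the
# threading.Condition that on_created() waits on between uploads.
if __name__ == "__main__":
    upload_cond = threading.Condition()
    monitor = RunMonitor("/path/to/miseq/runs", upload_cond)
    monitor.start()
    # The UI is expected to notify() upload_cond when an upload finishes;
    # calling monitor.join() flips the module-level toMonitor flag and stops
    # the monitoring loop.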
| apache-2.0 | -3,668,619,201,657,861,000 | 34.346774 | 131 | 0.680356 | false |
jamiebull1/transport-carbon | transport_carbon/make_db.py | 1 | 1496 | '''
Created on 16 Jan 2014
@author: Jamie
'''
import sqlite3 as lite
import pandas
import pandas.io.sql as pd_lite
''' Create the stations database '''
with lite.connect("./db/uk_stations.db") as con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Stations")
stations = pandas.read_csv('./tables/uk_stations.csv', encoding="utf-8")
pd_lite.write_frame(stations, "Stations", con)
''' Create the emissions factors database '''
with lite.connect("./db/defra_carbon.db") as con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Activities")
activities = ["BusinessBus", "BusinessCarsByMarketSegment","BusinessCarsBySize",
"BusinessFlights", "BusinessFerries", "BusinessRail",
"BusinessMotorbike", "BusinessTaxi",
"FreightCargoShip","FreightFlights", "FreightHGV", "FreightRail",
"FreightSeaTanker", "FreightVans"]
for activity in activities:
cur.execute("DROP TABLE IF EXISTS %s" % activity)
cur.execute("CREATE TABLE Activities(Id INTEGER PRIMARY KEY, Activity TEXT)")
for activity in activities:
cur.execute("INSERT INTO Activities(Activity) VALUES(?)", (activity,))
activity_data = pandas.read_csv('./tables/%s.csv' % activity)
pd_lite.write_frame(activity_data, activity, con)
cur.execute("SELECT Activity FROM Activities")
rows = cur.fetchall()
for row in rows:
print row[0]
| mit | -5,008,361,842,240,347,000 | 33.790698 | 85 | 0.643048 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en_de/rapidmoviez.py | 1 | 6433 | # -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,time
from resources.lib.modules import cleantitle
from resources.lib.modules import dom_parser2
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
from resources.lib.modules import workers
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['rmz.cr']
self.base_link = 'http://rmz.cr/'
self.search_link = 'search/%s'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except BaseException:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except BaseException:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def search(self, title, year):
try:
url = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(title)))
r = self.scraper.get(url).content
r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
r = dom_parser2.parse_dom(r.content, 'li')
r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'})) for i in r]
r = [(i[0].attrs['href'], i[0].content) for i in r]
r = [(urlparse.urljoin(self.base_link, i[0])) for i in r if cleantitle.get(title) in cleantitle.get(i[1]) and year in i[1]]
if r: return r[0]
else: return
except:
return
def sources(self, url, hostDict, hostprDict):
self.sources = []
try:
if url is None:
return self.sources
if debrid.status() is False:
raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = data['year']
hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
imdb = data['imdb']
url = self.search(title, hdlr)
r = self.scraper.get(url).content
if hdlr2 == '':
r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
else:
r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href'])) for i in r if i and i.content != 'Watch']
if hdlr2 != '':
r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]
self.hostDict = hostDict + hostprDict
threads = []
for i in r:
threads.append(workers.Thread(self._get_sources, i[0], i[1]))
[i.start() for i in threads]
alive = [x for x in threads if x.is_alive() is True]
while alive:
alive = [x for x in threads if x.is_alive() is True]
time.sleep(0.1)
return self.sources
except:
return self.sources
def _get_sources(self, name, url):
try:
r = self.scraper.get(url).content
name = client.replaceHTMLCodes(name)
l = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
s = ''
for i in l:
s += i.content
            # Scan the concatenated release text once and drop archive,
            # disc-image and subtitle links.
            urls = re.findall(r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''', s, flags=re.MULTILINE|re.DOTALL)
            urls = [u for u in urls if not any(x in u for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))]
for url in urls:
if url in str(self.sources):
continue
valid, host = source_utils.is_host_valid(url, self.hostDict)
if not valid:
continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
quality, info = source_utils.get_release_quality(name, url)
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except BaseException:
pass
info = ' | '.join(info)
self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
def resolve(self, url):
return url
| gpl-2.0 | 8,336,147,231,519,264,000 | 39.20625 | 161 | 0.524794 | false |
openprocurement/openprocurement.auction | openprocurement/auction/tests/test_databridge.py | 1 | 14374 | # -*- coding: utf-8 -*-
# TODO: test do_until_success function
from gevent import monkey
monkey.patch_all()
import logging
from mock import MagicMock, call
import pytest
from openprocurement.auction.databridge import AuctionsDataBridge
from openprocurement.auction.utils import FeedItem
from openprocurement.auction.tests.utils import test_bridge_config, \
test_bridge_config_error_port
from urlparse import urljoin
from pytest import raises
from copy import deepcopy
import openprocurement.auction.databridge as databridge_module
from openprocurement.auction.tests.utils import \
tender_data_templ, API_EXTRA, ID, tender_data_cancelled, LOT_ID, \
tender_data_active_qualification, tender_data_active_auction
from openprocurement.auction import core as core_module
from openprocurement.auction.databridge import LOGGER as databridge_logger
from openprocurement.auction.core import LOGGER
from StringIO import StringIO
LOGGER.setLevel(logging.DEBUG)
class TestDatabridgeConfig(object):
def test_config_init(self, db, bridge):
bridge_inst = bridge['bridge']
assert 'resource_api_server' in bridge_inst.config['main']
assert 'resource_api_version' in bridge_inst.config['main']
assert 'resource_api_token' in bridge_inst.config['main']
assert 'resource_name' in bridge_inst.config['main']
assert 'couch_url' in bridge_inst.config['main']
assert 'auctions_db' in bridge_inst.config['main']
assert 'timezone' in bridge_inst.config['main']
assert 'auction_worker' in bridge_inst.config['main']
assert 'auction_worker_config' in bridge_inst.config['main']
assert 'plugins' in bridge_inst.config['main']
assert 'esco.EU' in bridge_inst.config['main']
assert 'auction_worker' in bridge_inst.config['main']['esco.EU']
assert bridge_inst.couch_url == \
urljoin(bridge_inst.config['main']['couch_url'],
bridge_inst.config['main']['auctions_db'])
assert bridge_inst.config == bridge['bridge_config']
def test_connection_refused(self, db):
with raises(Exception) as exc_info:
AuctionsDataBridge(test_bridge_config_error_port)
assert exc_info.value.strerror == 'Connection refused'
def test_error_config(self, db):
keys = ['couch_url', 'auctions_db']
for key in keys:
test_bridge_error_config = deepcopy(test_bridge_config)
del test_bridge_error_config['main'][key]
with raises(KeyError) as exc_info:
AuctionsDataBridge(test_bridge_error_config)
assert key in exc_info.value
class TestDataBridgeRunLogInformation(object):
log_capture_string = StringIO()
ch = logging.StreamHandler(log_capture_string)
ch.setLevel(logging.DEBUG)
databridge_logger.addHandler(ch)
def test_check_log_for_start_bridge(self, db, bridge):
"""
        Test checks the log messages at bridge start
"""
bridge['bridge_thread'].join(0.1)
log_strings = self.log_capture_string.getvalue().split('\n')
assert (log_strings[3] == 'Start Auctions Bridge')
assert (log_strings[4] == 'Start data sync...')
class TestDataBridgeGetTenders(object):
@pytest.mark.parametrize(
'bridge', [({'tenders': [{}]*0}), ({'tenders': [{}]*1}),
({'tenders': [{}]*2})], indirect=['bridge'])
def test_run_get_tenders_once(self, db, bridge):
"""
Test checks:
1) 'get_tenders' function is called once inside bridge.run method.
2) 'get_tenders' yields the same number of tenders the database
contains
"""
bridge['bridge_thread'].join(0.1)
# check that 'get_resource_items' function was called once
bridge['mock_resource_items']\
.assert_called_once_with(bridge['bridge'].feeder)
# check that 'get_resource_items' yielded the correct number of tenders
assert bridge['mock_resource_items'].side_effect.ind == \
len(bridge['tenders'])
class TestDataBridgeFeedItem(object):
@pytest.mark.parametrize(
'bridge', [({'tenders': [{}] * 0}), ({'tenders': [{}] * 1}),
({'tenders': [{}] * 2})], indirect=['bridge'])
def test_mapper_call_number(self, db, bridge, mocker):
"""
Test checks:
1) that 'self.mapper' method is called the correct number of times.
2) that 'FeedItem' class is instantiated the correct number of times.
Actually the number of tenders provided by 'get_tenders' function.
"""
mock_feed_item = mocker.patch.object(databridge_module, 'FeedItem',
side_effect=FeedItem,
autospec=True)
mock_mapper = MagicMock()
bridge['bridge'].mapper = mock_mapper
bridge['bridge_thread'].join(0.1)
assert mock_feed_item.call_count == len(bridge['tenders'])
assert mock_mapper.call_count == len(bridge['tenders'])
@pytest.mark.parametrize(
'bridge', [({'tenders': [tender_data_templ]})], indirect=['bridge'])
def test_mapper_args_value(self, db, bridge, mocker):
"""
Test checks:
1) that 'FeedItem' class is instantiated once with correct arguments
2) that 'self.mapper' method is called once with correct arguments,
Actually, with the item yielded by 'get_tenders' function.
3) that 'self.mapper' was called AFTER 'FeedItem' class instantiated.
"""
mock_feed_item = mocker.patch.object(databridge_module, 'FeedItem',
side_effect=FeedItem,
autospec=True)
manager = MagicMock()
mock_mapper = MagicMock()
bridge['bridge'].mapper = mock_mapper
manager.attach_mock(mock_mapper, 'mock_mapper')
manager.attach_mock(mock_feed_item, 'mock_feed_item')
bridge['bridge_thread'].join(0.1)
manager.assert_has_calls(
[call.mock_feed_item(bridge['tenders'][0]),
call.mock_mapper(mock_feed_item(bridge['tenders'][0]))]
)
class TestDataBridgePlanning(object):
@pytest.mark.parametrize(
'bridge', [({'tenders': [{}]}), ({'tenders': [tender_data_templ]}),
({'tenders': [tender_data_active_auction['tender_in_past_data']]})], indirect=['bridge'])
def test_wrong_tender_no_planning(self, db, bridge):
"""
Test checks that the function do_until_success responsible
for running the process planning the auction is not called if tender's
data are inappropriate.
"""
bridge['bridge_thread'].join(0.1)
# check that 'check_call' was not called as tender documents
# doesn't contain appropriate data
assert bridge['mock_do_until_success'].call_count == 0
class TestForDataBridgePositive(object):
@pytest.mark.parametrize(
'bridge', [({'tenders': [tender_data_active_auction['tender_data_no_lots']]})],
indirect=['bridge'])
def test_active_auction_no_lots(self, db, bridge):
"""
Test checks that the function do_until_success function is called once
for the tender satisfying the following conditions:
1) status: active.auction
2) no_lots:
3) 'auctionPeriod' in self.item and 'startDate' in self.item['auctionPeriod'] and 'endDate' not in self.item['auctionPeriod']
4) datetime.now(self.bridge.tz) < start_date
"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'planning', ID,
bridge['bridge_config']['main']['auction_worker_config']],),
)
@pytest.mark.parametrize(
'bridge', [({'tenders': [tender_data_active_auction['tender_data_with_lots']]})],
indirect=['bridge'])
def test_active_auction_with_lots(self, db, bridge):
"""
Test checks that the function do_until_success function is called once
for the tender satisfying the following conditions:
1) status: active.auction
2) have field 'lots'
3) lot["status"] is 'active' and 'auctionPeriod' is in lot and 'startDate' in lot['auctionPeriod']
and 'endDate' not in lot['auctionPeriod']
4) datetime.now(self.bridge.tz) > start_date
"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'planning', ID,
bridge['bridge_config']['main']['auction_worker_config'], '--lot', LOT_ID],),
)
@pytest.mark.parametrize(
'db, bridge',
[([{'_id': '{}_{}'.format(ID, LOT_ID), 'stages': ['a', 'b', 'c'], 'current_stage': 1}],
{'tenders': [tender_data_active_qualification['tender_data_active_qualification']]})],
indirect=['db', 'bridge'])
def test_active_qualification(self, db, bridge):
"""
Tender status: "active.qualification"
tender has 'lots'
"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'announce', ID,
bridge['bridge_config']['main']['auction_worker_config'], '--lot', LOT_ID],),
)
@pytest.mark.parametrize(
'db, bridge',
[([{'_id': '{}_{}'.format(ID, LOT_ID), 'endDate': '2100-06-28T10:32:19.233669+03:00'}],
{'tenders': [tender_data_cancelled['tender_data_with_lots']]})],
indirect=['db', 'bridge'])
def test_cancelled_with_lots(self, db, bridge):
"""Auction has been cancelled with lots"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'cancel', ID,
bridge['bridge_config']['main']['auction_worker_config'], '--lot', LOT_ID],),
)
@pytest.mark.parametrize(
'db, bridge',
[([{'_id': '{}_{}'.format(ID, LOT_ID), 'stages': [{'start': '2100-06-28T10:32:19.233669+03:00'}, 'b', 'c']}],
{'tenders': [tender_data_cancelled['tender_data_with_lots']]})],
indirect=['db', 'bridge'])
def test_cancelled_with_lots_2(self, db, bridge):
"""Auction has been cancelled with lots"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'cancel', ID,
bridge['bridge_config']['main']['auction_worker_config'], '--lot', LOT_ID],),
)
@pytest.mark.parametrize(
'db, bridge',
[([{'_id': ID, 'endDate': '2100-06-28T10:32:19.233669+03:00'}],
{'tenders': [tender_data_cancelled['tender_data_no_lots']]})],
indirect=['db', 'bridge'])
def test_cancelled_no_lots(self, db, bridge):
"""Auction has been cancelled with no lots"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'cancel', ID,
bridge['bridge_config']['main']['auction_worker_config']],),
)
@pytest.mark.parametrize(
'db, bridge',
[([{'_id': ID, 'stages': [{'start': '2100-06-28T10:32:19.233669+03:00'}, 'b', 'c']}],
{'tenders': [tender_data_cancelled['tender_data_no_lots']]})],
indirect=['db', 'bridge'])
def test_cancelled_no_lots_2(self, db, bridge):
"""Auction has been cancelled with no lots"""
bridge['bridge_thread'].join(0.1)
bridge['mock_do_until_success'].assert_called_once_with(
core_module.check_call,
args=([bridge['bridge_config']['main']['auction_worker'], 'cancel', ID,
bridge['bridge_config']['main']['auction_worker_config']],),
)
# TODO: should be refactored
class TestForDataBridgeNegative(object):
@pytest.mark.parametrize(
'bridge', [({'tenders': [tender_data_active_auction['wrong_startDate']]})],
indirect=['bridge'])
def test_active_auction_wrong_date(self, db, bridge):
"""
        # If the start date of the tender is in the past then skip it for planning
        # 1) status - "active.auction"
        # 2) no lots
        # 3) Wrong start date
"""
log_capture_string = StringIO()
ch = logging.StreamHandler(log_capture_string)
ch.setLevel(logging.DEBUG)
LOGGER.addHandler(ch)
bridge['bridge_thread'].join(0.1)
log_strings = log_capture_string.getvalue().split('\n')
assert (log_strings[0] == 'Tender ' + ID + ' start date in past. Skip it for planning')
# @pytest.mark.parametrize(
# 'bridge', [({'tenders': [tender_data_active_auction['re_planning']]})],
# indirect=['bridge'])
# def test_active_auction_re_planning(self, db, bridge, mocker):
# sleep(0.1)
# # TODO Write test
# pass
#
# @pytest.mark.parametrize(
# 'bridge', [({'tenders': [tender_data_active_auction['planned_on_the_same_date']]})],
# indirect=['bridge'])
# def test_active_auction_planned_on_the_same_date(self, db, bridge, mocker):
# sleep(0.1)
# # TODO Write test
# pass
@pytest.mark.parametrize(
'bridge', [({'tenders': [tender_data_active_qualification['no_active_status_in_lot']]})],
indirect=['bridge'])
def test_active_qualification_no_active_status_in_lot(self, db, bridge):
"""
1) status - "active.qualification"
2) Tender must contain lots
3) The status of the lot should not be 'active'
"""
bridge['bridge_thread'].join(0.1)
assert(bridge['tenders'][0]['lots'][0]['status'] != 'active')
| apache-2.0 | 7,674,672,193,438,880,000 | 40.543353 | 133 | 0.60039 | false |
thylong/cabu | cabu/utils/cookies.py | 1 | 1593 | # -*- coding: utf-8 -*-
class CookieStorage(object):
"""Interface between Cookies and Database.
Args:
db (Database): The Database class instance to wrap.
"""
def __init__(self, db):
self.db = db
def get(self, key):
"""Get the value of the given cookie key.
Args:
key (str): The name of the cookie key to retrieve.
Returns:
value (str): The value of the key or None if undefined.
"""
# if callable(getattr(self.db, 'find')):
return self.db.cookies.find_one({key: {'$exists': True}})
def set(self, key, value):
"""Set the value of the defined cookie key.
Args:
key (str): The name of the cookie key to set.
value (str): The value associated to the cookie key to set.
Returns:
raw_result (str): The result of the attempt to store the cookie.
"""
r = self.db.cookies.replace_one({key: {'$exists': True}}, {key: value}, upsert=True)
return r.raw_result
def delete(self, key):
"""Delete the value of the given cookie key.
Args:
key (str): The name of the cookie key to delete.
Returns:
raw_result (str): The result of the attempt to delete the cookie.
"""
return self.db.cookies.remove({key: {'$exists': True}})
def clean(self):
"""Delete all the cookies stored in the database.
Returns:
raw_result (str): The result of the cleaning.
"""
return self.db.cookies.remove({})
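# Usage sketch, not part of the module: it assumes a pymongo-style client is
# reachable locally and the database name "cabu" is hypothetical; the methods
# above only require a `cookies` collection to exist on `db`.
if __name__ == "__main__":
    from pymongo import MongoClient
    storage = CookieStorage(MongoClient().cabu)
    storage.set("session_id", "abc123")
    print(storage.get("session_id"))
    storage.delete("session_id")
    storage.clean()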
| bsd-3-clause | -702,076,479,647,128,800 | 27.446429 | 92 | 0.557439 | false |
orviz/ooi | ooi/tests/middleware/test_compute_controller.py | 1 | 13660 | # -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from ooi.tests import fakes
from ooi.tests.middleware import test_middleware
from ooi import utils
def build_occi_server(server):
name = server["name"]
server_id = server["id"]
flavor_id = fakes.flavors[server["flavor"]["id"]]["id"]
flavor_name = fakes.flavors[server["flavor"]["id"]]["name"]
ram = fakes.flavors[server["flavor"]["id"]]["ram"]
cores = fakes.flavors[server["flavor"]["id"]]["vcpus"]
image_id = server["image"]["id"]
status = server["status"].upper()
if status in ("ACTIVE",):
status = "active"
elif status in ("PAUSED", "SUSPENDED", "STOPPED"):
status = "suspended"
else:
status = "inactive"
cats = []
cats.append('compute; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="compute resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"'),
cats.append('%s; '
'scheme="http://schemas.openstack.org/template/os#"; '
'class="mixin"; title="%s"; '
'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"'
                % (image_id, image_id))
cats.append('%s; '
'scheme="http://schemas.openstack.org/template/resource#"; '
'class="mixin"; title="Flavor: %s"; '
'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"'
                % (flavor_id, flavor_name))
attrs = [
'occi.core.title="%s"' % name,
'occi.compute.state="%s"' % status,
'occi.compute.memory=%s' % ram,
'occi.compute.cores=%s' % cores,
'occi.compute.hostname="%s"' % name,
'occi.core.id="%s"' % server_id,
]
links = []
links.append('<%s/compute/%s?action=restart>; '
'rel="http://schemas.ogf.org/occi/'
'infrastructure/compute/action#restart"' %
(fakes.application_url, server_id))
links.append('<%s/compute/%s?action=start>; '
'rel="http://schemas.ogf.org/occi/'
'infrastructure/compute/action#start"' %
(fakes.application_url, server_id))
links.append('<%s/compute/%s?action=stop>; '
'rel="http://schemas.ogf.org/occi/'
'infrastructure/compute/action#stop"' %
(fakes.application_url, server_id))
links.append('<%s/compute/%s?action=suspend>; '
'rel="http://schemas.ogf.org/occi/'
'infrastructure/compute/action#suspend"' %
(fakes.application_url, server_id))
result = []
for c in cats:
result.append(("Category", c))
for l in links:
result.append(("Link", l))
for a in attrs:
result.append(("X-OCCI-Attribute", a))
return result
class TestComputeController(test_middleware.TestMiddleware):
"""Test OCCI compute controller."""
def test_list_vms_empty(self):
tenant = fakes.tenants["bar"]
app = self.get_app()
for url in ("/compute/", "/compute"):
req = self._build_req(url, tenant["id"], method="GET")
m = mock.MagicMock()
m.user.project_id = tenant["id"]
req.environ["keystone.token_auth"] = m
resp = req.get_response(app)
expected_result = ""
self.assertDefaults(resp)
self.assertExpectedResult(expected_result, resp)
self.assertEqual(204, resp.status_code)
def test_list_vms(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
for url in ("/compute/", "/compute"):
req = self._build_req(url, tenant["id"], method="GET")
resp = req.get_response(app)
self.assertEqual(200, resp.status_code)
expected = []
for s in fakes.servers[tenant["id"]]:
expected.append(
("X-OCCI-Location",
utils.join_url(self.application_url + "/",
"compute/%s" % s["id"]))
)
self.assertDefaults(resp)
self.assertExpectedResult(expected, resp)
def test_show_vm(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
for server in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s" % server["id"],
tenant["id"], method="GET")
resp = req.get_response(app)
expected = build_occi_server(server)
self.assertDefaults(resp)
self.assertExpectedResult(expected, resp)
self.assertEqual(200, resp.status_code)
def test_vm_not_found(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
req = self._build_req("/compute/%s" % uuid.uuid4().hex,
tenant["id"], method="GET")
resp = req.get_response(app)
self.assertEqual(404, resp.status_code)
def test_action_vm(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
for action in ("stop", "start", "restart"):
headers = {
'Category': (
'%s;'
'scheme="http://schemas.ogf.org/occi/infrastructure/'
'compute/action#";'
'class="action"' % action)
}
for server in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s?action=%s" % (server["id"],
action),
tenant["id"], method="POST",
headers=headers)
resp = req.get_response(app)
self.assertDefaults(resp)
self.assertEqual(204, resp.status_code)
def test_invalid_action(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
action = "foo"
for server in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s?action=%s" % (server["id"],
action),
tenant["id"], method="POST")
resp = req.get_response(app)
self.assertDefaults(resp)
self.assertEqual(400, resp.status_code)
def test_action_body_mismatch(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
action = "stop"
headers = {
'Category': (
'start;'
'scheme="http://schemas.ogf.org/occi/infrastructure/'
'compute/action#";'
'class="action"')
}
for server in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s?action=%s" % (server["id"],
action),
tenant["id"], method="POST",
headers=headers)
resp = req.get_response(app)
self.assertDefaults(resp)
self.assertEqual(400, resp.status_code)
def test_create_vm(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
headers = {
'Category': (
'compute;'
'scheme="http://schemas.ogf.org/occi/infrastructure#";'
'class="kind",'
'foo;'
'scheme="http://schemas.openstack.org/template/resource#";'
'class="mixin",'
'bar;'
'scheme="http://schemas.openstack.org/template/os#";'
'class="mixin"')
}
req = self._build_req("/compute", tenant["id"], method="POST",
headers=headers)
resp = req.get_response(app)
expected = [("X-OCCI-Location",
utils.join_url(self.application_url + "/",
"compute/%s" % "foo"))]
self.assertEqual(200, resp.status_code)
self.assertExpectedResult(expected, resp)
self.assertDefaults(resp)
def test_create_vm_incomplete(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
headers = {
'Category': (
'compute;'
'scheme="http://schemas.ogf.org/occi/infrastructure#";'
'class="kind",'
'bar;'
'scheme="http://schemas.openstack.org/template/os#";'
'class="mixin"')
}
req = self._build_req("/compute", tenant["id"], method="POST",
headers=headers)
resp = req.get_response(app)
self.assertEqual(400, resp.status_code)
self.assertDefaults(resp)
def test_create_with_context(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
headers = {
'Category': (
'compute;'
'scheme="http://schemas.ogf.org/occi/infrastructure#";'
'class="kind",'
'foo;'
'scheme="http://schemas.openstack.org/template/resource#";'
'class="mixin",'
'bar;'
'scheme="http://schemas.openstack.org/template/os#";'
'class="mixin",'
'user_data;'
'scheme="http://schemas.openstack.org/compute/instance#";'
'class="mixin"'
),
'X-OCCI-Attribute': (
'org.openstack.compute.user_data="foo"'
)
}
req = self._build_req("/compute", tenant["id"], method="POST",
headers=headers)
resp = req.get_response(app)
expected = [("X-OCCI-Location",
utils.join_url(self.application_url + "/",
"compute/%s" % "foo"))]
self.assertEqual(200, resp.status_code)
self.assertExpectedResult(expected, resp)
self.assertDefaults(resp)
def test_vm_links(self):
tenant = fakes.tenants["baz"]
app = self.get_app()
for server in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s" % server["id"],
tenant["id"], method="GET")
resp = req.get_response(app)
self.assertDefaults(resp)
self.assertContentType(resp)
self.assertEqual(200, resp.status_code)
source = utils.join_url(self.application_url + "/",
"compute/%s" % server["id"])
# volumes
vols = server.get("os-extended-volumes:volumes_attached", [])
for v in vols:
vol_id = v["id"]
link_id = '_'.join([server["id"], vol_id])
target = utils.join_url(self.application_url + "/",
"storage/%s" % vol_id)
self.assertResultIncludesLink(link_id, source, target, resp)
# network
addresses = server.get("addresses", {})
for addr_set in addresses.values():
for addr in addr_set:
ip = addr["addr"]
link_id = '_'.join([server["id"], ip])
if addr["OS-EXT-IPS:type"] == "fixed":
net_id = "fixed"
else:
net_id = "floating"
target = utils.join_url(self.application_url + "/",
"network/%s" % net_id)
self.assertResultIncludesLink(link_id, source, target,
resp)
def test_delete_vm(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
for s in fakes.servers[tenant["id"]]:
req = self._build_req("/compute/%s" % s["id"],
tenant["id"], method="DELETE")
resp = req.get_response(app)
self.assertContentType(resp)
self.assertEqual(204, resp.status_code)
# TODO(enolfc): find a way to be sure that all servers
# are in fact deleted.
def test_delete_all_vms(self):
tenant = fakes.tenants["foo"]
app = self.get_app()
req = self._build_req("/compute/", tenant["id"], method="DELETE")
resp = req.get_response(app)
self.assertContentType(resp)
self.assertEqual(204, resp.status_code)
class ComputeControllerTextPlain(test_middleware.TestMiddlewareTextPlain,
TestComputeController):
"""Test OCCI compute controller with Accept: text/plain."""
class ComputeControllerTextOcci(test_middleware.TestMiddlewareTextOcci,
TestComputeController):
"""Test OCCI compute controller with Accept: text/occi."""
| apache-2.0 | -7,797,839,068,062,063,000 | 36.119565 | 79 | 0.502635 | false |
david-wm-sanders/rwrtrack | rwrtrack/sum.py | 1 | 5245 | """Provides functionality for calculating sums from statistics data."""
from sqlalchemy.sql import func, and_
from sqlalchemy.orm.query import Query
from .db import sesh
from .record import Record, RA, RB
from .difference import Diff
from .filter import filter_
from .constants import EARTH_EQUAT_CIRC
class Sum:
"""Provides shortcuts for summing Record metrics for use in SQLAlchemy queries."""
_count = func.count(Record.account_id).label("_count")
# Columned statistics
xp = func.sum(Record.xp).label("xp")
time_played = func.sum(Record.time_played).label("time_played")
kills = func.sum(Record.kills).label("kills")
deaths = func.sum(Record.deaths).label("deaths")
kill_streak = func.sum(Record.kill_streak).label("kill_streak")
targets_destroyed = func.sum(Record.targets_destroyed).label("targets_destroyed")
vehicles_destroyed = func.sum(Record.vehicles_destroyed).label("vehicles_destroyed")
soldiers_healed = func.sum(Record.soldiers_healed).label("soldiers_healed")
team_kills = func.sum(Record.team_kills).label("team_kills")
distance_moved = func.sum(Record.distance_moved).label("distance_moved")
shots_fired = func.sum(Record.shots_fired).label("shots_fired")
throwables_thrown = func.sum(Record.throwables_thrown).label("throwables_thrown")
# Converted statistics
time_played_hours = (time_played / 60.0).label("time_played_hours")
distance_moved_km = (distance_moved / 1000.0).label("distance_moved_km")
# Derived statistics
score = (kills - deaths).label("score")
runs_around_the_equator = (distance_moved_km / EARTH_EQUAT_CIRC).label("runs_around_the_equator")
class DiffSum:
"""Provides shortcuts for summing differenced Record metrics for use in SQLAlchemy queries."""
_count = func.count(RA.account_id).label("_count")
# Columned statistics
xp = func.sum(Diff.xp).label("xp")
time_played = func.sum(Diff.time_played).label("time_played")
kills = func.sum(Diff.kills).label("kills")
deaths = func.sum(Diff.deaths).label("deaths")
kill_streak = func.sum(Diff.kill_streak).label("kill_streak")
targets_destroyed = func.sum(Diff.targets_destroyed).label("targets_destroyed")
vehicles_destroyed = func.sum(Diff.vehicles_destroyed).label("vehicles_destroyed")
soldiers_healed = func.sum(Diff.soldiers_healed).label("soldiers_healed")
team_kills = func.sum(Diff.team_kills).label("team_kills")
distance_moved = func.sum(Diff.distance_moved).label("distance_moved")
shots_fired = func.sum(Diff.shots_fired).label("shots_fired")
throwables_thrown = func.sum(Diff.throwables_thrown).label("throwables_thrown")
# Converted statistics
time_played_hours = (time_played / 60.0).label("time_played_hours")
distance_moved_km = (distance_moved / 1000.0).label("distance_moved_km")
# Derived statistics
score = (kills - deaths).label("score")
runs_around_the_equator = (distance_moved_km / EARTH_EQUAT_CIRC).label("runs_around_the_equator")
sum_query = Query([Sum._count, Sum.xp, Sum.time_played, Sum.time_played_hours, Sum.kills, Sum.deaths, Sum.score,
Sum.kill_streak, Sum.targets_destroyed, Sum.vehicles_destroyed, Sum.soldiers_healed,
Sum.team_kills, Sum.distance_moved, Sum.distance_moved_km,
Sum.shots_fired, Sum.throwables_thrown, Sum.runs_around_the_equator])
diffsum_query = Query([DiffSum._count, DiffSum.xp, DiffSum.time_played, DiffSum.time_played_hours,
DiffSum.kills, DiffSum.deaths, DiffSum.score, DiffSum.kill_streak,
DiffSum.targets_destroyed, DiffSum.vehicles_destroyed, DiffSum.soldiers_healed,
DiffSum.team_kills, DiffSum.distance_moved, DiffSum.distance_moved_km,
DiffSum.shots_fired, DiffSum.throwables_thrown, DiffSum.runs_around_the_equator]).\
filter(RA.account_id == RB.account_id)
def _sum(date, usernames=None, record_filters=None):
"""Return a SQLAlchemy query that will calculate the sums of Records on the date."""
q = sum_query.with_session(sesh).filter(Record.date == date)
if usernames:
q = q.filter(Record.username.in_(usernames))
if record_filters:
q = filter_(q, Record, record_filters)
return q
def sum_(date, usernames=None, record_filters=None):
"""Return the sums of Records on the date as a dict."""
return _sum(date, usernames, record_filters).one()._asdict()
def _diffsum(date_a, date_b, usernames=None, record_filters=None, diff_filters=None):
"""Return a SQLAlchemy query that will calculate the sums of differenced Records on the date."""
q = diffsum_query.with_session(sesh).filter(RA.date == date_a, RB.date == date_b)
if usernames:
q = q.filter(RA.username.in_(usernames))
if record_filters:
q = filter_(q, RA, record_filters)
if diff_filters:
q = filter_(q, Diff, diff_filters)
return q
def diffsum(date_a, date_b, usernames=None, record_filters=None, diff_filters=None):
"""Return the sums of differenced Records on the date as a dict."""
return _diffsum(date_a, date_b, usernames, record_filters, diff_filters).one()._asdict()
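# Usage sketch, not part of the module: the dates and username below are
# hypothetical, and it assumes the rwrtrack database session (`sesh`) already
# holds records for those dates.
if __name__ == "__main__":
    from datetime import date
    totals = sum_(date(2020, 1, 2))
    print(totals["_count"], totals["kills"])
    deltas = diffsum(date(2020, 1, 2), date(2020, 1, 1), usernames=["SomePlayer"])
    print(deltas["score"], deltas["time_played_hours"])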
| mit | 1,729,537,021,495,413,200 | 47.564815 | 112 | 0.691134 | false |
googleapis/python-compute | google/cloud/compute_v1/services/global_operations/transports/base.py | 1 | 7885 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
from requests import __version__ as requests_version
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
grpc_version=None,
rest_version=requests_version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class GlobalOperationsTransport(abc.ABC):
"""Abstract transport class for GlobalOperations."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.wait: gapic_v1.method.wrap_method(
self.wait, default_timeout=None, client_info=client_info,
),
}
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListGlobalOperationsRequest],
Union[
compute.OperationAggregatedList, Awaitable[compute.OperationAggregatedList]
],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteGlobalOperationRequest],
Union[
compute.DeleteGlobalOperationResponse,
Awaitable[compute.DeleteGlobalOperationResponse],
],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetGlobalOperationRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListGlobalOperationsRequest],
Union[compute.OperationList, Awaitable[compute.OperationList]],
]:
raise NotImplementedError()
@property
def wait(
self,
) -> Callable[
[compute.WaitGlobalOperationRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
__all__ = ("GlobalOperationsTransport",)
| apache-2.0 | -7,184,622,160,919,310,000 | 35.336406 | 103 | 0.636271 | false |
Onirik79/aaritmud | data/proto_items/flora/_flora_item_mirtillo-rosso-01-frutto.py | 1 | 14309 | # -*- coding: utf-8 -*-
#= DESCRIPTION =================================================================
# DISCARDED SCRIPT (prototype for plants)
# red bilberries (mirtilli rossi)
# simple ecosystem project around the red bilberry
# DROPping the red bilberry seed in a suitable location
# starts a sequence of callLater calls that replace the berry
# with a sprout, a plant, a flowering plant and a fruiting plant.
# after fruiting it goes back to plant and loops: plant, flowering, fruiting.
# the fruiting version carries both good and bad berries.
# at every stage change of the plant a random roll decides whether the plant
# must die or not.
# the number of fruits produced is given by a random value + a fixed value
# MAX_FRUIT_QUANTITY plus the plant's age
# luck (fortuna) is a parameter that is decremented from stage to stage,
# putting a cap on the plant's maximum age
#= CODING NOTES ================================================================
# at the moment the various functions pass the room reference to each other
# even though only germinate() actually makes use of it
#= IMPORT ======================================================================
import random
from twisted.internet import reactor
from src.database import database
from src.log import log
from src.enums import SECTOR, TO
from src.item import Item
#= CONSTANTS ==================================================================
# number of good fruits loaded by the flowering plant
MAX_FRUIT_QUANTITY = 5
# integer > 1; the higher it is, the longer-lived plants will be on average
FORTUNA = 100
# min and max duration of the seed stage
SEED_WAIT_MIN = 100
SEED_WAIT_MAX = 200
# min and max duration of the sprout stage
GERM_WAIT_MIN = 100
GERM_WAIT_MAX = 200
# min and max duration of the plant stage
PLANT_WAIT_MIN = 100
PLANT_WAIT_MAX = 200
# min and max duration of flowering
FLOWER_WAIT_MIN = 100
FLOWER_WAIT_MAX = 200
# min and max duration of the plant with ripe fruit
GROWED_WAIT_MIN = 500
GROWED_WAIT_MAX = 1000
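# Worked example of the fruit-count roll used later in fructification(),
# MAX_FRUIT_QUANTITY + 2 * age + random.randint(1, 4) - 2: with
# MAX_FRUIT_QUANTITY = 5, a plant at age 2 and a roll of 3 (hypothetical
# values) it loads 5 + 4 + 3 - 2 = 10 good berries, and an independent roll
# decides how many sterile ones it carries.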
#= TROUBLE ======================================================================
def after_drop(entity, seed, room, behavioured):
if not room.IS_ROOM:
return
if room.sector not in (SECTOR.PLAIN, SECTOR.WOOD, SECTOR.SAVANNA, SECTOR.HILL):
return
fortuna = FORTUNA
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint( SEED_WAIT_MIN, SEED_WAIT_MAX ), marciume, seed, room)
return
    # coefficient tracking the plant's age
AGE = 0
reactor.callLater(random.randint( SEED_WAIT_MIN, SEED_WAIT_MAX ), germinate, seed, room, AGE, fortuna)
#- Fine Funzione
def germinate(seed, room, age, fortuna):
if seed.code not in database["items"]:
return
germoglio = Item("karpuram_item_mirtillo-rosso-02-germoglio")
if not germoglio:
log.bug("impossibile creare germoglio: %r" % germoglio)
return
    # rough check to make sure the seed has not been moved in the meantime
    # 2 problems we can run into:
    # aa) someone drops it, picks it up and drops it again an hour later and it still grows (to be avoided)
    # bb) the room itself resets and, even if the seed was not moved, the room has changed and it does not grow (to be avoided)
    # maybe checking the xyz coordinates could solve bb
    # (gatto says problem bb does not actually occur even if the room resets)
    # still to be checked because, as usual, I think we misunderstood each other and I was thinking of different rooms resetting at the same coordinates
    # to solve aa it might be enough for a get to somehow interrupt the script (how?)
if room != seed.location:
        # log to be removed
log.bug("room di drop diversa da room attuale")
return
location=seed.location
seed.act("Di recente $N s'è schiuso...", TO.OTHERS, seed)
seed.act("Di recente $N s'è schiuso...", TO.ENTITY, seed)
seed.extract(1)
germoglio.inject(location)
germoglio.act("... in $N.", TO.OTHERS, germoglio)
germoglio.act("... in $N.", TO.ENTITY, germoglio)
fortuna = fortuna -1
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint( GERM_WAIT_MIN , GERM_WAIT_MAX ), desiccation, germoglio, room, age)
return
reactor.callLater(random.randint( GERM_WAIT_MIN , GERM_WAIT_MAX ), growing, germoglio, room, age, fortuna)
#- Fine Funzione
def growing(germoglio, room, age, fortuna):
if germoglio.code not in database["items"]:
return
pianta = Item("karpuram_item_mirtillo-rosso-03-pianta")
if not pianta:
log.bug("impossibile creare pianta: %r" % pianta)
return
location=germoglio.location
germoglio.act("Quel che poco tempo fa era solo $N, ora ...", TO.OTHERS, germoglio)
germoglio.act("Quel che poco tempo fa era solo $N, ora ...", TO.ENTITY, germoglio)
germoglio.extract(1)
pianta.inject(location)
pianta.act("... è $N.", TO.OTHERS, pianta)
fortuna = fortuna -1
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint( PLANT_WAIT_MIN , PLANT_WAIT_MAX ), desiccation, pianta, room, age)
return
reactor.callLater(random.randint( PLANT_WAIT_MIN , PLANT_WAIT_MAX ), blooming, pianta, room, age, fortuna)
#- Fine Funzione
def blooming(pianta, room, age, fortuna):
if pianta.code not in database["items"]:
return
fiore = Item("karpuram_item_mirtillo-rosso-04-fiore")
if not fiore:
log.bug("impossibile creare fiore: %r" % fiore)
return
location=pianta.location
pianta.act("Nelle scorse ore $N ha aperto i fiori", TO.OTHERS, pianta)
pianta.act("Nelle scorse ore $N ha aperto i fiori", TO.ENTITY, pianta)
pianta.extract(1)
fiore.inject(location)
fiore.act("$N rifulge di tutto il suo splendore.", TO.OTHERS, fiore)
fiore.act("$N rifulge di tutto il suo splendore.", TO.ENTITY, fiore)
fortuna = fortuna -1
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint( FLOWER_WAIT_MIN , FLOWER_WAIT_MAX ), desiccation, fiore, room, age )
return
reactor.callLater(random.randint( FLOWER_WAIT_MIN , FLOWER_WAIT_MAX ),fructification , fiore, room, age, fortuna)
#- Fine Funzione
def fructification(fiore, room, age, fortuna):
if fiore.code not in database["items"]:
return
fruttificato = Item("karpuram_item_mirtillo-rosso-05-fruttificato")
if not fruttificato:
log.bug("impossibile creare fruttificato: %r" % fruttificato)
return
    # the Item instruction is executed here for the check,
    # but it is then repeated several times in the loop below
    # so this is somewhat dirty code...
    bacca_buona = Item("karpuram_item_mirtillo-rosso-01-frutto")
    if not bacca_buona:
        log.bug("impossibile creare bacca_buona: %r" % bacca_buona)
        return
    bacca_cattiva = Item("karpuram_item_mirtillo-rosso-00-frutto-sterile")
    if not bacca_cattiva:
        log.bug("impossibile creare bacca_cattiva: %r" % bacca_cattiva)
return
location=fiore.location
fiore.act("Dei fiori d'un tempo $N non ne ha più...", TO.OTHERS, fiore)
fiore.act("Dei fiori d'un tempo $N non ne ha più...", TO.ENTITY, fiore)
fiore.extract(1)
fruttificato.inject(location)
for q in xrange(MAX_FRUIT_QUANTITY + 2 * age + random.randint(1,4) - 2 ):
bacca_buona = Item("karpuram_item_mirtillo-rosso-01-frutto")
bacca_buona.inject(fruttificato)
for q in xrange(MAX_FRUIT_QUANTITY + 2 * age + random.randint(1,4) - 2 ):
bacca_cattiva = Item("karpuram_item_mirtillo-rosso-00-frutto-sterile")
bacca_cattiva.inject(fruttificato)
fruttificato.act("in compenso ora è $N.", TO.OTHERS, fruttificato)
fortuna = fortuna -1
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint(GROWED_WAIT_MIN,GROWED_WAIT_MAX), desiccation, fruttificato, room, age )
return
reactor.callLater(random.randint(GROWED_WAIT_MIN,GROWED_WAIT_MAX), renew, fruttificato, room, age, fortuna )
#- Fine Funzione
def renew(fruttificato, room, age, fortuna):
if fruttificato.code not in database["items"]:
return
pianta = Item("karpuram_item_mirtillo-rosso-03-pianta")
if not pianta:
log.bug("impossibile creare pianta: %r" % pianta)
return
age = age +1
location=fruttificato.location
fruttificato.act("quel che un tempo doveva esser $N, ora ...", TO.OTHERS, fruttificato)
fruttificato.act("quel che un tempo doveva esser $N, ora ...", TO.ENTITY, fruttificato)
fruttificato.extract(1)
pianta.inject(location)
pianta.act("... è $N che fruttificherà più avanti", TO.OTHERS, pianta)
pianta.act("... è $N che fruttificherà più avanti", TO.ENTITY, pianta)
fortuna = fortuna -1
if random.randint(1, fortuna) == 1:
reactor.callLater(random.randint(10,20), desiccation, pianta, room, age )
return
reactor.callLater(random.randint(10,20), blooming, pianta, room, age, fortuna)
#- Fine Funzione
# the seed has rotted
def marciume(seed, room):
if seed.code not in database["items"]:
return
frutto_marcio = Item("karpuram_item_mirtillo-rosso-00-frutto-marcio")
if not frutto_marcio:
log.bug("impossibile creare frutto_marcio: %r" % frutto_marcio)
return
if room != seed.location:
        # log to be removed
log.bug("room di drop diversa da room attuale")
return
location=seed.location
seed.act("$N appare tutto molliccio...", TO.OTHERS, seed)
seed.act("$N appare tutto molliccio...", TO.ENTITY, seed)
seed.extract()
frutto_marcio.inject(location)
frutto_marcio.act("... è $N buono solo come concime.", TO.OTHERS, frutto_marcio)
frutto_marcio.act("... è $N buono solo come concime.", TO.ENTITY, frutto_marcio)
    # worth considering whether rotten seeds should disappear after a while
    # to avoid them piling up without limit
#- Fine Funzione
# the current stage of the plant rots away
def desiccation(generic_plant, room, age):
if generic_plant.code not in database["items"]:
return
pianta_morta = Item("karpuram_item_mirtillo-rosso-06-pianta_morta")
if not pianta_morta:
        # this should not exit the script here, otherwise the plant freezes at this stage
log.bug("impossibile creare pianta_morta: %r" % pianta_morta)
return
location=generic_plant.location
generic_plant.act("Il rigoglio di $N ...", TO.OTHERS, generic_plant)
generic_plant.act("Il rigoglio di $N ...", TO.ENTITY, generic_plant)
generic_plant.extract()
pianta_morta.inject(location)
pianta_morta.descr_sixth = "una pianta che ha passato %r stagioni" % age
pianta_morta.act("... è tramontato in $N.", TO.OTHERS, pianta_morta)
pianta_morta.act("... è tramontato in $N.", TO.ENTITY, pianta_morta)
    # here too we still need to figure out how to remove the corpses of dead plants
    # it is just as urgent as removing the rotten berries
#- Fine Funzione
#
# # Ora che è nato, è certo contento e lo dirà al mondo!
# reactor.callLater(random.randint(15, 30), command_say, gallina_adulta, "Ero piccolo ma adesso non più!")
#
# reactor.callLater(random.randint(20, 35), start_deposing, gallina_adulta)
##- Fine Funzione -
#
#def death(entity):
#
# location = entity.location
# if not location:
# log.bug("gatto, l'on_init è lento sul grow")
# return
# if not location.IS_ROOM:
# return
#
# # Se il settore della stanza non è corretto esce
# if location.sector not in (SECTOR.INSIDE, SECTOR.PLAIN, SECTOR.WOOD, SECTOR.FARMLAND):
# return
#
# # Se il gallinotto nel frattempo non si trova più tra i mob allora esce
# if entity.code not in database["mobs"]:
# return
#
# # Rimuove il gallinozzo dalla stanza e dal database,
# # Avvisiamo tutti quanti del lieto evento
# entity.act("$N non è più fra noi...", TO.OTHERS, entity)
# entity.act("È ora di morire!", TO.TARGET, entity)
#
# entity.from_location()
# del(database["mobs"][entity.code])
##- Fine Funzione -
#
## funzione di rimozione entità di tipo item
#def death_items(entity):
#
# location = entity.location
# if not location:
# log.bug("not location")
# return
# if not location.IS_ROOM:
# # (TD)
# # qui probabile che debba andare a morire anche se non è in room
# # quindi vada tolto il controllo
# log.bug("location is not room")
# return
#
# # la morte non si ferma davanti a simili quisquilie
# ## Se il settore della stanza non è corretto esce
# #if location.sector not in (SECTOR.INSIDE, SECTOR.PLAIN, SECTOR.WOOD, SECTOR.FARMLAND):
# return
#
# # Se nel frattempo non si trova più tra gli items allora esce
# if entity.code not in database["items"]:
# return
#
# # Rimuove finalmente dalla stanza e dal database,
# # Avvisiamo tutti quanti del triste epilogo
# entity.act("$N non è più fra noi...", TO.OTHERS, entity)
# entity.act("Appassisco", TO.TARGET, entity)
#
# entity.from_location()
# del(database["items"][entity.code])
##- Fine Funzione -
#
## perfezionato e rimuove sia mob che intem
#def death(entity):
#
# location = entity.location
# if not location:
# log.bug("gatto, l'on_init è lento sul grow")
# return
# # tolto che la morte non bada a simili quisquilie
# #if not location.IS_ROOM:
# # return
#
# # tolto che la morte non bada a simili quisquilie
# ## Se il settore della stanza non è corretto esce
# #if location.sector not in (SECTOR.INSIDE, SECTOR.PLAIN, SECTOR.WOOD, SECTOR.FARMLAND):
# return
#
# # Se l'entità nel frattempo non si trova più tra i mob o tra gli items allora esce
# if entity.code not in database[entity.ACCESS_ATTR]:
# return
#
# # Rimuove l'entità dalla locazione e dal database,
# # ma prima avvisiamo tutti quanti del lieto evento
# entity.act("$N non è più fra noi...", TO.OTHERS, entity)
# entity.act("È ora di morire!", TO.TARGET, entity)
#
# entity.from_location()
# del(database[entity.ACCESS_ATTR][entity.code])
##- Fine Funzione -
| gpl-2.0 | 5,973,117,736,689,592,000 | 33.580097 | 124 | 0.66035 | false |
flavour/iscram | controllers/hrm.py | 1 | 38417 | # -*- coding: utf-8 -*-
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars(module)
# =============================================================================
def index():
""" Dashboard """
mode = session.s3.hrm.mode
if mode is not None:
redirect(URL(f="person"))
tablename = "hrm_human_resource"
table = s3db.hrm_human_resource
roles = session.s3.roles or []
if ADMIN not in roles:
orgs = session.s3.hrm.orgs or [None]
org_filter = (table.organisation_id.belongs(orgs))
else:
# Admin can see all Orgs
org_filter = (table.organisation_id > 0)
s3mgr.configure(tablename,
insertable=False,
list_fields=["id",
"person_id",
"job_title",
"type",
"site_id"])
response.s3.filter = org_filter
# Parse the Request
r = s3base.S3Request(s3mgr, prefix="hrm", name="human_resource")
# Pre-process
# Only set the method to search if it is not an ajax dataTable call
# This fixes a problem with the dataTable where the the filter had a
# distinct in the sql which cause a ticket to be raised
if r.representation != "aadata":
r.method = "search"
r.custom_action = s3db.hrm_human_resource_search
# Execute the request
output = r()
if r.representation == "aadata":
return output
# Post-process
response.s3.actions = [dict(label=str(T(messages["UPDATE"])),
_class="action-btn",
url=URL(f="person",
args=["human_resource"],
vars={"human_resource.id": "[id]"}))]
if r.interactive:
output["module_name"] = response.title
if session.s3.hrm.orgname:
output["orgname"] = session.s3.hrm.orgname
response.view = "hrm/index.html"
query = (table.deleted != True) & \
(table.status == 1) & org_filter
# Staff
ns = db(query & (table.type == 1)).count()
# Volunteers
nv = db(query & (table.type == 2)).count()
output["ns"] = ns
output["nv"] = nv
try:
module_name = deployment_settings.modules[module].name_nice
except:
module_name = T("Human Resources Management")
response.title = module_name
output["title"] = module_name
return output
# =============================================================================
# People
# =============================================================================
def human_resource():
"""
HR Controller
"""
tablename = "hrm_human_resource"
table = s3db[tablename]
# Must specify a group to create HRs
# Interactive
group = request.vars.get("group", None)
if group == None:
# Imports
groupCode = request.vars.get("human_resource.type", None)
if groupCode == "2":
group = "volunteer"
elif groupCode == "1":
group = "staff"
if group == "volunteer":
_type = table.type
_type.default = 2
response.s3.filter = (_type == 2)
_type.readable = False
_type.writable = False
_location = table.location_id
_location.writable = True
_location.readable = True
_location.label = T("Home Address")
table.site_contact.writable = False
table.site_contact.readable = False
list_fields = ["id",
"person_id",
"job_title",
"organisation_id",
"location_id",
"status",
]
s3mgr.configure(tablename,
list_fields = list_fields)
table.job_title.label = T("Volunteer Role")
s3.crud_strings[tablename].update(
title_create = T("Add Volunteer"),
title_display = T("Volunteer Information"),
title_list = T("Volunteers"),
title_search = T("Search Volunteers"),
subtitle_create = T("Add New Volunteer"),
subtitle_list = T("Volunteers"),
label_create_button = T("Add Volunteer"),
msg_record_created = T("Volunteer added"))
# Remove inappropriate filters from the Search widget
human_resource_search = s3mgr.model.get_config(tablename,
"search_method")
# Facility
human_resource_search._S3Search__advanced.pop(6)
# Type
human_resource_search._S3Search__advanced.pop(1)
s3mgr.configure(tablename,
search_method = human_resource_search)
elif group == "staff":
#s3mgr.configure(table._tablename, insertable=False)
# Default to Staff
_type = table.type
_type.default = 1
response.s3.filter = (_type == 1)
_type.readable = False
_type.writable = False
table.site_id.writable = True
table.site_id.readable = True
list_fields = ["id",
"person_id",
"job_title",
"organisation_id",
"site_id",
"site_contact",
"end_date",
"status",
]
s3mgr.configure(tablename,
list_fields = list_fields)
s3.crud_strings[tablename].update(
title_create = T("Add Staff Member"),
title_list = T("Staff"),
title_search = T("Search Staff"),
title_upload = T("Import Staff & Volunteers"),
)
if "expiring" in request.get_vars:
response.s3.filter = response.s3.filter & \
(table.end_date < (request.utcnow + datetime.timedelta(weeks=4)))
s3.crud_strings[tablename].title_list = T("Staff with Contracts Expiring in the next Month")
# Remove the big Add button
s3mgr.configure(tablename,
insertable=False)
# Remove Type filter from the Search widget
human_resource_search = s3mgr.model.get_config(tablename,
"search_method")
human_resource_search._S3Search__advanced.pop(1)
s3mgr.configure(tablename,
search_method = human_resource_search)
def prep(r):
if r.interactive:
# Assume volunteers only between 12-81
s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972, future=-144)
table = r.table
table.site_id.comment = DIV(DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Facility"),
T("The site where this position is based."),
T("Enter some characters to bring up a list of possible matches."))))
if r.method != "read":
# Don't want to see in Create forms
# inc list_create (list_fields over-rides)
field = table.status
field.writable = False
field.readable = False
if r.method == "create" and r.component is None:
if group in (1, 2):
field = table.type
field.readable = False
field.writable = False
elif r.representation == "plain":
# Don't redirect Map popups
pass
elif r.id:
# Redirect to person controller
vars = {"human_resource.id": r.id}
if group:
vars.update(group=group)
redirect(URL(f="person",
vars=vars))
return True
response.s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
s3_action_buttons(r, deletable=False)
if "msg" in deployment_settings.modules:
# @ToDo: Remove this now that we have it in Events?
response.s3.actions.append({
"url": URL(f="compose",
vars = {"hrm_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Message"))})
elif r.representation == "plain":
# Map Popups
output = hrm_map_popup(r)
return output
response.s3.postp = postp
output = s3_rest_controller(interactive_report=True)
return output
# -----------------------------------------------------------------------------
def hrm_map_popup(r):
"""
Custom output to place inside a Map Popup
- called from postp of human_resource controller
"""
output = TABLE()
append = output.append
# Edit button
append(TR(TD(A(T("Edit"),
_target="_blank",
_id="edit-btn",
_href=URL(args=[r.id, "update"])))))
# First name, last name
append(TR(TD(B("%s:" % T("Name"))),
TD(s3_fullname(r.record.person_id))))
# Job Title
if r.record.job_title:
append(TR(TD(B("%s:" % r.table.job_title.label)),
TD(r.record.job_title)))
# Organization (better with just name rather than Represent)
# @ToDo: Make this configurable - some deployments will only see
# their staff so this is a meaningless field
#table = s3db.org_organisation
#query = (table.id == r.record.organisation_id)
#name = db(query).select(table.name,
# limitby=(0, 1)).first().name
#append(TR(TD(B("%s:" % r.table.organisation_id.label)),
# TD(name)))
# Components link to the Person record
person_id = r.record.person_id
# Skills
table = s3db.hrm_competency
stable = s3db.hrm_skill
query = (table.person_id == person_id) & \
(table.skill_id == stable.id)
skills = db(query).select(stable.name)
if skills:
vals = [skill.name for skill in skills]
if len(skills) > 1:
represent = ", ".join(vals)
else:
represent = len(vals) and vals[0] or ""
append(TR(TD(B("%s:" % T("Skills"))),
TD(represent)))
# Certificates
table = s3db.hrm_certification
ctable = s3db.hrm_certificate
query = (table.person_id == person_id) & \
(table.certificate_id == ctable.id)
certificates = db(query).select(ctable.name)
if certificates:
vals = [cert.name for cert in certificates]
if len(certificates) > 1:
represent = ", ".join(vals)
else:
represent = len(vals) and vals[0] or ""
append(TR(TD(B("%s:" % T("Certificates"))),
TD(represent)))
# Trainings
table = s3db.hrm_training
etable = s3db.hrm_training_event
ctable = s3db.hrm_course
query = (table.person_id == person_id) & \
(table.training_event_id == etable.id) & \
(etable.course_id == ctable.id)
trainings = db(query).select(ctable.name)
if trainings:
vals = [train.name for train in trainings]
if len(trainings) > 1:
represent = ", ".join(vals)
else:
represent = len(vals) and vals[0] or ""
append(TR(TD(B("%s:" % T("Trainings"))),
TD(represent)))
if r.record.location_id:
table = s3db.gis_location
query = (table.id == r.record.location_id)
location = db(query).select(table.path,
table.addr_street,
limitby=(0, 1)).first()
# City
# Street address
if location.addr_street:
append(TR(TD(B("%s:" % table.addr_street.label)),
TD(location.addr_street)))
# Mobile phone number
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ptable.id == person_id) & \
(ctable.pe_id == ptable.pe_id)
contacts = db(query).select(ctable.contact_method,
ctable.value)
email = mobile_phone = ""
for contact in contacts:
if contact.contact_method == "EMAIL":
email = contact.value
elif contact.contact_method == "SMS":
mobile_phone = contact.value
if mobile_phone:
append(TR(TD(B("%s:" % msg.CONTACT_OPTS.get("SMS"))),
TD(mobile_phone)))
# Office number
if r.record.site_id:
table = s3db.org_office
query = (table.site_id == r.record.site_id)
office = db(query).select(table.phone1,
limitby=(0, 1)).first()
if office and office.phone1:
append(TR(TD(B("%s:" % T("Office Phone"))),
TD(office.phone1)))
else:
# @ToDo: Support other Facility Types (Hospitals & Shelters)
pass
# Email address (as hyperlink)
if email:
append(TR(TD(B("%s:" % msg.CONTACT_OPTS.get("EMAIL"))),
TD(A(email, _href="mailto:%s" % email))))
return output
# -----------------------------------------------------------------------------
def person():
"""
Person Controller
- used for Personal Profile & Imports
- includes components relevant to HRM
@ToDo: Volunteers should be redirected to vol/person?
"""
# Custom Method for Contacts
s3mgr.model.set_method("pr", resourcename,
method="contacts",
action=s3db.pr_contacts)
if deployment_settings.has_module("asset"):
# Assets as component of people
s3mgr.model.add_component("asset_asset",
pr_person="assigned_to_id")
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
s3mgr.configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
group = request.get_vars.get("group", "staff")
hr_id = request.get_vars.get("human_resource.id", None)
if not str(hr_id).isdigit():
hr_id = None
mode = session.s3.hrm.mode
# Configure human resource table
tablename = "hrm_human_resource"
table = s3db[tablename]
if hr_id and str(hr_id).isdigit():
hr = table[hr_id]
if hr:
group = hr.type == 2 and "volunteer" or "staff"
# Also inform the back-end of this finding
request.get_vars["group"] = group
org = session.s3.hrm.org
if org is not None:
table.organisation_id.default = org
table.organisation_id.comment = None
table.organisation_id.readable = False
table.organisation_id.writable = False
table.site_id.requires = IS_EMPTY_OR(IS_ONE_OF(db,
"org_site.%s" % super_key(db.org_site),
s3db.org_site_represent,
filterby="organisation_id",
filter_opts=[session.s3.hrm.org]))
if hr_id:
if group == "staff":
table.site_id.writable = True
table.site_id.readable = True
else:
# Volunteer
table.location_id.writable = True
table.location_id.readable = True
table.location_id.label = T("Home Address")
else:
table.location_id.readable = True
table.site_id.readable = True
if session.s3.hrm.mode is not None:
s3mgr.configure(tablename,
list_fields=["id",
"organisation_id",
"type",
"job_title",
"status",
"location_id",
"site_id"])
else:
s3mgr.configure(tablename,
list_fields=["id",
"type",
"job_title",
"status",
"location_id",
"site_id"])
# Configure person table
# - hide fields
tablename = "pr_person"
table = s3db[tablename]
table.pe_label.readable = False
table.pe_label.writable = False
table.missing.readable = False
table.missing.writable = False
table.age_group.readable = False
table.age_group.writable = False
s3mgr.configure(tablename,
deletable=False)
if group == "staff":
s3.crud_strings[tablename].update(
title_upload = T("Import Staff"))
# No point showing the 'Occupation' field - that's the Job Title in the Staff Record
table.occupation.readable = False
table.occupation.writable = False
# Just have a Home Address
table = s3db.pr_address
table.type.default = 1
table.type.readable = False
table.type.writable = False
_crud = s3.crud_strings.pr_address
_crud.title_create = T("Add Home Address")
_crud.title_update = T("Edit Home Address")
s3mgr.model.add_component("pr_address",
pr_pentity=dict(joinby=super_key(s3db.pr_pentity),
multiple=False))
# Default type for HR
table = s3db.hrm_human_resource
table.type.default = 1
request.get_vars.update(xsltmode="staff")
else:
s3.crud_strings[tablename].update(
title_upload = T("Import Volunteers"))
# Default type for HR
table = db.hrm_human_resource
table.type.default = 2
request.get_vars.update(xsltmode="volunteer")
if session.s3.hrm.mode is not None:
# Configure for personal mode
s3db.hrm_human_resource.organisation_id.readable = True
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# People can view their own HR data, but not edit it
s3mgr.configure("hrm_human_resource",
insertable = False,
editable = False,
deletable = False)
s3mgr.configure("hrm_certification",
insertable = True,
editable = True,
deletable = True)
s3mgr.configure("hrm_credential",
insertable = False,
editable = False,
deletable = False)
s3mgr.configure("hrm_competency",
insertable = True, # Can add unconfirmed
editable = False,
deletable = False)
s3mgr.configure("hrm_training", # Can add but not provide grade
insertable = True,
editable = False,
deletable = False)
s3mgr.configure("hrm_experience",
insertable = False,
editable = False,
deletable = False)
s3mgr.configure("pr_group_membership",
insertable = False,
editable = False,
deletable = False)
else:
# Configure for HR manager mode
s3.crud_strings[tablename].update(
title_upload = T("Import Staff & Volunteers"))
if group == "staff":
s3.crud_strings[tablename].update(
title_display = T("Staff Member Details"),
title_update = T("Staff Member Details"))
elif group == "volunteer":
s3.crud_strings[tablename].update(
title_display = T("Volunteer Details"),
title_update = T("Volunteer Details"))
# Upload for configuration (add replace option)
response.s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))
# Import pre-process
def import_prep(data, group=group):
"""
Deletes all HR records (of the given group) of the organisation
before processing a new data import, used for the import_prep
hook in s3mgr
"""
request = current.request
resource, tree = data
xml = s3mgr.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if response.s3.import_replace:
if tree is not None:
if group == "staff":
group = 1
elif group == "volunteer":
group = 2
else:
return # don't delete if no group specified
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(s3mgr.xml.xml_decode(org_name))
except:
pass
if org_name:
htable = s3db.hrm_human_resource
otable = s3db.org_organisation
query = (otable.name == org_name) & \
(htable.organisation_id == otable.id) & \
(htable.type == group)
resource = s3mgr.define_resource("hrm", "human_resource", filter=query)
ondelete = s3mgr.model.get_config("hrm_human_resource", "ondelete")
resource.delete(ondelete=ondelete, format="xml", cascade=True)
s3mgr.import_prep = import_prep
# CRUD pre-process
def prep(r):
if r.representation == "s3json":
s3mgr.show_ids = True
elif r.interactive and r.method != "import":
if r.component:
if r.component_name == "asset":
# Edits should always happen via the Asset Log
# @ToDo: Allow this method too, if we can do so safely
s3mgr.configure("asset_asset",
insertable = False,
editable = False,
deletable = False)
else:
# Assume volunteers only between 12-81
r.table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
resource = r.resource
if mode is not None:
r.resource.build_query(id=s3_logged_in_person())
else:
if not r.id and not hr_id:
# pre-action redirect => must retain prior errors
if response.error:
session.error = response.error
redirect(URL(r=r, f="human_resource"))
if resource.count() == 1:
resource.load()
r.record = resource.records().first()
if r.record:
r.id = r.record.id
if not r.record:
session.error = T("Record not found")
redirect(URL(f="human_resource",
args=["search"], vars={"group":group}))
if hr_id and r.component_name == "human_resource":
r.component_id = hr_id
s3mgr.configure("hrm_human_resource",
insertable = False)
if not r.component_id or r.method in ("create", "update"):
address_hide(s3db.pr_address)
return True
response.s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component and r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
response.s3.postp = postp
# REST Interface
if session.s3.hrm.orgname and mode is None:
orgname = session.s3.hrm.orgname
else:
orgname = None
output = s3_rest_controller("pr", resourcename,
native=False,
rheader=s3db.hrm_rheader,
orgname=orgname,
replace_option=T("Remove existing data before import"))
return output
# =============================================================================
# Teams
# =============================================================================
def group():
"""
Team controller
- uses the group table from PR
"""
tablename = "pr_group"
table = s3db[tablename]
_group_type = table.group_type
_group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
mtable = s3db.pr_group_membership
mtable.group_id.label = T("Team ID")
mtable.group_head.label = T("Team Leader")
# Set Defaults
_group_type.default = 3 # 'Relief Team'
_group_type.readable = _group_type.writable = False
# Only show Relief Teams
# Do not show system groups
response.s3.filter = (table.system == False) & \
(_group_type == 3)
# CRUD Strings
ADD_TEAM = T("Add Team")
LIST_TEAMS = T("List Teams")
s3.crud_strings[tablename] = Storage(
title_create = ADD_TEAM,
title_display = T("Team Details"),
title_list = LIST_TEAMS,
title_update = T("Edit Team"),
title_search = T("Search Teams"),
subtitle_create = T("Add New Team"),
subtitle_list = T("Teams"),
label_list_button = LIST_TEAMS,
label_create_button = T("Add New Team"),
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Teams currently registered"))
s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Member"),
subtitle_create = T("Add New Member"),
subtitle_list = T("Current Team Members"),
label_list_button = T("List Members"),
label_create_button = T("Add Team Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
s3mgr.configure(tablename, main="name", extra="description",
# Redirect to member list when a new group has been created
create_next = URL(f="group",
args=["[id]", "group_membership"]))
s3mgr.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"])
# Post-process
def postp(r, output):
if r.interactive:
if not r.component:
update_url = URL(args=["[id]", "group_membership"])
s3_action_buttons(r, deletable=False, update_url=update_url)
if "msg" in deployment_settings.modules:
response.s3.actions.append({
"url": URL(f="compose",
vars = {"group_id": "[id]"}),
"_class": "action-btn",
"label": str(T("Send Notification"))})
return output
response.s3.postp = postp
tabs = [
(T("Team Details"), None),
# Team should be contacted either via the Leader or
# simply by sending a message to the group as a whole.
#(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
]
output = s3_rest_controller("pr", resourcename,
rheader=lambda r: s3db.pr_rheader(r, tabs=tabs))
return output
# =============================================================================
# Jobs
# =============================================================================
def job_role():
""" Job Roles Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
response.s3.prep = prep
output = s3_rest_controller()
return output
# =============================================================================
# Skills
# =============================================================================
def skill():
""" Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
def skill_type():
""" Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def competency_rating():
""" Competency Rating for Skill Types Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def skill_competencies():
"""
Called by S3FilterFieldChange to provide the competency options for a
particular Skill Type
"""
table = s3db.hrm_skill
ttable = s3db.hrm_skill_type
rtable = s3db.hrm_competency_rating
query = (table.id == request.args[0]) & \
(table.skill_type_id == ttable.id) & \
(rtable.skill_type_id == table.skill_type_id)
records = db(query).select(rtable.id,
rtable.name,
orderby=~rtable.priority)
response.headers["Content-Type"] = "application/json"
return records.json()
# -----------------------------------------------------------------------------
def skill_provision():
""" Skill Provisions Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def course():
""" Courses Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def course_certificate():
""" Courses to Certificates Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def certificate():
""" Certificates Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
return True
response.s3.prep = prep
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# -----------------------------------------------------------------------------
def certificate_skill():
""" Certificates to Skills Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def training():
""" Training Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
roles = session.s3.roles or []
if ADMIN not in roles and \
EDITOR not in roles:
ttable = s3db.hrm_training
hrtable = s3db.hrm_human_resource
orgtable = s3db.org_organisation
orgs = session.s3.hrm.orgs
query = (ttable.person_id == hrtable.person_id) & \
(hrtable.organisation_id == orgtable.id) & \
(orgtable.pe_id.belongs(orgs))
response.s3.filter = query
output = s3_rest_controller(interactive_report = True)
return output
# -----------------------------------------------------------------------------
def training_event():
""" Training Events Controller """
mode = session.s3.hrm.mode
if mode is not None:
session.error = T("Access denied")
redirect(URL(f="index"))
def prep(r):
if r.interactive and r.component:
# Use appropriate CRUD strings
s3.crud_strings["hrm_training"] = Storage(
title_create = T("Add Participant"),
title_display = T("Participant Details"),
title_list = T("Participants"),
title_update = T("Edit Participant"),
title_search = T("Search Participants"),
title_upload = T("Import Participant Participants"),
subtitle_create = T("Add Participant"),
subtitle_list = T("Participants"),
label_list_button = T("List Participants"),
label_create_button = T("Add New Participant"),
label_delete_button = T("Delete Participant"),
msg_record_created = T("Participant added"),
msg_record_modified = T("Participant updated"),
msg_record_deleted = T("Participant deleted"),
msg_no_match = T("No entries found"),
msg_list_empty = T("Currently no Participants registered"))
return True
response.s3.prep = prep
output = s3_rest_controller(rheader=s3db.hrm_rheader)
return output
# =============================================================================
def staff_org_site_json():
"""
Used by the Asset - Assign to Person page
"""
table = s3db.hrm_human_resource
otable = s3db.org_organisation
#db.req_commit.date.represent = lambda dt: dt[:10]
query = (table.person_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(table.site_id,
otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# =============================================================================
# Messaging
# =============================================================================
def compose():
""" Send message to people/teams """
vars = request.vars
if "hrm_id" in vars:
id = vars.hrm_id
fieldname = "hrm_id"
table = s3db.pr_person
htable = s3db.hrm_human_resource
query = (htable.id == id) & \
(htable.person_id == table.id)
title = T("Send a message to this person")
elif "group_id" in request.vars:
id = request.vars.group_id
fieldname = "group_id"
table = s3db.pr_group
query = (table.id == id)
title = T("Send a message to this team")
else:
session.error = T("Record not found")
redirect(URL(f="index"))
pe = db(query).select(table.pe_id,
limitby=(0, 1)).first()
if not pe:
session.error = T("Record not found")
redirect(URL(f="index"))
pe_id = pe.pe_id
if "hrm_id" in vars:
# Get the individual's communications options & preference
ctable = s3db.pr_contact
contact = db(ctable.pe_id == pe_id).select(ctable.contact_method,
orderby="priority",
limitby=(0, 1)).first()
if contact:
s3db.msg_outbox.pr_message_method.default = contact.contact_method
else:
session.error = T("No contact method found")
redirect(URL(f="index"))
# URL to redirect to after message sent
url = URL(c=module,
f="compose",
vars={fieldname: id})
# Create the form
output = msg.compose(recipient = pe_id,
url = url)
output["title"] = title
response.view = "msg/compose.html"
return output
# END =========================================================================
| mit | 7,011,889,472,696,497,000 | 35.657443 | 134 | 0.493089 | false |
jvce92/web-tdd | superlists/settings.py | 1 | 2740 | """
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6tlbr#djuyt)i=a-#1ch)g_(os(sd12v5*^pes32)_)#214e^j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR,'../static'))
| gpl-3.0 | 1,074,121,401,098,659,500 | 25.346154 | 71 | 0.690146 | false |
085astatine/togetter | togetter/webpage.py | 1 | 2064 | # -*- coding:utf-8 -*-
import logging
import pathlib
from typing import Union
import lxml.html
import requests
class WebPage:
def __init__(
self,
url: str,
session: requests.sessions.Session = None,
params: dict = None,
logger: logging.Logger = None) -> None:
"""Initialize
Arguments:
url (str): URL to send.
session (requests.sessions.Session, optional):
A Requests session.
                Defaults to None, in which case a new Session will be created.
params (dict, optional):
                dictionary of URL parameters to append to the URL.
Defaults to None.
logger (logging.Logger, optional):
Logger
                Defaults to None, in which case a new Logger will be created."""
self._logger = (logger
if logger is not None
else logging.getLogger(__name__))
self._session = session if session is not None else requests.Session()
self._response = self._session.get(url, params=params)
self._html = lxml.html.fromstring(self.response.content)
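    # Illustrative usage sketch (not part of the original class; the URL and
    # filenames below are placeholders only):
    #     page = WebPage('https://example.com', params={'q': 'togetter'})
    #     print(page.page_title())
    #     page.save('page.html')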
@property
def session(self) -> requests.sessions.Session:
return self._session
@property
def url(self) -> str:
return self._response.url
@property
def response(self) -> requests.Response:
return self._response
@property
def html(self) -> lxml.html.HtmlElement:
return self._html
def page_title(self) -> str:
"""Get page title from HTML header"""
xpath = r'head//title'
return self.html.xpath(xpath)[0].text
def save(self, filepath: Union[str, pathlib.Path]) -> None:
"""Save the contents of the pages in the file
Args:
filepath (str, pathlib.Path): Filepath to save the contents
"""
if isinstance(filepath, str):
filepath = pathlib.Path(filepath)
with filepath.open(mode='wb') as file:
file.write(self.response.content)
| mit | -6,903,450,613,005,605,000 | 29.352941 | 78 | 0.580426 | false |
memsql/memsql-mesos | memsql_framework/util/json.py | 1 | 1725 | import simplejson
# declare direct exports here
loads = simplejson.loads
JSONDecodeError = simplejson.JSONDecodeError
def _simplejson_datetime_serializer(obj):
""" Designed to be passed as simplejson.dumps default serializer.
Serializes dates and datetimes to ISO strings.
"""
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError('Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj)))
def _set_defaults(kwargs, pretty=False):
kwargs.setdefault('default', _simplejson_datetime_serializer)
kwargs.setdefault('for_json', True)
if pretty:
kwargs.setdefault('separators', (',', ': '))
kwargs.setdefault('indent', ' ' * 4)
kwargs.setdefault('sort_keys', True)
else:
kwargs.setdefault('separators', (',', ':'))
return kwargs
def dumps(data, **kwargs):
""" Dump the provided data to JSON via simplejson.
Sets a bunch of default options providing the following functionality:
* serializes anything with a isoformat method (like datetime) to a iso timestamp.
* if it encounters an unknown object it will try calling for_json on it
to get a json serializable version.
"""
return simplejson.dumps(data, **_set_defaults(kwargs))
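# Illustrative example (not part of the original module): with the defaults set
# above, datetime values serialize to ISO-8601 strings and compact separators
# are used, e.g. dumps({"when": datetime.datetime(2016, 1, 1, 12, 30)}) returns
# '{"when":"2016-01-01T12:30:00"}' (assuming `import datetime`).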
def pretty_dumps(data, **kwargs):
""" Same as dumps, except it formats the JSON so it looks pretty. """
return simplejson.dumps(data, **_set_defaults(kwargs, pretty=True))
def safe_loads(data, default, **kwargs):
""" Tries to load the provided data, on failure returns the default instead. """
try:
return simplejson.loads(data, **kwargs)
except JSONDecodeError:
return default
| apache-2.0 | 6,660,841,081,443,745,000 | 34.204082 | 111 | 0.675942 | false |
arthurdejong/python-stdnum | stdnum/py/ruc.py | 1 | 2920 | # rut.py - functions for handling Paraguay RUC numbers
# coding: utf-8
#
# Copyright (C) 2019 Leandro Regueiro
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""RUC number (Registro Único de Contribuyentes, Paraguay tax number).
The Registro Único del Contribuyente (RUC) is the unique taxpayer registry
that maintains identification numbers for all persons (national or foreign)
and legal entities in Paraguay.
The RUC number for legal entities consists of 8 digits starting after
80000000. Numbers for residents and foreigners are up to 9 digits. The last
digit is a check digit.
More information:
* https://www.ruc.com.py/
>>> validate('80028061-0')
'800280610'
>>> validate('9991603')
'9991603'
>>> validate('2660-3')
'26603'
>>> validate('800532492')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> validate('80123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> format('800000358')
'80000035-8'
"""
from stdnum.exceptions import *
from stdnum.util import clean, isdigits
def compact(number):
"""Convert the number to the minimal representation.
This strips the number of any valid separators and removes surrounding
whitespace.
"""
return clean(number, ' -').upper().strip()
def calc_check_digit(number):
"""Calculate the check digit.
The number passed should not have the check digit included.
"""
s = sum((i + 2) * int(n) for i, n in enumerate(reversed(number)))
return str((-s % 11) % 10)
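# Worked example (consistent with the doctests above): for '80000035' the
# reversed digits are 5,3,0,0,0,0,0,8 and the weights run 2..9, so the weighted
# sum is 2*5 + 3*3 + 9*8 = 91 (the zero digits contribute nothing), and
# (-91 % 11) % 10 == 8, matching the formatted number '80000035-8'.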
def validate(number):
"""Check if the number is a valid Paraguay RUC number.
This checks the length, formatting and check digit.
"""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not isdigits(number):
raise InvalidFormat()
if number[-1] != calc_check_digit(number[:-1]):
raise InvalidChecksum()
return number
def is_valid(number):
"""Check if the number is a valid Paraguay RUC number."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the number to the standard presentation format."""
number = compact(number)
return '-'.join([number[:-1], number[-1]])
| lgpl-2.1 | -7,021,451,548,509,538,000 | 27.891089 | 75 | 0.705278 | false |
persandstrom/home-assistant | homeassistant/components/config/automation.py | 3 | 1986 | """Provide configuration end points for Automations."""
import asyncio
from collections import OrderedDict
import uuid
from homeassistant.const import CONF_ID
from homeassistant.components.config import EditIdBasedConfigView
from homeassistant.components.automation import (
PLATFORM_SCHEMA, DOMAIN, async_reload)
import homeassistant.helpers.config_validation as cv
CONFIG_PATH = 'automations.yaml'
@asyncio.coroutine
def async_setup(hass):
"""Set up the Automation config API."""
hass.http.register_view(EditAutomationConfigView(
DOMAIN, 'config', CONFIG_PATH, cv.string,
PLATFORM_SCHEMA, post_write_hook=async_reload
))
return True
class EditAutomationConfigView(EditIdBasedConfigView):
"""Edit automation config."""
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
index = None
for index, cur_value in enumerate(data):
# When people copy paste their automations to the config file,
# they sometimes forget to add IDs. Fix it here.
if CONF_ID not in cur_value:
cur_value[CONF_ID] = uuid.uuid4().hex
elif cur_value[CONF_ID] == config_key:
break
else:
cur_value = OrderedDict()
cur_value[CONF_ID] = config_key
index = len(data)
data.append(cur_value)
# Iterate through some keys that we want to have ordered in the output
updated_value = OrderedDict()
for key in ('id', 'alias', 'trigger', 'condition', 'action'):
if key in cur_value:
updated_value[key] = cur_value[key]
if key in new_value:
updated_value[key] = new_value[key]
# We cover all current fields above, but just in case we start
# supporting more fields in the future.
updated_value.update(cur_value)
updated_value.update(new_value)
data[index] = updated_value
| apache-2.0 | 4,748,439,277,051,262,000 | 33.241379 | 78 | 0.639476 | false |
idosekely/python-lessons | lesson_5/server.py | 1 | 1684 | import socket
import sys
__author__ = 'sekely'
class SimpleServer(object):
def __init__(self, addr='localhost', port=50000, buf=1024):
self.buf = buf
self.server_address = (addr, port)
        print('setting up server on %s port %s' % self.server_address)
self._bind()
def _bind(self):
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(self.server_address)
def _listen(self):
# Listen for incoming connections
self.sock.listen(1)
def _accept(self):
print('waiting for a connection')
# Wait for a connection
connection, client_address = self.sock.accept()
return connection, client_address
def start(self):
self._listen()
while True:
connection, client_address = self._accept()
try:
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(self.buf)
print('received "%s"' % data)
if data:
print('sending data back to the client')
connection.sendall(data)
else:
print('no more data from', client_address)
break
finally:
# Clean up the connection
connection.close()
def stop(self):
print("\nclosing server")
self.sock.close()
if __name__ == '__main__':
server = SimpleServer()
try:
server.start()
except KeyboardInterrupt:
server.stop()
| mit | 4,789,459,424,898,749,000 | 29.071429 | 73 | 0.529097 | false |
RPGOne/Skynet | imbalanced-learn-master/imblearn/under_sampling/tests/test_neighbourhood_cleaning_rule.py | 1 | 3601 | """Test the module neighbourhood cleaning rule."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from imblearn.under_sampling import NeighbourhoodCleaningRule
# Generate a global dataset to use
RND_SEED = 0
X, Y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=RND_SEED)
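# Note (added for clarity): weights=[0.1, 0.9] makes class 0 the minority class
# (500 of the 5000 samples), which the fit assertions below rely on.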
def test_ncr_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(NeighbourhoodCleaningRule)
def test_ncr_init():
"""Test the initialisation of the object"""
# Define a ratio
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
assert_equal(ncr.size_ngh, 3)
assert_equal(ncr.n_jobs, -1)
assert_equal(ncr.random_state, RND_SEED)
def test_ncr_fit_single_class():
"""Test either if an error when there is a single class"""
# Create the object
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, ncr.fit, X, y_single_class)
def test_ncr_fit():
"""Test the fitting method"""
# Create the object
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
# Fit the data
ncr.fit(X, Y)
# Check if the data information have been computed
assert_equal(ncr.min_c_, 0)
assert_equal(ncr.maj_c_, 1)
assert_equal(ncr.stats_c_[0], 500)
assert_equal(ncr.stats_c_[1], 4500)
def test_ncr_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Create the object
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
assert_raises(RuntimeError, ncr.sample, X, Y)
def test_ncr_fit_sample():
"""Test the fit sample routine"""
# Resample the data
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
X_resampled, y_resampled = ncr.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.load(os.path.join(currdir, 'data', 'ncr_x.npy'))
y_gt = np.load(os.path.join(currdir, 'data', 'ncr_y.npy'))
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_ncr_fit_sample_with_indices():
"""Test the fit sample routine with indices support"""
# Resample the data
ncr = NeighbourhoodCleaningRule(return_indices=True, random_state=RND_SEED)
X_resampled, y_resampled, idx_under = ncr.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.load(os.path.join(currdir, 'data', 'ncr_x.npy'))
y_gt = np.load(os.path.join(currdir, 'data', 'ncr_y.npy'))
idx_gt = np.load(os.path.join(currdir, 'data', 'ncr_idx.npy'))
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_ncr_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
ncr = NeighbourhoodCleaningRule(random_state=RND_SEED)
ncr.fit(X, Y)
assert_raises(RuntimeError, ncr.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
| bsd-3-clause | 2,330,156,791,655,878,700 | 30.587719 | 79 | 0.672036 | false |
Mellthas/quodlibet | quodlibet/quodlibet/ext/events/trayicon/__init__.py | 1 | 2461 | # Copyright 2004-2006 Joe Wreschnig, Michael Urman, Iñigo Serna
# 2012 Christoph Reiter
# 2013 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet import _
from quodlibet import app
from quodlibet.plugins.events import EventPlugin
from quodlibet.qltk import is_wayland, Icons
from quodlibet.util import (is_unity, is_osx, is_plasma, is_enlightenment,
print_exc, print_w, print_d)
from .prefs import Preferences
from .systemtray import SystemTray
if is_osx():
# Works, but not without problems:
# https://github.com/quodlibet/quodlibet/issues/1870
# The dock menu is more useful so disable.
from quodlibet.plugins import PluginNotSupportedError
raise PluginNotSupportedError
def get_indicator_impl():
"""Returns a BaseIndicator implementation depending on the environ"""
use_app_indicator = (is_unity() or is_wayland() or is_plasma() or
is_enlightenment())
print_d("use app indicator: %s" % use_app_indicator)
if not use_app_indicator:
return SystemTray
else:
try:
from .appindicator import AppIndicator
except ImportError:
print_w("importing app indicator failed")
print_exc()
# no indicator, fall back
return SystemTray
else:
return AppIndicator
class TrayIconPlugin(EventPlugin):
PLUGIN_ID = "Tray Icon"
PLUGIN_NAME = _("Tray Icon")
PLUGIN_DESC = _("Controls Quod Libet from the system tray.")
PLUGIN_ICON = Icons.USER_DESKTOP
def enabled(self):
impl = get_indicator_impl()
self._tray = impl()
self._tray.set_song(app.player.song)
self._tray.set_info_song(app.player.info)
self._tray.set_paused(app.player.paused)
def disabled(self):
self._tray.remove()
del self._tray
def PluginPreferences(self, parent):
return Preferences()
def plugin_on_song_started(self, song):
self._tray.set_song(app.player.song)
self._tray.set_info_song(app.player.info)
def plugin_on_paused(self):
self._tray.set_paused(True)
def plugin_on_unpaused(self):
self._tray.set_paused(False)
| gpl-2.0 | 5,917,764,536,973,619,000 | 30.139241 | 74 | 0.657317 | false |
moneymaker365/plugin.video.ustvvod | resources/lib/stations/pbskids.py | 1 | 8311 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import common
import connection
import m3u8
import re
import simplejson
import sys
import urllib
import ustvpaths
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
addon = xbmcaddon.Addon()
pluginHandle = int (sys.argv[1])
SITE = "pbskids"
NAME = "PBS Kids"
DESCRIPTION = "PBS Kids is the brand for children's programming aired by the Public Broadcasting Service (PBS) in the United States founded in 1993. It is aimed at children ages 2 to 13."
SHOWS = "http://pbskids.org/pbsk/video/api/getShows/?destination=producer"
SWFURL = "http://www-tc.pbs.org/video/media/swf/PBSPlayer.swf?video=%s&player=viral"
TYPES = ["Episode", "Segment", "Clip", "Promotion", "Interstitial", "Other"]
SEASON = "http://pbskids.org/pbsk/video/api/getVideos/?destination=producer&program=%s&endindex=1&encoding=&orderby=-airdate&status=available&category=&type=%s"
EPISODES = "http://pbskids.org/pbsk/video/api/getVideos/?destination=producer&program=%s&endindex=100&encoding=&orderby=-airdate&status=available&category=&type=%s&return=type,airdate,images,expirationdate,rating"
VIDEO = "http://pbskids.org/pbsk/video/api/getVideos/?destination=producer&guid=%s&endindex=1&encoding=&return=captions"
def masterlist():
master_db = []
master_menu = simplejson.loads(connection.getURL(SHOWS))
for master_item in master_menu['items']:
master_name = common.smart_utf8(master_item['title'])
master_db.append((master_name, SITE, 'seasons', urllib.quote_plus(master_name)))
return master_db
def seasons(show_name = common.args.url):
seasons = []
for type in TYPES:
season_data = connection.getURL(SEASON % (show_name, type))
season_menu = simplejson.loads(season_data)
try:
season_count = int(season_menu['matched'])
except:
season_count = 0
if season_count > 0:
seasons.append((type + 's', SITE, 'episodes', EPISODES % (show_name, type), -1, -1))
return seasons
def episodes(episode_url = common.args.url):
episodes = []
episode_data = connection.getURL(episode_url)
episode_menu = simplejson.loads(episode_data)
for episode_item in episode_menu['items']:
if episode_item['videos']:
url = episode_item['guid']
episode_name = episode_item['title']
episode_plot = episode_item['description']
episode_airdate = common.format_date(episode_item['airdate'], '%Y-%m-%d %H:%M:%S', '%d.%m.%Y')
episode_duration = int(episode_item['videos'].itervalues().next()['length']) / 1000
try:
episode_thumb = episode_item['images']['kids-mezzannine-16x9']['url']
except:
try:
episode_thumb = episode_item['images']['kids-mezzannine-4x3']['url']
except:
episode_thumb = episode_item['images']['mezzanine']['url']
HD = False
for video in episode_item['videos']['flash'].itervalues():
try:
if video['bitrate'] > 2000:
HD = True
except:
pass
episode_type = 'Full ' + episode_item['type']
show_name = episode_item['series_title']
try:
episode_number, episode_season = re.compile('\(Ep. ([0-9])([0-9][0-9])\)').search(episode_name).groups()
except:
episode_number = -1
episode_season = -1
u = sys.argv[0]
u += '?url="' + urllib.quote_plus(url) + '"'
u += '&mode="' + SITE + '"'
u += '&sitemode="play_video"'
infoLabels={ 'title' : episode_name,
'durationinseconds' : episode_duration,
'plot' : episode_plot,
'premiered' : episode_airdate,
'TVShowTitle' : show_name,
'season' : episode_season,
'episode' : episode_number}
episodes.append((u, episode_name, episode_thumb, infoLabels, 'select_quailty', HD, episode_type))
return episodes
def play_video(guid = common.args.url):
try:
qbitrate = common.args.quality
except:
qbitrate = None
video_url = VIDEO % guid
hbitrate = -1
lbitrate = -1
sbitrate = int(addon.getSetting('quality'))
closedcaption = None
video_url2 = None
finalurl = ''
video_data = connection.getURL(video_url)
video_menu = simplejson.loads(video_data)['items']
video_item = video_menu[0]
try:
closedcaption = video_item['captions']['sami']['url']
except:
pass
if (addon.getSetting('enablesubtitles') == 'true') and (closedcaption is not None) and (closedcaption != ''):
convert_subtitles(closedcaption.replace(' ', '+'))
if addon.getSetting('preffered_stream_type') == 'RTMP':
for video in video_item['videos']['flash'].itervalues():
try:
bitrate = video['bitrate']
if qbitrate is None:
if bitrate < lbitrate or lbitrate == -1:
lbitrate = bitrate
luri = video['url']
if bitrate > hbitrate and bitrate <= sbitrate:
hbitrate = bitrate
uri = video['url']
else:
if bitrate == qbitrate:
uri = video['url']
except:
pass
if uri is None:
uri = luri
video_data2 = connection.getURL(uri + '?format=json')
video_url3 = simplejson.loads(video_data2)['url']
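        # Note (added for clarity): the JSON redirect above resolves to an RTMP
        # address; the block below splits it into a base connection URL plus a
        # playpath and appends the swfurl/swfvfy options expected by the player.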
if '.mp4' in video_url3:
base_url, playpath_url = video_url3.split('mp4:')
playpath_url = ' playpath=mp4:' + playpath_url
elif 'flv' in video_url3:
base_url, playpath_url = video_url3.split('flv:')
playpath_url = ' playpath=' + playpath_url.replace('.flv','')
finalurl = base_url + playpath_url + '?player= swfurl=' + SWFURL % guid + ' swfvfy=true'
else:
ipad_url = video_item['videos']['iphone']['url']
video_data2 = connection.getURL(ipad_url + '?format=json')
video_url3 = simplejson.loads(video_data2)['url']
video_data3 = connection.getURL(video_url3)
video_url4 = m3u8.parse(video_data3)
uri = None
for video_index in video_url4.get('playlists'):
try:
codecs = video_index.get('stream_info')['codecs']
except:
codecs = ''
if codecs != 'mp4a.40.5':
if qbitrate is None:
bitrate = int(video_index.get('stream_info')['bandwidth']) /1024
if bitrate < lbitrate or lbitrate == -1:
lbitrate = bitrate
luri = video_index.get('uri')
if bitrate > hbitrate and bitrate <= sbitrate:
hbitrate = bitrate
uri = video_index.get('uri')
else:
bitrate = int(video_index.get('stream_info')['bandwidth'])
if bitrate == qbitrate:
uri = video_index.get('uri')
if uri is None:
uri = luri
finalurl = video_url3.rsplit('/', 1)[0] + '/' + uri
item = xbmcgui.ListItem(path = finalurl)
if qbitrate is not None:
item.setThumbnailImage(common.args.thumb)
item.setInfo('Video', { 'title' : common.args.name,
'season' : common.args.season_number,
'episode' : common.args.episode_number})
xbmcplugin.setResolvedUrl(pluginHandle, True, item)
if (addon.getSetting('enablesubtitles') == 'true') and (closedcaption is not None) and (closedcaption != ''):
while not xbmc.Player().isPlaying():
xbmc.sleep(100)
xbmc.Player().setSubtitles(ustvpaths.SUBTITLESMI)
def select_quailty(guid = common.args.url):
video_url = VIDEO % guid
sbitrate = int(addon.getSetting('quality')) * 1024
closedcaption = None
video_url2 = None
video_data = connection.getURL(video_url)
video_menu = simplejson.loads(video_data)['items']
video_item = video_menu[0]
bitrates = []
if addon.getSetting('preffered_stream_type') == 'RTMP':
for video in video_item['videos']['flash'].itervalues():
try:
bitrate = video['bitrate']
bitrates.append((bitrate,bitrate))
except:
pass
else:
ipad_url = video_item['videos']['iphone']['url']
video_data2 = connection.getURL(ipad_url + '?format=json')
video_url3 = simplejson.loads(video_data2)['url']
video_data3 = connection.getURL(video_url3)
video_url4 = m3u8.parse(video_data3)
uri = None
for video_index in video_url4.get('playlists'):
try:
codecs = video_index.get('stream_info')['codecs']
except:
codecs = ''
if codecs != 'mp4a.40.5':
bitrate = int(video_index.get('stream_info')['bandwidth'])
bitrates.append((int(bitrate) / 1024 , bitrate))
return bitrates
def clean_subs(data):
sami = re.compile(r'sami')
tag = re.compile(r' *<')
quote = re.compile(r'"')
sub = sami.sub('SAMI', data)
sub = tag.sub('<', sub)
sub = quote.sub('', sub)
return sub
def convert_subtitles(closedcaption):
str_output = ''
subtitle_data = connection.getURL(closedcaption, connectiontype = 0)
subtitle_data = clean_subs(common.smart_utf8(subtitle_data))
file = open(ustvpaths.SUBTITLESMI, 'w')
file.write(subtitle_data)
file.close()
| gpl-2.0 | -609,971,110,751,474,700 | 34.669528 | 213 | 0.670316 | false |
samstav/requests-chef | requests_chef/__about__.py | 1 | 1183 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package attributes and metadata."""
__all__ = (
'__title__',
'__summary__',
'__url__',
'__version__',
'__author__',
'__email__',
'__license__',
'__copyright__',
'__keywords__',
)
__title__ = 'requests-chef'
__summary__ = 'Chef Authentication protocol support for Python-Requests'
__url__ = 'https://github.com/samstav/requests-chef'
__version__ = '0.1.7'
__author__ = 'Sam Stavinoha'
__email__ = '[email protected]'
__keywords__ = ['opscode', 'chef', 'requests', 'authentication', 'auth']
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2015-2016 Sam Stavinoha'
| apache-2.0 | 4,353,036,945,659,839,500 | 32.8 | 74 | 0.664413 | false |
m-weigand/ccd_tools | src/dd_single/characterisation/TooMuchTimeReg/plot.py | 1 | 3707 | #!/usr/bin/python
from plot_settings import *
import numpy as np
import sip_formats.convert as SC
def plot_specs(ax, result_dir, specs):
# load data
f = np.loadtxt(result_dir + '/frequencies.dat')
response = np.loadtxt(result_dir + '/f.dat')
data = np.loadtxt(result_dir + '/data.dat')
data_format = open(result_dir + '/data_format.dat', 'r').readline().strip()
cre_cim = SC.convert(data_format, 'cre_cim', data)
cre, cim = SC.split_data(cre_cim)
fcre_cim = SC.convert(data_format, 'cre_cim', response)
fcre, fcim = SC.split_data(fcre_cim)
# plot cmim
for spec_nr in specs:
ax.semilogx(f, cim[spec_nr, :] * 1e4, '.', color='gray', label='data')
ax.semilogx(f, fcim[spec_nr, :] * 1e4, '.-', color='k', label='fit',
linewidth=0.5)
ax.set_ylabel(r"$\sigma''~[\mu S/cm]$")
ax.set_xlabel('frequency (Hz)')
ax.xaxis.set_major_locator(mpl.ticker.LogLocator(numticks=3))
# ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator([1, 5, 9]))
def plot_rtd(ax, result_dir, rtd_nr, color):
# load data
f = np.loadtxt(result_dir + '/frequencies.dat')
tau_f_min = 1 / (f[-1] * np.pi * 2)
tau_f_max = 1 / (f[0] * np.pi * 2)
m_i = np.loadtxt(result_dir + '/stats_and_rms/m_i_results.dat')
tau = np.loadtxt(result_dir + '/tau.dat')
ax.semilogx(tau, m_i[rtd_nr, :], '.-', color=color)
# mark extended tau range
ax.axvspan(tau[0], tau_f_min, color='gray', hatch='/', alpha=0.5)
ax.axvspan(tau_f_max, tau[-1], color='gray', hatch='/', alpha=0.5)
ax.set_xlim((min(tau), max(tau)))
ax.set_xlabel(r'$\tau~[s]$')
ax.set_ylabel(r'$log_{10}(\textit{m})$')
ax.xaxis.set_major_locator(mpl.ticker.LogLocator(numticks=7))
# ax.xaxis.set_major_locator(mpl.ticker.FixedLocator([1e1, 1e2]))
# ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator([-5, -3, -1]))
ax.invert_xaxis()
# ax.set_title('time {0}'.format(rtd_nr + 1))
def plot_time_evolution(ax, result_dirs):
# ax2 = ax.twinx()
colors = ('k', 'gray')
index = 0
ax.set_ylabel(r'$\textit{m}_{tot}^n~[mS/m]$')
for axx, result_dir in zip((ax, ax), result_dirs):
# plot m_tot_n
m_tot_n = np.loadtxt(result_dir + '/stats_and_rms/m_tot_n_results.dat')
print index, colors[index]
axx.plot(1e3 * 10**m_tot_n, '.-', color=colors[index])
index += 1
ax.set_xlabel('time')
# ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator([0, 1.6, 3.2]))
if __name__ == '__main__':
fig, axes = plt.subplots(2, 2, figsize=(3.6, 2.25))
ax = axes[0, 0]
plot_specs(ax, 'results_good', range(0, 20))
ax = axes[0, 1]
plot_time_evolution(ax, ('results_good', 'results_bad_3'))
ax = axes[1, 0]
plot_rtd(ax, 'results_good', 9, 'k')
ax = axes[1, 1]
plot_rtd(ax, 'results_bad_3', 9, 'gray')
# set global limits for RTD
rtd_min = min(min((axes[1, 0].get_ylim(), axes[1, 1].get_ylim())))
rtd_max = max(max((axes[1, 0].get_ylim(), axes[1, 1].get_ylim())))
print rtd_min, rtd_max
rtd_min = -6
rtd_max = -1
for ax in axes[1, :].flatten():
ax.set_ylim((rtd_min, rtd_max))
######
# labels a) - d)
for nr, ax in enumerate(axes.flatten(order='C')):
ax.annotate('{0})'.format(chr(97 + nr)), xy=(-0.2, -0.53),
xycoords='axes fraction')
fig.tight_layout()
fig.savefig('fig7_too_much_time_reg.png', dpi=300)
# fig.savefig('fig7_too_much_time_reg.pdf', dpi=300)
| gpl-3.0 | -5,896,570,371,492,413,000 | 33.324074 | 79 | 0.580793 | false |
JeffRoy/mi-dataset | mi/dataset/driver/wc_wm/cspp/wc_wm_cspp_telemetered_driver.py | 1 | 2039 | #!/usr/bin/env python
"""
@package mi.dataset.driver.wc_wm.cspp
@file mi/dataset/driver/wc_wm/cspp/wc_wm_cspp_telemetered_driver.py
@author Jeff Roy
@brief Driver for the wc_wm_cspp instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.wc_wm_cspp import \
WcWmCsppParser, \
WcWmEngTelemeteredDataParticle, \
WcWmMetadataTelemeteredDataParticle
from mi.dataset.parser.cspp_base import \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.core.versioning import version
@version("15.6.1")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
"""
This is the method called by Uframe
    :param basePythonCodePath: This is the file system location of mi-dataset
    :param sourceFilePath: This is the full path and filename of the file to be parsed
    :param particleDataHdlrObj: Java Object to consume the output of the parser
    :return: particleDataHdlrObj
"""
with open(sourceFilePath, 'rU') as stream_handle:
        # create an instance of the concrete driver class defined below
driver = WcWmCsppRecoveredDriver(basePythonCodePath, stream_handle, particleDataHdlrObj)
driver.processFileStream()
return particleDataHdlrObj
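# Illustrative call sketch (not from the original source): uFrame normally
# supplies all three arguments; the paths and handler object below are
# placeholders, not real values or APIs.
#
#     parse('/path/to/mi-dataset', '/path/to/wc_wm_recovered_file.txt',
#           particle_data_handler)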
class WcWmCsppRecoveredDriver(SimpleDatasetDriver):
"""
Derived wc_wm_cspp driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: WcWmMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: WcWmEngTelemeteredDataParticle
}
}
parser = WcWmCsppParser(parser_config, stream_handle,
self._exception_callback)
return parser
| bsd-2-clause | -9,146,377,210,328,459,000 | 29.893939 | 96 | 0.718489 | false |
LiamBao/Spiders | tmall&taobao/Tmall_Thread_Only(Keyword & Shop.py | 1 | 23082 | # -*- coding: utf-8 -*-
# __author__ =='liam'
import re
import requests
import math
import time
import datetime
from lxml import etree
import xlsxwriter as wx
import random
import os
import json
import ctypes
import win32ui
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE= -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN= 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN= 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
class Color:
''''' See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
for information on Windows APIs.'''
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
def set_cmd_color(self, color, handle=std_out_handle):
"""(color) -> bit
Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
"""
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def reset_color(self):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
def print_red_text(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
print (print_text)
self.reset_color()
def print_green_text(self, print_text):
self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
print (print_text)
self.reset_color()
def parseDateStr(date_pa):
return time.strftime("%Y-%m-%d %H:%M:%S", date_pa)
def parseDateStrToStamp(datestr):
return time.mktime(time.strptime(datestr,'%Y-%m-%d %H:%M:%S'))
def loadProductList(keyword,pageNum):
global clr
if keyword:
try:
valueNode=(pageNum-1)*44
kstsNode="%d" % (time.time() * 1000)
headers = { 'cookie' : 'thw=cn; cna=jCGdDgo1eioCAXTsq3pq4acz; x=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0%26__ll%3D-1%26_ato%3D0; uc3=nk2=AnywymvJAg%3D%3D&id2=UoH8VdpejL6PVA%3D%3D&vt3=F8dAScPiFCD1VRRbxcs%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D; lgc=amen_nm; tracknick=amen_nm; _cc_=V32FPkk%2Fhw%3D%3D; tg=0; mt=ci=0_1; isg=C5C64B911C2D2BE426E05E1803925CC7; l=AqOjku73WJcR8NeISY45WOfjs-lNqTfa; v=0; cookie2=1cfd2313facba6013b6a051a56edb89b; t=3dc0209d48a7022db36cbc763b2dc39e; _tb_token_=4fGjN8315alJ; linezing_session=05Sk9B4qqJDNTd7AxIiVwFxA_1450151125996X9SX_1; JSESSIONID=A810EE3F9B371ADCC646C7C948F1A11C',
'referer':'https://www.taobao.com/',
'user-agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
}
url = "https://s.taobao.com/search?data-key=s&data-value="+str(valueNode)+"&ajax=true&_ksTS="+str(kstsNode)+"_1116&callback=jsonp1117&isprepay=1&initiative_id=staobaoz_20141104&tab=mall&q="+keyword+"&style=list&stats_click=search_radio_tmall%253A1&sort=sale-desc&s="+str(valueNode)
## url = "http://s.taobao.com/search?data-key=s&data-value="+str(valueNode)+"&ajax=true&_ksTS="+str(kstsNode)+"_1341&callback=jsonp1341&sort=sale-desc&initiative_id=staobaoz_"+str(time.strftime("%Y%m%d",time.localtime(time.time())))+"&tab=all&q="+keyword+"&style=list&s="+str(valueNode)
html = requests.get(url,headers = headers,timeout = 10).text
return html
except Exception as err:
            clr.print_red_text(str(err) + ' Cannot parse Page')
def getJsons(xml,parse_type):
global theDomain,clr
if parse_type == 1:
startPos = xml.find("({")
if ( startPos > -1):
jsonStr = xml[startPos+1:len(xml)]
endPos = jsonStr.find("})")
jsonStr = jsonStr[0:endPos+1]
jsonStr = jsonStr.replace("\n", "")
jsonStr = jsonStr.replace("\r", "")
jsonStr = jsonStr.replace("\"cassandre\"", "cassandre")
try:
jsons = json.loads(jsonStr)
except Exception as err:
print('Parsing Error ON Json')
return jsons
elif parse_type == 2:
xml = str(xml)
part1=xml[0:xml.find("?")+1]
part2=xml[xml.find("?")+1:]
# timestamp = int(time.time())
# xml=theDomain+part1+'_ksTS='+str(timestamp)+'_658&callback=jsonp659&'+part2
xml= theDomain+part1+"_ksTS=1434002336091_658&callback=jsonp659&"+part2
jsonxml = requests.get(xml,timeout = 10).text
if jsonxml.find("(") >-1:
startPos = jsonxml.find("(")
if jsonxml.find("</body>") >-1:
endPos = jsonxml.find("</body>")
jsonxml = jsonxml[startPos+1:endPos]
jsonxml= jsonxml.replace('\\','')
else:
jsonxml = None
return jsonxml
def getNextProductPageNode(string):
if string.find('J_SearchAsync next') < 0:
return None
if string.find('J_SearchAsync next') > -1:
index = string.find('J_SearchAsync next')
if string.find('下一页',index) > -1:
end = string.find('下一页',index)
string = string[index:end]
if string.find('href='):
index=string.find('href=')
else:
return None
string = string[index+7:]
string = string[0:len(string) - 3]
return "https:" + string
def parseProductPage(xml):
global clr,threads
nextPageNode = None
    # Check whether this page is a valid product page
if (len(getProductRowNodes(xml)) <= 0 ):
clr.print_red_text("This page is not valid product page. URL ")
try:
nextPageNode = getNextProductPageNode(xml)
        # Get the product row nodes from the product list page
nodes = getProductRowNodes(xml)
if nodes:
for node in nodes:
                # Parse a single row node into a Product object
result = parseSingleResultRow(node,2)
if result:
threads.append(result)
else:
                    clr.print_red_text('Cannot parse shop thread~')
except Exception as err:
        clr.print_red_text(str(err) + ' parse Shop ProductPage Error')
clr.print_green_text("End of parse Shop products ")
return nextPageNode
def parseResultPage(xml,proIndex):
ret = False
jasonNode=getJsons(xml,1)
global threads,clr
resNode=jasonNode["mods"]["itemlist"]["data"]
if(resNode == None or len(resNode) == 0):
return ret
nodes=resNode["auctions"]
if(nodes == None or len(nodes) == 0):
return ret
try:
if nodes:
if proIndex:
for index,node in enumerate(nodes):
if(index<proIndex):
result = parseSingleResultRow(node,1)
if result:
threads.append(result)
ret=True
if( index>=proIndex):
ret=False
return ret
else:
for node in nodes:
result = parseSingleResultRow(node,1)
if result:
threads.append(result)
ret = True
return ret
else:
clr.print_red_text('NO Result!')
except Exception as err:
clr.print_red_text(err)
def getProductRowNodes(html):
items =[]
html=html.replace('\\"','"')
html=html.replace('"\\','"')
index = html.find("<dl class=\"item")
while(index > -1):
html = html[index:]
pos = html.find("</dl>")
if(pos > -1):
item = html[0:pos+5]
html = html[pos+5:]
items.append(item)
else:
break
index = html.find("<dl class=\"item")
return items
def isTopProduct(theTopnum):
if (theTopnum == 0):
return True
return False
def getTopPage(theTopNum):
    # Number of result pages (44 items per page) needed to cover theTopNum items.
    if theTopNum <= 44:
        return 1
    thePgNum = theTopNum // 44
    if theTopNum > thePgNum * 44:
        thePgNum = thePgNum + 1
    return thePgNum
def getTopPro(theTopNum):
pgNum=getTopPage(theTopNum)
topProNum=0
if(theTopNum==44*pgNum):
topProNum=0
return topProNum
if(theTopNum<44*pgNum):
topProNum=theTopNum-((pgNum-1)*44)
return topProNum
return topProNum
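# Worked example of the paging arithmetic above (44 results per page):
# theTopNum = 100 -> getTopPage(100) == 3 and getTopPro(100) == 12, i.e. the
# first two pages are taken in full and 12 items are read from the third page.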
def isMatchResult(jsons):
    nodes = jsons
    tipsNode = nodes.get("mods", {}).get("tips")
    if tipsNode:
        if tipsNode.get("data"):
            return False
        tipsText = str(tipsNode)
        if tipsText.find('搜索结果较少') > -1:
            return False
        return True
    return False
def parseSalesVolume(value):
    # view_sales looks like "326人收货" or "1.5万人收货"; normalise it to a plain count
    value = value.replace('人收货', '')
    if value.find('万') > -1:
        value = str(int(float(value.replace('万', '')) * 10000))
    return value
def parseNumOfReview(value):
    # comment_count looks like "3245" or "1.2万"; expand the 万 (x10000) suffix
    if value.find('万') > -1:
        value = str(int(float(value.replace('万', '')) * 10000))
    return value
def parseSubject(string):
    index = string.find('item-name')
    if index < 0:
        print('Can not parse subject!')
        return ''
    string = string[index:]
    if string.find('">') > -1:
        index = string.find('>')
        string = string[index+1:]
    if string.find('</a>') > -1:
        index = string.find('</a>')
        string = string[0:index]
    string = string.replace('<span class=\"h\">', '')
    string = string.replace('</span>', '')
    return string
def parseUrl(string):
index = string.find('item-name')
if index <0:
print("Can not parse URL")
stringpa = string[index:]
index =stringpa.find('//detail')
if index < 0:
start = string.find('href="//detail')
if start < 0 :
print("cannot parse Url")
end = string.find('"',start+14)
if end < 0:
print("cannot parse Url ")
stringpa = string[start+6:end]
index = stringpa.find('&')
stringpa = stringpa[0:index]
else:
stringpa = stringpa[index:]
index = stringpa.find('"')
stringpa= stringpa[0:index]
if stringpa == "" or stringpa is None:
st = string.find('//detail')
if st < 0:
print("can not parse Url")
end = string.find('&',st)
stringpa = stringpa[index+8:end]
return 'http:'+stringpa
def parsePrice(string):
index=string.find('c-price')
if(index<0):
print("Can not parse Price.")
string=string[index:]
index=string.find('</span>')
string=string[9:index]
return string.replace(' ','')
def parseComments(string):
index=string.find('评价:')
if(index<0):
return '推荐商品'
string=string[index+3:]
index=string.find('</span>')
string=string[0:index]
return string
def parseSum(string):
index=string.find('sale-num">')
if(index<0):
return 0
string=string[index:]
index=string.find('</span>')
string=string[10:index]
return string
def parseProId(proUrl):
proId = 0
if not proUrl:
return proId
if proUrl.find('id=') > 0:
proId=re.search('id=(\d+)',proUrl).group(1)
return proId
def parseSingleResultRow(rowNode,parse_type):
global clr, theSeller, theKeywordID, theUrl, theScrapecomment, theComment_type, theComment_filter, theSiteId,theKeyword,theTopnum
try:
if parse_type == 1:
title = rowNode["raw_title"]
url = "https:"+rowNode["detail_url"]
price = rowNode["view_price"]
marketPrice = rowNode["reserve_price"]
salesVolume = parseSalesVolume(rowNode["view_sales"])
numOfReview = parseNumOfReview(rowNode["comment_count"])
sellerName = rowNode["nick"]
category = None
keywordID = theKeywordID
promotions = None
reviewStar = 0
siteId = theSiteId
domain = 'detail.tmall.com' if (rowNode["detail_url"]).find('tmall')>-1 else 'detail.taobao.com'
productId = rowNode["nid"]
numOfFavorite = 0
sellerUrl = "https:"+ rowNode["shopLink"]
tags = None
scrapecomment = theScrapecomment
skuid = None
weight = 0
stock = 0
koubei = 0
type = None
lastScrapeDate = time.strftime('%Y-%m-%d',time.localtime(time.time()))
comment_type = theComment_type
comment_filter = theComment_filter
thread = [title,url,price,marketPrice,salesVolume,numOfReview,sellerName,category,keywordID,promotions,reviewStar,siteId,domain,productId,numOfFavorite,sellerUrl,tags,scrapecomment,skuid,weight,stock,koubei,type,lastScrapeDate,comment_type,comment_filter]
return thread
elif parse_type == 2 :
title = parseSubject(rowNode)
url = parseUrl(rowNode)
price = parsePrice(rowNode)
marketPrice = None
salesVolume = parseSum(rowNode)
numOfReview = parseComments(rowNode)
sellerName = theSeller
category = None
keywordID = theKeywordID
promotions = None
reviewStar = 0
siteId = theSiteId
domain = 'detail.tmall.com'
productId = parseProId(url)
numOfFavorite = 0
sellerUrl = theUrl
tags = None
scrapecomment = theScrapecomment
skuid = None
weight = 0
stock = 0
koubei = 0
type = None
lastScrapeDate = time.strftime('%Y-%m-%d', time.localtime(time.time()))
comment_type = theComment_type
comment_filter = theComment_filter
thread = [title, url, price, marketPrice, salesVolume, numOfReview, sellerName, category, keywordID, promotions,reviewStar, siteId, domain, productId, numOfFavorite, sellerUrl, tags, scrapecomment, skuid, weight,stock, koubei, type, lastScrapeDate, comment_type, comment_filter]
return thread
except Exception as err:
clr.print_red_text(err)
def doCapture(keyword, keywordID,topnum,scrapecomment,comment_type,comment_filter):
global clr,theKeyword,theKeywordID,theScrapecomment,theTopnum,theComment_type,theComment_filter,theSiteId,threads
theKeyword=keyword
theKeywordID=keywordID
theScrapecomment=scrapecomment
theTopnum=int(topnum)
theComment_type=int(comment_type)
theComment_filter=comment_filter
theSiteId=3000
parse_type = 1
try:
pageNum = 0
#check page
threads = []
hasNextPage = False
if isTopProduct(theTopnum):
while(True):
pageNum += 1
if(pageNum>100):
break
clr.print_green_text('Starting parse page ' + str(pageNum))
# sleepnum = random.uniform(2, 4)
# clr.print_green_text("Wait for " + str(int(sleepnum)) + " Seconds!")
# time.sleep(sleepnum)
xml = loadProductList(theKeyword,pageNum)
hasNextPage = parseResultPage(xml,None)
if not hasNextPage:
break
if not isTopProduct(theTopnum):
topNum = theTopnum
topPage = getTopPage(topNum)
topProNum = getTopPro(topNum)
while(True):
pageNum += 1
if (pageNum > topPage):
break
if (pageNum > 100):
break
clr.print_green_text(' Start parsing page ' + str(pageNum))
if (topProNum == 0):
# sleepnum = random.uniform(2, 4)
# clr.print_green_text(" Wait for " + str(int(sleepnum)) + " Seconds!")
# time.sleep(sleepnum)
xml = loadProductList(theKeyword, pageNum)
hasNextPage=parseResultPage(xml,None)
if (topProNum > 0):
# sleepnum = random.uniform(2, 4)
# clr.print_green_text(" Wait for " + str(int(sleepnum)) + " Seconds!")
# time.sleep(sleepnum)
xml = loadProductList(theKeyword, pageNum)
if (pageNum == topPage):
hasNextPage = parseResultPage(xml, topProNum)
else:
hasNextPage = parseResultPage(xml,None)
if not hasNextPage:
break
return threads
except Exception as err:
print(err)
def shop_doCapture(keywordID,keyword,topnum, url,scrapecomment,comment_type,comment_filter):
global theKeywordID,theScrapecomment,theUrl,theComment_type,theComment_filter,theSiteId,theKeyword,clr,theSeller,theDomain,threads
theKeywordID=keywordID
theScrapecomment=scrapecomment
theTopnum=int(topnum)
theUrl = url
theSiteId = 3002
theComment_type=int(comment_type)
theComment_filter=comment_filter
theIdx=0
parse_type = 2
threads = []
if url.find('.com') > 0:
theDomain = url[0:url.find('.com') + 4]
else:
theDomain ='tmall.com'
try:
theKeyword=keyword
if keyword == None or keyword == 'null' or keyword == '' :
if url.find('search=y') < 0:
url = theDomain+'/search.htm?orderType=hotsell_desc'
else:
url = theDomain+'/search.htm?q='+keyword+'&search=y&orderType=hotsell_desc&tsearch=y'
xml = requests.get(url,timeout = 20).text
xml = etree.HTML(xml)
if xml.xpath('.//a[@class ="shop-name"]/span') :
            theSeller = xml.xpath('.//a[@class ="shop-name"]/span')[0].xpath('string(.)').strip()  # seller name
else:
theSeller = None
hasNextPage = True
while hasNextPage:
if xml.xpath('.//input[@id= "J_ShopAsynSearchURL"]'):
theSearchUrl = xml.xpath('.//input[@id = "J_ShopAsynSearchURL"]/@value')
theSearchUrl = theSearchUrl[0].strip().replace('&', '')
else:
break
jsons =getJsons(theSearchUrl, parse_type)
nextPageButton =parseProductPage(jsons)
hasNextPage = True if nextPageButton and ( theIdx<=theTopnum or theTopnum==0) else False
if hasNextPage:
try:
xml = requests.get(nextPageButton,timeout = 20).text
xml = etree.HTML(xml)
except Exception as err:
clr.print_red_text(err)
return threads
except Exception as err:
clr.print_red_text(err)
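# The task file read by main() is expected to contain one comma-separated task
# per line; the field layout below is inferred from the calls in main() (the
# names are descriptive, not part of the original script):
#   6 fields (keyword task): keyword,keywordID,topnum,scrapecomment,comment_type,comment_filter
#   7 fields (shop task):    keywordID,keyword,topnum,shop_url,scrapecomment,comment_type,comment_filter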
def main():
global clr
clr = Color()
clr.print_green_text('*'*40)
clr.print_green_text('## Python 3.4')
clr.print_green_text('## Author Liam')
clr.print_green_text('## Date 11/16/2016')
clr.print_green_text('## Crawl Tmall_Thread2.0(Keyword & Shop)')
clr.print_green_text('*'*40)
clr.print_green_text('Enter to Open File')
    dlg = win32ui.CreateFileDialog(1)  # 1 = open-file dialog
    dlg.SetOFNInitialDir('C:/')  # initial directory shown in the open-file dialog
dlg.DoModal()
filename = dlg.GetPathName()
clr.print_green_text('Open File or directory: '+filename)
# f = open(os.getcwd()+r'/indexCrawl.txt','rb')
if filename is None or filename == '':
sys.exit(0)
f = open(filename,'rb')
task_lines = [i for i in f.readlines()]
f.close()
count = 0
allthread = []
data =[]
try:
for line in task_lines:
try:
count += 1
line = str(line, encoding='utf-8')
line = line.replace(')','').replace('main(','').replace('\'','')
line_split = line.strip()
if not line:
continue
line_split = line_split.split(',')
clr.print_green_text('Start Parsing Keyword/Shop : '+str(line_split))
if len(line_split) == 6 :
data = doCapture(line_split[0],line_split[1],line_split[2],line_split[3],line_split[4],line_split[5])
clr.print_green_text('KeyWord '+str(line_split)+ ' parsing Successfully!')
elif len(line_split) == 7:
data = shop_doCapture(line_split[0],line_split[1],line_split[2],line_split[3],line_split[4],line_split[5],line_split[6])
clr.print_green_text(' Shop '+str(line_split)+ ' parsing Successfully!')
for i in data:
allthread.append(i)
clr.print_green_text ('Counts '+str(len(allthread))+' threads')
                if len(allthread) > 10000:  # flush to Excel periodically to avoid exhausting memory
getExcel(allthread)
allthread =[]
waitTime = random.uniform(2, 4)
## clr.print_green_text(" Wait for "+str(int(waitTime))+" Seconds!")
# time.sleep(waitTime)
except Exception as err:
clr.print_red_text (err)
getExcel(allthread)
except Exception as err:
clr.print_red_text(err)
def getExcel(data):
global clr
try:
        title = ['title','url','price','marketPrice','salesVolume','numOfReview','sellerName','category','keywordID','promotions','reviewStar','siteId','domain','productId','numOfFavorite','sellerUrl','tags','scrapecomment','skuid','weight','stock','koubei','type','lastScrapeDate','comment_type','comment_filter']
file_name = '%s%s' % ('Output_',("%d" % (time.time() * 1000)))
        workbook = wx.Workbook(file_name+'.xlsx')
worksheet = workbook.add_worksheet('Info')
for i in range(len(data)):
for j in range(len(title)):
if i==0:
worksheet.write(i, j, title[j])
worksheet.write(i+1, j, data[i][j])
workbook.close()
clr.print_green_text('\n File '+file_name+' Done!')
except Exception as err:
clr.print_red_text(err)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,888,404,789,537,275,400 | 32.985141 | 625 | 0.559549 | false |
BertrandBordage/django-cachalot | cachalot/api.py | 1 | 5217 | # coding: utf-8
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.db import connections
from django.utils.six import string_types
from .cache import cachalot_caches
from .settings import cachalot_settings
from .signals import post_invalidation
from .transaction import AtomicCache
from .utils import _invalidate_tables
__all__ = ('invalidate', 'get_last_invalidation')
def _cache_db_tables_iterator(tables, cache_alias, db_alias):
no_tables = not tables
cache_aliases = settings.CACHES if cache_alias is None else (cache_alias,)
db_aliases = settings.DATABASES if db_alias is None else (db_alias,)
for db_alias in db_aliases:
if no_tables:
tables = connections[db_alias].introspection.table_names()
if tables:
for cache_alias in cache_aliases:
yield cache_alias, db_alias, tables
def _get_tables(tables_or_models):
for table_or_model in tables_or_models:
if isinstance(table_or_model, string_types) and '.' in table_or_model:
try:
table_or_model = apps.get_model(table_or_model)
except LookupError:
pass
yield (table_or_model if isinstance(table_or_model, string_types)
else table_or_model._meta.db_table)
def invalidate(*tables_or_models, **kwargs):
"""
Clears what was cached by django-cachalot implying one or more SQL tables
or models from ``tables_or_models``.
If ``tables_or_models`` is not specified, all tables found in the database
(including those outside Django) are invalidated.
If ``cache_alias`` is specified, it only clears the SQL queries stored
on this cache, otherwise queries from all caches are cleared.
If ``db_alias`` is specified, it only clears the SQL queries executed
on this database, otherwise queries from all databases are cleared.
:arg tables_or_models: SQL tables names, models or models lookups
(or a combination)
:type tables_or_models: tuple of strings or models
:arg cache_alias: Alias from the Django ``CACHES`` setting
:type cache_alias: string or NoneType
:arg db_alias: Alias from the Django ``DATABASES`` setting
:type db_alias: string or NoneType
:returns: Nothing
:rtype: NoneType
"""
# TODO: Replace with positional arguments when we drop Python 2 support.
cache_alias = kwargs.pop('cache_alias', None)
db_alias = kwargs.pop('db_alias', None)
for k in kwargs:
raise TypeError(
"invalidate() got an unexpected keyword argument '%s'" % k)
send_signal = False
invalidated = set()
for cache_alias, db_alias, tables in _cache_db_tables_iterator(
list(_get_tables(tables_or_models)), cache_alias, db_alias):
cache = cachalot_caches.get_cache(cache_alias, db_alias)
if not isinstance(cache, AtomicCache):
send_signal = True
_invalidate_tables(cache, db_alias, tables)
invalidated.update(tables)
if send_signal:
for table in invalidated:
post_invalidation.send(table, db_alias=db_alias)
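# Illustrative usage sketch (not part of the module's code; the 'auth.User'
# label and the alias names are assumptions):
#
#     from cachalot.api import invalidate
#     invalidate('auth.User', cache_alias='default', db_alias='default')
#     invalidate()  # clear cached queries for every table on every alias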
def get_last_invalidation(*tables_or_models, **kwargs):
"""
Returns the timestamp of the most recent invalidation of the given
``tables_or_models``. If ``tables_or_models`` is not specified,
all tables found in the database (including those outside Django) are used.
If ``cache_alias`` is specified, it only fetches invalidations
in this cache, otherwise invalidations in all caches are fetched.
If ``db_alias`` is specified, it only fetches invalidations
for this database, otherwise invalidations for all databases are fetched.
:arg tables_or_models: SQL tables names, models or models lookups
(or a combination)
:type tables_or_models: tuple of strings or models
:arg cache_alias: Alias from the Django ``CACHES`` setting
:type cache_alias: string or NoneType
:arg db_alias: Alias from the Django ``DATABASES`` setting
:type db_alias: string or NoneType
:returns: The timestamp of the most recent invalidation
:rtype: float
"""
# TODO: Replace with positional arguments when we drop Python 2 support.
cache_alias = kwargs.pop('cache_alias', None)
db_alias = kwargs.pop('db_alias', None)
for k in kwargs:
raise TypeError("get_last_invalidation() got an unexpected "
"keyword argument '%s'" % k)
last_invalidation = 0.0
for cache_alias, db_alias, tables in _cache_db_tables_iterator(
list(_get_tables(tables_or_models)), cache_alias, db_alias):
get_table_cache_key = cachalot_settings.CACHALOT_TABLE_KEYGEN
table_cache_keys = [get_table_cache_key(db_alias, t) for t in tables]
invalidations = cachalot_caches.get_cache(
cache_alias, db_alias).get_many(table_cache_keys).values()
if invalidations:
current_last_invalidation = max(invalidations)
if current_last_invalidation > last_invalidation:
last_invalidation = current_last_invalidation
return last_invalidation
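# Illustrative usage sketch (the model label is an assumption):
#
#     from cachalot.api import get_last_invalidation
#     timestamp = get_last_invalidation('auth.User', db_alias='default')
#     # -> float epoch timestamp, or 0.0 if nothing was ever invalidated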
| bsd-3-clause | -5,263,881,530,152,889,000 | 39.757813 | 79 | 0.672609 | false |
vakaras/nmadb-session-reg | src/nmadb_session_reg/models/section_based.py | 1 | 1388 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class SessionGroup(models.Model):
""" The session group. In most cases it will be section.
"""
title = models.CharField(
max_length=80,
verbose_name=_(u'title'),
unique=True,
)
description = models.TextField(
blank=True,
null=True,
verbose_name=_(u'comment'),
)
class Meta(object):
ordering = [u'title']
verbose_name = _(u'session group')
verbose_name_plural = _(u'session groups')
app_label = 'nmadb_session_reg'
def __unicode__(self):
return self.title
class RegistrationInfoMixin(models.Model):
""" Information entered by administrator. Session is section
based.
"""
assigned_session_group = models.ForeignKey(
'nmadb_session_reg.SessionGroup',
verbose_name=_(u'assigned session group'),
blank=True,
null=True,
)
class Meta(object):
abstract = True
ordering = [u'invitation',]
verbose_name = _(u'registration info (section)')
verbose_name_plural = _(u'registration infos (section)')
app_label = 'nmadb_session_reg'
def __unicode__(self):
return u'<{0.id}> invitation: {0.invitation}'.format(self)
| lgpl-3.0 | -4,476,336,846,390,657,000 | 26.215686 | 66 | 0.575648 | false |
LarsSchy/SMAC-M | chart-installation/cache_monitor/cache_monitor.py | 1 | 1824 | #!/usr/bin/python2
import os
import argparse
import subprocess
import shutil
import time
def parse_arguments():
parser = argparse.ArgumentParser(
prog="cache_monitor.py", description="This program monitors a directory and clears it if the size is above a certain limit")
parser.add_argument("path", nargs=1, help="The cache directory to watch")
parser.add_argument("-t", "--time-in-seconds", nargs=1,
help="The interval time, in seconds, with which to watch the folder", required=True)
parser.add_argument(
"-l", "--limit", help="Set the limit for the folder size in kilobytes (1GB = \"1,000,000\"), if the watched folder is larger than this limit, it will be removed", required=True)
return parser.parse_args()
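# Example invocation (hypothetical path and values): watch /var/cache/mapcache
# every 60 seconds and clear it once it grows beyond ~1 GB (1,000,000 kB):
#
#     python2 cache_monitor.py /var/cache/mapcache -t 60 -l 1000000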
def watch_folder(limit_string):
# Get folders with sizes at current directory
# The 'du' command does not tolerate an empty directory, thus checking the len(listdir) before continuing
if len(os.listdir('.')) > 0:
disk_usage = subprocess.check_output(
"du -s *", shell=True).splitlines()
limit = int(limit_string.replace(',', ''))
# Loop over all folders and folder sizes
for entry in disk_usage:
size, folder = entry.split('\t')
if(int(size) > limit):
print("Clearing cache at " + os.path.join(os.getcwd(), folder))
try:
shutil.rmtree(folder)
except OSError:
pass
def main():
args = parse_arguments()
watch_dir = args.path[0]
if not os.path.exists(watch_dir):
os.makedirs(watch_dir)
os.chdir(args.path[0])
while(True):
watch_folder(args.limit)
time.sleep(float(args.time_in_seconds[0]))
if __name__ == "__main__":
main()
| mit | 2,335,487,726,664,124,400 | 31.571429 | 185 | 0.612939 | false |
fieldsofview/sim-city-client | tests/test_management.py | 1 | 1149 | # SIM-CITY client
#
# Copyright 2015 Joris Borgdorff <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import simcity
from nose.tools import assert_raises
def test_init():
assert_raises(ValueError, simcity.init, 'thispathdoesnotexist.ini')
cfg = simcity.Config(from_file=False)
assert_raises(KeyError, simcity.init, cfg)
cfg.add_section('task-db', {
'url': 'http://doesnotexistforsure_atleasti_think_so.nl/',
'username': 'example',
'password': 'example',
'database': 'example',
})
assert_raises(IOError, simcity.init, cfg)
| apache-2.0 | -1,536,535,285,818,314,000 | 33.818182 | 74 | 0.718016 | false |
spjmurray/openstack-sentinel | sentinel/api/controllers/image/v2/images.py | 1 | 1414 | # Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import pecan.decorators
from sentinel import utils
from sentinel.api.controllers.base import BaseController
from sentinel.scope import Scope
class ImageV2ImagesController(BaseController):
service = u'image'
collection = u'images'
resource = u'image'
def _scoped_images(self):
# Ensure we have a list of Resource like objects
images = utils.unglancify(self.image.images.list())
images = Scope.filter(images, key='owner')
return utils.paginate(images, pecan.request.GET.get('marker'),
pecan.request.GET.get('limit'))
@pecan.expose('json')
@pecan.decorators.accept_noncanonical
def get_all(self):
images = self._scoped_images()
return self.format_collection(images)
# vi: ts=4 et:
| apache-2.0 | 8,207,387,003,058,586,000 | 31.136364 | 78 | 0.695898 | false |
sentinelsat/sentinelsat | setup.py | 1 | 1824 | import re
from io import open
from setuptools import find_packages, setup
# Get the long description from the relevant file
with open("README.rst", encoding="utf-8") as f:
long_description = f.read()
with open("sentinelsat/__init__.py", encoding="utf-8") as f:
version = re.search(r'__version__\s*=\s*"(\S+)"', f.read()).group(1)
setup(
name="sentinelsat",
version=version,
description="Utility to search and download Copernicus Sentinel satellite images",
long_description=long_description,
classifiers=[
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Utilities",
],
keywords="copernicus, sentinel, esa, satellite, download, GIS",
author="Kersten Clauss",
author_email="[email protected]",
url="https://github.com/sentinelsat/sentinelsat",
license="GPLv3+",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=open("requirements.txt").read().splitlines(),
extras_require={
"dev": [
"pandas",
"geopandas",
"shapely",
"pytest >= 3.6.3",
"pytest-vcr",
"pytest-socket",
"requests-mock",
"pyyaml",
"rstcheck",
"sphinx >= 1.3",
"sphinx_rtd_theme",
"flaky",
],
},
entry_points="""
[console_scripts]
sentinelsat=sentinelsat.scripts.cli:cli
""",
)
| gpl-3.0 | -192,730,876,689,257,060 | 31 | 86 | 0.583882 | false |
bgribble/mfp | mfp/gui/processor_element.py | 1 | 6854 | #! /usr/bin/env python
'''
processor_element.py
A patch element corresponding to a signal or control processor
'''
from gi.repository import Clutter
import cairo
from .patch_element import PatchElement
from .colordb import ColorDB
from .modes.label_edit import LabelEditMode
from ..gui_main import MFPGUI
from mfp import log
from mfp.utils import catchall
class ProcessorElement (PatchElement):
display_type = "processor"
proc_type = None
# constants
label_off_x = 3
label_off_y = 0
def __init__(self, window, x, y, params={}):
PatchElement.__init__(self, window, x, y)
self.param_list.extend(["show_label", "export_x", "export_y",
"export_w", "export_h"])
self.show_label = params.get("show_label", True)
# display elements
self.texture = None
self.label = None
self.label_text = None
self.export_x = None
self.export_y = None
self.export_w = None
self.export_h = None
self.export_created = False
# create display
self.create_display()
self.set_size(35, 25)
self.move(x, y)
self.obj_state = self.OBJ_HALFCREATED
self.update()
def create_display(self):
# box
self.texture = Clutter.Canvas.new()
self.set_content(self.texture)
self.texture.connect("draw", self.draw_cb)
self.texture.set_size(35, 25)
# label
self.label = Clutter.Text()
self.label.set_position(self.label_off_x, self.label_off_y)
self.label.set_color(self.get_color('text-color'))
self.label.set_font_name(self.get_fontspec())
self.label.connect('text-changed', self.label_changed_cb)
self.label.set_reactive(False)
if self.show_label:
self.add_actor(self.label)
self.set_reactive(True)
def update(self):
if self.show_label or self.obj_state == self.OBJ_HALFCREATED:
label_width = self.label.get_property('width') + 14
else:
label_width = 0
box_width = self.export_w or 0
new_w = None
num_ports = max(self.num_inlets, self.num_outlets)
port_width = (num_ports * self.get_style('porthole_minspace')
+ 2*self.get_style('porthole_border'))
new_w = max(35, port_width, label_width, box_width)
self.set_size(new_w, self.texture.get_property('height'))
@catchall
def draw_cb(self, texture, ct, width, height):
lw = 2.0
w = width - lw
h = height - lw
# clear the drawing area
ct.save()
ct.set_operator(cairo.OPERATOR_CLEAR)
ct.paint()
ct.restore()
ct.set_line_width(lw)
ct.set_antialias(cairo.ANTIALIAS_NONE)
ct.translate(lw/2.0, lw/2.0)
ct.move_to(0, 0)
ct.line_to(0, h)
ct.line_to(w, h)
ct.line_to(w, 0)
ct.line_to(0, 0)
ct.close_path()
# fill to paint the background
color = ColorDB.to_cairo(self.get_color('fill-color'))
ct.set_source_rgba(color.red, color.green, color.blue, color.alpha)
ct.fill_preserve()
# stroke to draw the outline
color = ColorDB.to_cairo(self.get_color('stroke-color'))
ct.set_source_rgba(color.red, color.green, color.blue, color.alpha)
if self.obj_state == self.OBJ_COMPLETE:
ct.set_dash([])
else:
ct.set_dash([8, 4])
ct.set_line_width(lw)
ct.stroke()
return True
def get_label(self):
return self.label
def label_edit_start(self):
self.obj_state = self.OBJ_HALFCREATED
if not self.show_label:
self.add_actor(self.label)
self.update()
def label_edit_finish(self, widget, text=None):
if text is not None:
parts = text.split(' ', 1)
obj_type = parts[0]
if len(parts) > 1:
obj_args = parts[1]
else:
obj_args = None
self.create(obj_type, obj_args)
# obj_args may get forcibly changed on create
if self.obj_args and (len(parts) < 2 or self.obj_args != parts[1]):
self.label.set_text(self.obj_type + ' ' + self.obj_args)
if self.obj_id is not None and self.obj_state != self.OBJ_COMPLETE:
self.obj_state = self.OBJ_COMPLETE
if not self.show_label:
self.remove_actor(self.label)
self.update()
def label_changed_cb(self, *args):
newtext = self.label.get_text()
if newtext != self.label_text:
self.label_text = newtext
self.update()
def set_size(self, w, h):
PatchElement.set_size(self, w, h)
self.texture.set_size(w, h)
self.texture.invalidate()
def select(self):
PatchElement.select(self)
self.label.set_color(self.get_color('text-color'))
self.texture.invalidate()
def unselect(self):
PatchElement.unselect(self)
self.label.set_color(self.get_color('text-color'))
self.texture.invalidate()
def delete(self):
for c in self.connections_out + self.connections_in:
c.delete()
PatchElement.delete(self)
def make_edit_mode(self):
return LabelEditMode(self.stage, self, self.label)
def configure(self, params):
if self.obj_args is None:
self.label.set_text("%s" % (self.obj_type,))
else:
self.label.set_text("%s %s" % (self.obj_type, self.obj_args))
need_update = False
labelheight = 20
if "show_label" in params:
oldval = self.show_label
self.show_label = params.get("show_label")
if oldval ^ self.show_label:
need_update = True
if self.show_label:
self.add_actor(self.label)
else:
self.remove_actor(self.label)
self.export_x = params.get("export_x")
self.export_y = params.get("export_y")
self.export_w = params.get("export_w")
self.export_h = params.get("export_h")
if self.export_x is not None and self.export_y is not None:
self.export_created = True
params["width"] = max(self.width, params.get("export_w") or 0)
params["height"] = max(self.height, (params.get("export_h") or 0) + labelheight)
PatchElement.configure(self, params)
if self.obj_id is not None and self.obj_state != self.OBJ_COMPLETE:
self.obj_state = self.OBJ_COMPLETE
if self.export_created:
MFPGUI().mfp.create_export_gui(self.obj_id)
need_update = True
if need_update:
self.update()
| gpl-2.0 | -4,675,372,229,137,122,000 | 29.061404 | 88 | 0.568573 | false |
nikita-sunwind/vizhu | server/test/test_export_data.py | 1 | 3014 | # pylint: disable=no-self-use,no-member
'''Export event data to different formats
'''
from csv import DictReader
from io import BytesIO
import numpy as np
from pytest import mark
from test.utils import EVENTS_URL, BAD_DATA, COMPOUND_DATA, N_TEST_EVENTS
@mark.usefixtures('fx_load_fixtures')
class TestExportData:
'''Export event data to different formats
'''
test_keys = [
'_id', '_series', '_agent', '_timestamp',
'roundtrip_delay', 'bad_data', 'compound_data']
async def test_export_data_default_format(self, fx_client):
params = {
'series': 'demo',
}
response = await fx_client.get(EVENTS_URL, params=params)
assert response.status == 200
assert response.headers['Content-Type'] == 'application/json'
async def test_export_data_to_json(self, fx_client):
params = {
'series': 'demo',
'format': 'json',
}
response = await fx_client.get(EVENTS_URL, params=params)
assert response.status == 200
assert response.headers['Content-Type'] == 'application/json'
results = await response.json()
assert isinstance(results, list)
assert len(results) == N_TEST_EVENTS
for row in results:
for key in self.test_keys:
assert key in row
assert row['bad_data'] == BAD_DATA
assert row['compound_data'] == COMPOUND_DATA
async def test_export_data_to_csv(self, fx_client):
params = {
'series': 'demo',
'format': 'csv',
}
response = await fx_client.get(EVENTS_URL, params=params)
assert response.status == 200
assert response.headers['Content-Type'] == 'text/csv'
results = list()
async for line in response.content:
results.append(line.decode('utf-8'))
assert len(results) == N_TEST_EVENTS + 1
reader = DictReader(results)
for index, row in enumerate(reader):
for key in self.test_keys:
assert key in row
if index > 0:
assert row['bad_data'] == BAD_DATA
assert row['compound_data'] == str(COMPOUND_DATA)
async def test_export_data_to_numpy(self, fx_client):
params = {
'series': 'demo',
'format': 'numpy',
}
response = await fx_client.get(EVENTS_URL, params=params)
assert response.status == 200
assert response.headers['Content-Type'] == 'application/octet-stream'
received_data = await response.read()
buffer = BytesIO()
buffer.write(received_data)
buffer.seek(0)
result = np.load(buffer)
assert result.shape == (N_TEST_EVENTS, len(self.test_keys))
for row in result:
assert BAD_DATA in row
for cell in row:
if cell == COMPOUND_DATA:
break
else:
assert False
| mit | 2,937,678,873,745,961,500 | 26.651376 | 77 | 0.567684 | false |
openstack/os-net-config | os_net_config/impl_ifcfg.py | 1 | 86208 | # -*- coding: utf-8 -*-
# Copyright 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import itertools
import logging
import netaddr
import os
import re
import os_net_config
from os_net_config import objects
from os_net_config import utils
logger = logging.getLogger(__name__)
# Import the raw NetConfig object so we can call its methods
netconfig = os_net_config.NetConfig()
_ROUTE_TABLE_DEFAULT = """# reserved values
#
255\tlocal
254\tmain
253\tdefault
0\tunspec
#
# local
#
#1\tinr.ruhep\n"""
def ifcfg_config_path(name):
return "/etc/sysconfig/network-scripts/ifcfg-%s" % name
def remove_ifcfg_config(ifname):
if re.match(r'[\w-]+$', ifname):
ifcfg_file = ifcfg_config_path(ifname)
if os.path.exists(ifcfg_file):
os.remove(ifcfg_file)
# NOTE(dprince): added here for testability
def bridge_config_path(name):
return ifcfg_config_path(name)
def ivs_config_path():
return "/etc/sysconfig/ivs"
def nfvswitch_config_path():
return "/etc/sysconfig/nfvswitch"
def vpp_config_path():
return "/etc/vpp/startup.conf"
def route_config_path(name):
return "/etc/sysconfig/network-scripts/route-%s" % name
def route6_config_path(name):
return "/etc/sysconfig/network-scripts/route6-%s" % name
def route_rule_config_path(name):
return "/etc/sysconfig/network-scripts/rule-%s" % name
def route_table_config_path():
return "/etc/iproute2/rt_tables"
def cleanup_pattern():
return "/etc/sysconfig/network-scripts/ifcfg-*"
def dhclient_path():
if os.path.exists("/usr/sbin/dhclient"):
return "/usr/sbin/dhclient"
elif os.path.exists("/sbin/dhclient"):
return "/sbin/dhclient"
else:
raise RuntimeError("Could not find dhclient")
def stop_dhclient_process(interface):
"""Stop a DHCP process when no longer needed.
This method exists so that it may be stubbed out for unit tests.
:param interface: The interface on which to stop dhclient.
"""
pid_file = '/var/run/dhclient-%s.pid' % (interface)
try:
dhclient = dhclient_path()
except RuntimeError as err:
logger.info('Exception when stopping dhclient: %s' % err)
return
if os.path.exists(pid_file):
msg = 'Stopping %s on interface %s' % (dhclient, interface)
netconfig.execute(msg, dhclient, '-r', '-pf',
pid_file, interface)
try:
os.unlink(pid_file)
except OSError as err:
logger.error('Could not remove dhclient pid file \'%s\': %s' %
(pid_file, err))
class IfcfgNetConfig(os_net_config.NetConfig):
"""Configure network interfaces using the ifcfg format."""
def __init__(self, noop=False, root_dir=''):
super(IfcfgNetConfig, self).__init__(noop, root_dir)
self.interface_data = {}
self.ivsinterface_data = {}
self.nfvswitch_intiface_data = {}
self.nfvswitch_options = None
self.vlan_data = {}
self.ib_childs_data = {}
self.route_data = {}
self.route6_data = {}
self.route_table_data = {}
self.rule_data = {}
self.bridge_data = {}
self.linuxbridge_data = {}
self.linuxbond_data = {}
self.ib_interface_data = {}
self.linuxteam_data = {}
self.vpp_interface_data = {}
self.vpp_bond_data = {}
self.member_names = {}
self.renamed_interfaces = {}
self.bond_primary_ifaces = {}
logger.info('Ifcfg net config provider created.')
def parse_ifcfg(self, ifcfg_data):
"""Break out the key/value pairs from ifcfg_data
Return the keys and values without quotes.
"""
ifcfg_values = {}
for line in ifcfg_data.split("\n"):
if not line.startswith("#") and line.find("=") > 0:
k, v = line.split("=", 1)
ifcfg_values[k] = v.strip("\"'")
return ifcfg_values
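    # Illustrative example of the parsing above (made-up ifcfg content):
    #     parse_ifcfg('DEVICE=em1\nBOOTPROTO="dhcp"\n# a comment\n')
    #     => {'DEVICE': 'em1', 'BOOTPROTO': 'dhcp'}
    # quotes are stripped and comment lines are skipped.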
def parse_ifcfg_routes(self, ifcfg_data):
"""Break out the individual routes from an ifcfg route file."""
routes = []
for line in ifcfg_data.split("\n"):
if not line.startswith("#"):
routes.append(line)
return routes
def parse_ifcfg_rules(self, ifcfg_data):
"""Break out the individual rules from an ifcfg rule file."""
rules = []
for line in ifcfg_data.split("\n"):
if not line.startswith("#"):
rules.append(line)
return rules
def enumerate_ifcfg_changes(self, ifcfg_data_old, ifcfg_data_new):
"""Determine which values are added/modified/removed
:param ifcfg_data_old: content of existing ifcfg file
:param ifcfg_data_new: content of replacement ifcfg file
:return: dict of changed values and states (added, removed, modified)
"""
changed_values = {}
for key in ifcfg_data_old:
if key in ifcfg_data_new:
if ifcfg_data_old[key].upper() != ifcfg_data_new[key].upper():
changed_values[key] = "modified"
else:
changed_values[key] = "removed"
for key in ifcfg_data_new:
if key not in ifcfg_data_old:
changed_values[key] = "added"
return changed_values
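    # Illustrative example (made-up values): comparing an old parse of
    # {'IPADDR': '10.0.0.1', 'MTU': '1500'} with a new parse of
    # {'IPADDR': '10.0.0.2', 'ONBOOT': 'yes'} yields
    # {'IPADDR': 'modified', 'MTU': 'removed', 'ONBOOT': 'added'}.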
def enumerate_ifcfg_route_changes(self, old_routes, new_routes):
"""Determine which routes are added or removed.
:param file_values: contents of existing interface route file
:param data_values: contents of replacement interface route file
:return: list of tuples representing changes (route, state), where
state is one of added or removed
"""
route_changes = []
for route in old_routes:
if route not in new_routes:
route_changes.append((route, 'removed'))
for route in new_routes:
if route not in old_routes:
route_changes.append((route, 'added'))
return route_changes
def enumerate_ifcfg_rule_changes(self, old_rules, new_rules):
"""Determine which routes are added or removed.
:param file_values: contents of existing interface route rule file
:param data_values: contents of replacement interface route rule file
:return: list of tuples representing changes (rule, state), where
state is one of added or removed
"""
rule_changes = []
for rule in old_rules:
if rule not in new_rules:
rule_changes.append((rule, 'removed'))
for rule in new_rules:
if rule not in old_rules:
rule_changes.append((rule, 'added'))
return rule_changes
def ifcfg_requires_restart(self, filename, new_data):
"""Determine if changes to the ifcfg file require a restart to apply.
Simple changes like IP, MTU, and routes can be directly applied
without restarting the interface.
:param filename: The ifcfg-<int> filename.
:type filename: string
:param new_data: The data for the new ifcfg-<int> file.
:type new_data: string
:returns: boolean value for whether a restart is required
"""
file_data = utils.get_file_data(filename)
logger.debug("Original ifcfg file:\n%s" % file_data)
logger.debug("New ifcfg file:\n%s" % new_data)
file_values = self.parse_ifcfg(file_data)
new_values = self.parse_ifcfg(new_data)
restart_required = False
# Certain changes can be applied without restarting the interface
permitted_changes = [
"IPADDR",
"NETMASK",
"MTU",
"ONBOOT"
]
# Check whether any of the changes require restart
for change in self.enumerate_ifcfg_changes(file_values, new_values):
if change not in permitted_changes:
# Moving to DHCP requires restarting interface
if change in ["BOOTPROTO", "OVSBOOTPROTO"]:
if change in new_values:
if (new_values[change].upper() == "DHCP"):
restart_required = True
logger.debug(
"DHCP on %s requires restart" % change)
else:
restart_required = True
if not restart_required:
logger.debug("Changes do not require restart")
return restart_required
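    # For example, a diff that only touches IPADDR, NETMASK, MTU or ONBOOT is
    # applied without a restart, while switching BOOTPROTO to "dhcp" (or
    # changing most other keys) reports that a restart is required.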
def iproute2_apply_commands(self, device_name, filename, data):
"""Return list of commands needed to implement changes.
Given ifcfg data for an interface, return commands required to
apply the configuration using 'ip' commands.
:param device_name: The name of the int, bridge, or bond
:type device_name: string
:param filename: The ifcfg-<int> filename.
:type filename: string
:param data: The data for the new ifcfg-<int> file.
:type data: string
:returns: commands (commands to be run)
"""
previous_cfg = utils.get_file_data(filename)
file_values = self.parse_ifcfg(previous_cfg)
data_values = self.parse_ifcfg(data)
logger.debug("File values:\n%s" % file_values)
logger.debug("Data values:\n%s" % data_values)
changes = self.enumerate_ifcfg_changes(file_values, data_values)
commands = []
new_cidr = 0
old_cidr = 0
# Convert dot notation netmask to CIDR length notation
if "NETMASK" in file_values:
netmask = file_values["NETMASK"]
old_cidr = netaddr.IPAddress(netmask).netmask_bits()
if "NETMASK" in data_values:
netmask = data_values["NETMASK"]
new_cidr = netaddr.IPAddress(netmask).netmask_bits()
if "IPADDR" in changes:
if changes["IPADDR"] == "removed" or changes[
"IPADDR"] == "modified":
if old_cidr:
commands.append("addr del %s/%s dev %s" %
(file_values["IPADDR"], old_cidr,
device_name))
else:
# Cannot remove old IP specifically if netmask not known
commands.append("addr flush dev %s" % device_name)
if changes["IPADDR"] == "added" or changes["IPADDR"] == "modified":
commands.insert(0, "addr add %s/%s dev %s" %
(data_values["IPADDR"], new_cidr, device_name))
if "MTU" in changes:
if changes["MTU"] == "added" or changes["MTU"] == "modified":
commands.append("link set dev %s mtu %s" %
(device_name, data_values["MTU"]))
elif changes["MTU"] == "removed":
commands.append("link set dev %s mtu 1500" % device_name)
return commands
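    # Illustrative example (made-up addresses): changing IPADDR from
    # 192.168.0.5 to 192.168.0.6 on em1, with NETMASK 255.255.255.0 kept in
    # both files, yields
    #     ['addr add 192.168.0.6/24 dev em1', 'addr del 192.168.0.5/24 dev em1']
    # ready to be handed to the `ip` utility.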
def iproute2_route_commands(self, filename, data):
"""Return a list of commands for 'ip route' to modify routing table.
The list of commands is generated by comparing the old and new
configs, and calculating which routes need to be added and which
need to be removed.
:param filename: path to the original interface route file
:param data: data that is to be written to new route file
:return: list of commands to feed to 'ip' to reconfigure routes
"""
file_values = self.parse_ifcfg_routes(utils.get_file_data(filename))
data_values = self.parse_ifcfg_routes(data)
route_changes = self.enumerate_ifcfg_route_changes(file_values,
data_values)
commands = []
for route in route_changes:
if route[1] == 'removed':
commands.append('route del ' + route[0])
elif route[1] == 'added':
commands.append('route add ' + route[0])
return commands
def iproute2_rule_commands(self, filename, data):
"""Return a list of commands for 'ip route' to modify routing rules.
The list of commands is generated by comparing the old and new
configs, and calculating which rules need to be added and which
need to be removed.
:param filename: path to the original interface route rule file
:param data: data that is to be written to new route rule file
:return: list of commands to feed to 'ip' to reconfigure route rules
"""
file_values = self.parse_ifcfg_rules(utils.get_file_data(filename))
data_values = self.parse_ifcfg_rules(data)
rule_changes = self.enumerate_ifcfg_rule_changes(file_values,
data_values)
commands = []
for rule in rule_changes:
if rule[1] == 'removed':
commands.append('rule del ' + rule[0])
elif rule[1] == 'added':
commands.append('rule add ' + rule[0])
return commands
def child_members(self, name):
children = set()
try:
for member in self.member_names[name]:
children.add(member)
children.update(self.child_members(member))
except KeyError:
pass
return children
def _add_common(self, base_opt):
ovs_extra = []
data = "# This file is autogenerated by os-net-config\n"
data += "DEVICE=%s\n" % base_opt.name
if base_opt.onboot:
data += "ONBOOT=yes\n"
else:
data += "ONBOOT=no\n"
if isinstance(base_opt, objects.Interface) and base_opt.hotplug:
data += "HOTPLUG=yes\n"
else:
data += "HOTPLUG=no\n"
if base_opt.nm_controlled:
data += "NM_CONTROLLED=yes\n"
else:
data += "NM_CONTROLLED=no\n"
if not base_opt.dns_servers and not base_opt.use_dhcp:
data += "PEERDNS=no\n"
if isinstance(base_opt, objects.Vlan):
if not base_opt.ovs_port:
# vlans on OVS bridges are internal ports (no device, etc)
data += "VLAN=yes\n"
if base_opt.device:
data += "PHYSDEV=%s\n" % base_opt.device
elif base_opt.linux_bond_name:
data += "PHYSDEV=%s\n" % base_opt.linux_bond_name
else:
if base_opt.ovs_options:
data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
ovs_extra.extend(base_opt.ovs_extra)
elif isinstance(base_opt, objects.IvsInterface):
data += "TYPE=IVSIntPort\n"
elif isinstance(base_opt, objects.NfvswitchInternal):
data += "TYPE=NFVSWITCHIntPort\n"
elif isinstance(base_opt, objects.IbInterface):
data += "TYPE=Infiniband\n"
elif isinstance(base_opt, objects.IbChildInterface):
data += "TYPE=Infiniband\n"
data += "PKEY=yes\n"
data += "PHYSDEV=%s\n" % base_opt.parent
data += "PKEY_ID=%s\n" % base_opt.pkey_id
elif re.match(r'\w+\.\d+$', base_opt.name):
data += "VLAN=yes\n"
elif isinstance(base_opt, objects.Interface):
if base_opt.linkdelay:
data += "LINKDELAY=%s\n" % base_opt.linkdelay
if base_opt.linux_bond_name:
data += "MASTER=%s\n" % base_opt.linux_bond_name
data += "SLAVE=yes\n"
if base_opt.linux_team_name:
data += "TEAM_MASTER=%s\n" % base_opt.linux_team_name
if base_opt.primary:
data += "TEAM_PORT_CONFIG='{\"prio\": 100}'\n"
if base_opt.ivs_bridge_name:
data += "DEVICETYPE=ivs\n"
data += "IVS_BRIDGE=%s\n" % base_opt.ivs_bridge_name
if base_opt.nfvswitch_bridge_name:
data += "DEVICETYPE=nfvswitch\n"
data += "NFVSWITCH_BRIDGE=%s\n" % base_opt.nfvswitch_bridge_name
if base_opt.ovs_port:
if not isinstance(base_opt, objects.LinuxTeam):
data += "DEVICETYPE=ovs\n"
if base_opt.bridge_name:
if isinstance(base_opt, objects.Vlan):
data += "TYPE=OVSIntPort\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
data += "OVS_OPTIONS=\"tag=%s\"\n" % base_opt.vlan_id
else:
data += "TYPE=OVSPort\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
if base_opt.linux_bridge_name:
data += "BRIDGE=%s\n" % base_opt.linux_bridge_name
if isinstance(base_opt, objects.OvsBridge):
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSBridge\n"
if base_opt.use_dhcp or base_opt.use_dhcpv6:
data += "OVSBOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
if base_opt.use_dhcp:
data += ("OVSDHCPINTERFACES=\"%s\"\n" % " ".join(members))
if base_opt.primary_interface_name:
mac = utils.interface_mac(base_opt.primary_interface_name)
ovs_extra.append("set bridge %s other-config:hwaddr=%s" %
(base_opt.name, mac))
if base_opt.ovs_options:
data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
ovs_extra.extend(base_opt.ovs_extra)
elif isinstance(base_opt, objects.OvsUserBridge):
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSUserBridge\n"
if base_opt.use_dhcp or base_opt.use_dhcpv6:
data += "OVSBOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
if base_opt.use_dhcp:
data += ("OVSDHCPINTERFACES=\"%s\"\n" % " ".join(members))
if base_opt.ovs_options:
data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
ovs_extra.extend(base_opt.ovs_extra)
elif isinstance(base_opt, objects.OvsBond):
if base_opt.primary_interface_name:
primary_name = base_opt.primary_interface_name
self.bond_primary_ifaces[base_opt.name] = primary_name
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSBond\n"
if base_opt.use_dhcp or base_opt.use_dhcpv6:
data += "OVSBOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
data += ("BOND_IFACES=\"%s\"\n" % " ".join(members))
if base_opt.ovs_options:
data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
ovs_extra.extend(base_opt.ovs_extra)
elif isinstance(base_opt, objects.LinuxBridge):
data += "TYPE=Bridge\n"
data += "DELAY=0\n"
if base_opt.use_dhcp:
data += "BOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
if base_opt.primary_interface_name:
primary_name = base_opt.primary_interface_name
primary_mac = utils.interface_mac(primary_name)
data += "MACADDR=\"%s\"\n" % primary_mac
elif isinstance(base_opt, objects.LinuxBond):
if base_opt.primary_interface_name:
primary_name = base_opt.primary_interface_name
primary_mac = utils.interface_mac(primary_name)
data += "MACADDR=\"%s\"\n" % primary_mac
if base_opt.use_dhcp:
data += "BOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
if base_opt.bonding_options:
data += "BONDING_OPTS=\"%s\"\n" % base_opt.bonding_options
elif isinstance(base_opt, objects.LinuxTeam):
if base_opt.primary_interface_name:
primary_name = base_opt.primary_interface_name
primary_mac = utils.interface_mac(primary_name)
data += "MACADDR=\"%s\"\n" % primary_mac
if base_opt.use_dhcp:
data += "BOOTPROTO=dhcp\n"
if base_opt.members:
members = [member.name for member in base_opt.members]
self.member_names[base_opt.name] = members
data += "DEVICETYPE=Team\n"
if base_opt.bonding_options:
data += "TEAM_CONFIG='%s'\n" % base_opt.bonding_options
elif isinstance(base_opt, objects.OvsTunnel):
ovs_extra.extend(base_opt.ovs_extra)
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSTunnel\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
data += "OVS_TUNNEL_TYPE=%s\n" % base_opt.tunnel_type
data += "OVS_TUNNEL_OPTIONS=\"%s\"\n" % \
' '.join(base_opt.ovs_options)
elif isinstance(base_opt, objects.OvsPatchPort):
ovs_extra.extend(base_opt.ovs_extra)
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSPatchPort\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
data += "OVS_PATCH_PEER=%s\n" % base_opt.peer
elif isinstance(base_opt, objects.OvsDpdkPort):
ovs_extra.extend(base_opt.ovs_extra)
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSDPDKPort\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
            # Validation that the DPDK port has only one interface is done
            # prior to this, so the interface name is accessed statically.
            # Also dpdk_devargs is valid here, since bind_dpdk_interfaces()
            # is invoked before this.
dpdk_devargs = utils.get_dpdk_devargs(
base_opt.members[0].name, self.noop)
ovs_extra.append("set Interface $DEVICE options:dpdk-devargs="
"%s" % dpdk_devargs)
if base_opt.mtu:
ovs_extra.append("set Interface $DEVICE mtu_request=$MTU")
if base_opt.rx_queue:
data += "RX_QUEUE=%i\n" % base_opt.rx_queue
ovs_extra.append("set Interface $DEVICE " +
"options:n_rxq=$RX_QUEUE")
elif isinstance(base_opt, objects.OvsDpdkBond):
ovs_extra.extend(base_opt.ovs_extra)
            # Referring to bug 1643026, commenting out the interfaces below
            # works around the error, but is not the long-term solution.
            # The long-term solution is to run DPDK options before
            # os-net-config, which is being tracked in bug 1654975.
# if base_opt.primary_interface_name:
# primary_name = base_opt.primary_interface_name
# self.bond_primary_ifaces[base_opt.name] = primary_name
data += "DEVICETYPE=ovs\n"
data += "TYPE=OVSDPDKBond\n"
data += "OVS_BRIDGE=%s\n" % base_opt.bridge_name
if base_opt.members:
for bond_member in base_opt.members:
                    # Validation that the DPDK port has only one interface is
                    # done prior to this, so the interface name is accessed
                    # statically. Also dpdk_devargs is valid here, since
                    # bind_dpdk_interfaces() is invoked before this.
dpdk_devargs = utils.get_dpdk_devargs(
bond_member.members[0].name, self.noop)
ovs_extra.append("set Interface %s options:"
"dpdk-devargs=%s"
% (bond_member.name, dpdk_devargs))
members = [member.name for member in base_opt.members]
data += ("BOND_IFACES=\"%s\"\n" % " ".join(members))
# MTU configuration given for the OvsDpdkbond shall be applied
# to each of the members of the OvsDpdkbond
if base_opt.mtu:
for member in base_opt.members:
ovs_extra.append("set Interface %s mtu_request=$MTU" %
member.name)
if base_opt.rx_queue:
data += "RX_QUEUE=%i\n" % base_opt.rx_queue
for member in base_opt.members:
ovs_extra.append("set Interface %s options:n_rxq="
"$RX_QUEUE" % member.name)
if base_opt.ovs_options:
data += "OVS_OPTIONS=\"%s\"\n" % base_opt.ovs_options
ovs_extra.extend(base_opt.ovs_extra)
else:
if base_opt.use_dhcp:
data += "BOOTPROTO=dhcp\n"
elif not base_opt.addresses:
data += "BOOTPROTO=none\n"
if hasattr(base_opt, 'ethtool_opts') and base_opt.ethtool_opts:
data += "ETHTOOL_OPTS=\"%s\"\n" % base_opt.ethtool_opts
if base_opt.mtu:
data += "MTU=%i\n" % base_opt.mtu
if base_opt.use_dhcpv6 or base_opt.v6_addresses():
data += "IPV6INIT=yes\n"
if base_opt.mtu:
data += "IPV6_MTU=%i\n" % base_opt.mtu
if base_opt.use_dhcpv6:
data += "DHCPV6C=yes\n"
elif base_opt.addresses:
v4_addresses = base_opt.v4_addresses()
if v4_addresses:
data += "BOOTPROTO=static\n"
for i, address in enumerate(v4_addresses):
num = '%s' % i if i else ''
data += "IPADDR%s=%s\n" % (num, address.ip)
data += "NETMASK%s=%s\n" % (num, address.netmask)
v6_addresses = base_opt.v6_addresses()
if v6_addresses:
first_v6 = v6_addresses[0]
data += "IPV6_AUTOCONF=no\n"
data += "IPV6ADDR=%s\n" % first_v6.ip_netmask
if len(v6_addresses) > 1:
secondaries_v6 = " ".join(map(lambda a: a.ip_netmask,
v6_addresses[1:]))
data += "IPV6ADDR_SECONDARIES=\"%s\"\n" % secondaries_v6
if base_opt.hwaddr:
data += "HWADDR=%s\n" % base_opt.hwaddr
if ovs_extra:
data += "OVS_EXTRA=\"%s\"\n" % " -- ".join(ovs_extra)
if not base_opt.defroute:
data += "DEFROUTE=no\n"
if base_opt.dhclient_args:
data += "DHCLIENTARGS=%s\n" % base_opt.dhclient_args
if base_opt.dns_servers:
data += "DNS1=%s\n" % base_opt.dns_servers[0]
if len(base_opt.dns_servers) >= 2:
data += "DNS2=%s\n" % base_opt.dns_servers[1]
if len(base_opt.dns_servers) > 2:
logger.warning('ifcfg format supports max 2 resolvers.')
if base_opt.domain:
if type(base_opt.domain) == list:
data += "DOMAIN=\"%s\"\n" % ' '.join(base_opt.domain)
else:
data += "DOMAIN=%s\n" % base_opt.domain
return data
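    # Illustrative example (not from the original sources): for a plain
    # Interface object with use_dhcp=True and mtu=9000, the tail of this
    # common ifcfg generation would contribute lines such as:
    #
    #   BOOTPROTO=dhcp
    #   MTU=9000
    #
    # Routes and rules are not embedded here; they are written to separate
    # files by _add_routes() and _add_rules() below.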
def _add_routes(self, interface_name, routes=[]):
logger.info('adding custom route for interface: %s' % interface_name)
data = ""
first_line = ""
data6 = ""
first_line6 = ""
for route in routes:
options = ""
table = ""
if route.route_options:
options = " %s" % route.route_options
if route.route_table:
if route.route_options.find('table ') == -1:
table = " table %s" % route.route_table
if ":" not in route.next_hop:
# Route is an IPv4 route
if route.default:
first_line = "default via %s dev %s%s%s\n" % (
route.next_hop, interface_name,
table, options)
else:
data += "%s via %s dev %s%s%s\n" % (
route.ip_netmask, route.next_hop,
interface_name, table, options)
else:
# Route is an IPv6 route
if route.default:
first_line6 = "default via %s dev %s%s%s\n" % (
route.next_hop, interface_name,
table, options)
else:
data6 += "%s via %s dev %s%s%s\n" % (
route.ip_netmask, route.next_hop,
interface_name, table, options)
self.route_data[interface_name] = first_line + data
self.route6_data[interface_name] = first_line6 + data6
logger.debug('route data: %s' % self.route_data[interface_name])
logger.debug('ipv6 route data: %s' % self.route6_data[interface_name])
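    # Illustrative example (hypothetical addresses): a default IPv4 route plus
    # one extra route on interface em1 would produce route file content like:
    #
    #   default via 192.0.2.1 dev em1
    #   198.51.100.0/24 via 192.0.2.1 dev em1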
def _add_rules(self, interface, rules):
"""Add RouteRule objects to an interface.
        :param interface: the name of the interface to apply rules to.
:param rules: the list of rules to apply to the interface.
"""
logger.info('adding route rules for interface: %s' % interface)
data = ""
first_line = "# This file is autogenerated by os-net-config\n"
for rule in rules:
if rule.comment:
data += "# %s\n" % rule.comment
data += "%s\n" % rule.rule
self.rule_data[interface] = first_line + data
logger.debug('rules for interface: %s' % self.rule_data[interface])
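    # Illustrative example (hypothetical rule): a RouteRule with rule
    # "from 192.0.2.0/24 table 200" and comment "storage traffic" yields:
    #
    #   # This file is autogenerated by os-net-config
    #   # storage traffic
    #   from 192.0.2.0/24 table 200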
def add_route_table(self, route_table):
"""Add a RouteTable object to the net config object.
:param route_table: the RouteTable object to add.
"""
logger.info('adding route table: %s %s' % (route_table.table_id,
route_table.name))
self.route_table_data[int(route_table.table_id)] = route_table.name
def add_interface(self, interface):
"""Add an Interface object to the net config object.
:param interface: The Interface object to add.
"""
logger.info('adding interface: %s' % interface.name)
data = self._add_common(interface)
logger.debug('interface data: %s' % data)
self.interface_data[interface.name] = data
if interface.routes:
self._add_routes(interface.name, interface.routes)
if interface.rules:
self._add_rules(interface.name, interface.rules)
if interface.renamed:
logger.info("Interface %s being renamed to %s"
% (interface.hwname, interface.name))
self.renamed_interfaces[interface.hwname] = interface.name
def add_vlan(self, vlan):
"""Add a Vlan object to the net config object.
:param vlan: The vlan object to add.
"""
logger.info('adding vlan: %s' % vlan.name)
data = self._add_common(vlan)
logger.debug('vlan data: %s' % data)
self.vlan_data[vlan.name] = data
if vlan.routes:
self._add_routes(vlan.name, vlan.routes)
if vlan.rules:
self._add_rules(vlan.name, vlan.rules)
def add_ivs_interface(self, ivs_interface):
"""Add a ivs_interface object to the net config object.
:param ivs_interface: The ivs_interface object to add.
"""
logger.info('adding ivs_interface: %s' % ivs_interface.name)
data = self._add_common(ivs_interface)
logger.debug('ivs_interface data: %s' % data)
self.ivsinterface_data[ivs_interface.name] = data
if ivs_interface.routes:
self._add_routes(ivs_interface.name, ivs_interface.routes)
if ivs_interface.rules:
self._add_rules(ivs_interface.name, ivs_interface.rules)
def add_nfvswitch_internal(self, nfvswitch_internal):
"""Add a nfvswitch_internal interface object to the net config object.
:param nfvswitch_internal: The nfvswitch_internal object to add.
"""
iface_name = nfvswitch_internal.name
logger.info('adding nfvswitch_internal interface: %s' % iface_name)
data = self._add_common(nfvswitch_internal)
logger.debug('nfvswitch_internal interface data: %s' % data)
self.nfvswitch_intiface_data[iface_name] = data
if nfvswitch_internal.routes:
self._add_routes(iface_name, nfvswitch_internal.routes)
if nfvswitch_internal.rules:
self._add_rules(iface_name, nfvswitch_internal.rules)
def add_bridge(self, bridge):
"""Add an OvsBridge object to the net config object.
:param bridge: The OvsBridge object to add.
"""
logger.info('adding bridge: %s' % bridge.name)
data = self._add_common(bridge)
logger.debug('bridge data: %s' % data)
self.bridge_data[bridge.name] = data
if bridge.routes:
self._add_routes(bridge.name, bridge.routes)
if bridge.rules:
self._add_rules(bridge.name, bridge.rules)
def add_ovs_user_bridge(self, bridge):
"""Add an OvsUserBridge object to the net config object.
:param bridge: The OvsUserBridge object to add.
"""
logger.info('adding ovs user bridge: %s' % bridge.name)
data = self._add_common(bridge)
logger.debug('ovs user bridge data: %s' % data)
self.bridge_data[bridge.name] = data
if bridge.routes:
self._add_routes(bridge.name, bridge.routes)
if bridge.rules:
self._add_rules(bridge.name, bridge.rules)
def add_linux_bridge(self, bridge):
"""Add a LinuxBridge object to the net config object.
:param bridge: The LinuxBridge object to add.
"""
logger.info('adding linux bridge: %s' % bridge.name)
data = self._add_common(bridge)
logger.debug('bridge data: %s' % data)
self.linuxbridge_data[bridge.name] = data
if bridge.routes:
self._add_routes(bridge.name, bridge.routes)
if bridge.rules:
self._add_rules(bridge.name, bridge.rules)
def add_ivs_bridge(self, bridge):
"""Add a IvsBridge object to the net config object.
IVS can only support one virtual switch per node,
using "ivs" as its name. As long as the ivs service
is running, the ivs virtual switch will be there.
It is impossible to add multiple ivs virtual switches
per node.
:param bridge: The IvsBridge object to add.
"""
pass
def add_nfvswitch_bridge(self, bridge):
"""Add a NFVSwitchBridge object to the net config object.
NFVSwitch can only support one virtual switch per node,
using "nfvswitch" as its name. As long as the nfvswitch service
is running, the nfvswitch virtual switch will be available.
:param bridge: The NfvswitchBridge object to add.
"""
self.nfvswitch_options = bridge.options
def add_bond(self, bond):
"""Add an OvsBond object to the net config object.
:param bond: The OvsBond object to add.
"""
logger.info('adding bond: %s' % bond.name)
data = self._add_common(bond)
logger.debug('bond data: %s' % data)
self.interface_data[bond.name] = data
if bond.routes:
self._add_routes(bond.name, bond.routes)
if bond.rules:
self._add_rules(bond.name, bond.rules)
def add_linux_bond(self, bond):
"""Add a LinuxBond object to the net config object.
:param bond: The LinuxBond object to add.
"""
logger.info('adding linux bond: %s' % bond.name)
data = self._add_common(bond)
logger.debug('bond data: %s' % data)
self.linuxbond_data[bond.name] = data
if bond.routes:
self._add_routes(bond.name, bond.routes)
if bond.rules:
self._add_rules(bond.name, bond.rules)
def add_linux_team(self, team):
"""Add a LinuxTeam object to the net config object.
:param team: The LinuxTeam object to add.
"""
logger.info('adding linux team: %s' % team.name)
data = self._add_common(team)
logger.debug('team data: %s' % data)
self.linuxteam_data[team.name] = data
if team.routes:
self._add_routes(team.name, team.routes)
if team.rules:
self._add_rules(team.name, team.rules)
def add_ovs_tunnel(self, tunnel):
"""Add a OvsTunnel object to the net config object.
:param tunnel: The OvsTunnel object to add.
"""
logger.info('adding ovs tunnel: %s' % tunnel.name)
data = self._add_common(tunnel)
logger.debug('ovs tunnel data: %s' % data)
self.interface_data[tunnel.name] = data
def add_ovs_patch_port(self, ovs_patch_port):
"""Add a OvsPatchPort object to the net config object.
:param ovs_patch_port: The OvsPatchPort object to add.
"""
logger.info('adding ovs patch port: %s' % ovs_patch_port.name)
data = self._add_common(ovs_patch_port)
logger.debug('ovs patch port data: %s' % data)
self.interface_data[ovs_patch_port.name] = data
def add_ib_interface(self, ib_interface):
"""Add an InfiniBand interface object to the net config object.
:param ib_interface: The InfiniBand interface object to add.
"""
logger.info('adding ib_interface: %s' % ib_interface.name)
data = self._add_common(ib_interface)
logger.debug('ib_interface data: %s' % data)
self.ib_interface_data[ib_interface.name] = data
if ib_interface.routes:
self._add_routes(ib_interface.name, ib_interface.routes)
if ib_interface.rules:
self._add_rules(ib_interface.name, ib_interface.rules)
if ib_interface.renamed:
logger.info("InfiniBand interface %s being renamed to %s"
% (ib_interface.hwname, ib_interface.name))
self.renamed_interfaces[ib_interface.hwname] = ib_interface.name
def add_ib_child_interface(self, ib_child_interface):
"""Add an InfiniBand child interface object to the net config object.
:param ib_child_interface: The InfiniBand child
interface object to add.
"""
logger.info('adding ib_child_interface: %s' % ib_child_interface.name)
data = self._add_common(ib_child_interface)
logger.debug('ib_child_interface data: %s' % data)
self.ib_childs_data[ib_child_interface.name] = data
if ib_child_interface.routes:
self._add_routes(ib_child_interface.name,
ib_child_interface.routes)
if ib_child_interface.rules:
self._add_rules(ib_child_interface.name, ib_child_interface.rules)
def add_ovs_dpdk_port(self, ovs_dpdk_port):
"""Add a OvsDpdkPort object to the net config object.
:param ovs_dpdk_port: The OvsDpdkPort object to add.
"""
logger.info('adding ovs dpdk port: %s' % ovs_dpdk_port.name)
        # A DPDK port will have only one member of type Interface; validation
        # checks are added at the object creation stage.
ifname = ovs_dpdk_port.members[0].name
# Bind the dpdk interface
utils.bind_dpdk_interfaces(ifname, ovs_dpdk_port.driver, self.noop)
if not self.noop:
remove_ifcfg_config(ifname)
data = self._add_common(ovs_dpdk_port)
logger.debug('ovs dpdk port data: %s' % data)
self.interface_data[ovs_dpdk_port.name] = data
def add_ovs_dpdk_bond(self, ovs_dpdk_bond):
"""Add an OvsDPDKBond object to the net config object.
        :param ovs_dpdk_bond: The OvsDpdkBond object to add.
"""
logger.info('adding ovs dpdk bond: %s' % ovs_dpdk_bond.name)
# Bind the dpdk interface
for dpdk_port in ovs_dpdk_bond.members:
            # A DPDK port will have only one member of type Interface;
            # validation checks are added at the object creation stage.
ifname = dpdk_port.members[0].name
utils.bind_dpdk_interfaces(ifname, dpdk_port.driver, self.noop)
if not self.noop:
remove_ifcfg_config(ifname)
data = self._add_common(ovs_dpdk_bond)
logger.debug('ovs dpdk bond data: %s' % data)
self.interface_data[ovs_dpdk_bond.name] = data
if ovs_dpdk_bond.routes:
self._add_routes(ovs_dpdk_bond.name, ovs_dpdk_bond.routes)
if ovs_dpdk_bond.rules:
self._add_rules(ovs_dpdk_bond.name, ovs_dpdk_bond.rules)
def add_sriov_pf(self, sriov_pf):
"""Add a SriovPF object to the net config object
:param sriov_pf: The SriovPF object to add
"""
logger.info('adding sriov pf: %s' % sriov_pf.name)
data = self._add_common(sriov_pf)
logger.debug('sriov pf data: %s' % data)
utils.update_sriov_pf_map(sriov_pf.name, sriov_pf.numvfs,
self.noop, promisc=sriov_pf.promisc,
link_mode=sriov_pf.link_mode)
self.interface_data[sriov_pf.name] = data
if sriov_pf.routes:
self._add_routes(sriov_pf.name, sriov_pf.routes)
if sriov_pf.rules:
self._add_rules(sriov_pf.name, sriov_pf.rules)
def add_sriov_vf(self, sriov_vf):
"""Add a SriovVF object to the net config object
:param sriov_vf: The SriovVF object to add
"""
logger.info('adding sriov vf: %s for pf: %s, vfid: %d'
% (sriov_vf.name, sriov_vf.device, sriov_vf.vfid))
data = self._add_common(sriov_vf)
logger.debug('sriov vf data: %s' % data)
self.interface_data[sriov_vf.name] = data
if sriov_vf.routes:
self._add_routes(sriov_vf.name, sriov_vf.routes)
if sriov_vf.rules:
self._add_rules(sriov_vf.name, sriov_vf.rules)
def add_vpp_interface(self, vpp_interface):
"""Add a VppInterface object to the net config object
:param vpp_interface: The VppInterface object to add
"""
vpp_interface.pci_dev = utils.get_pci_address(vpp_interface.name,
False)
if not vpp_interface.pci_dev:
vpp_interface.pci_dev = utils.get_stored_pci_address(
vpp_interface.name, False)
vpp_interface.hwaddr = utils.interface_mac(vpp_interface.name)
if not self.noop:
self.ifdown(vpp_interface.name)
remove_ifcfg_config(vpp_interface.name)
logger.info('adding vpp interface: %s %s'
% (vpp_interface.name, vpp_interface.pci_dev))
self.vpp_interface_data[vpp_interface.name] = vpp_interface
def add_vpp_bond(self, vpp_bond):
"""Add a VppInterface object to the net config object
:param vpp_bond: The VPPBond object to add
"""
logger.info('adding vpp bond: %s' % vpp_bond.name)
self.vpp_bond_data[vpp_bond.name] = vpp_bond
def add_contrail_vrouter(self, contrail_vrouter):
"""Add a ContraiVrouter object to the net config object
:param contrail_vrouter:
The ContrailVrouter object to add
"""
logger.info('adding contrail_vrouter interface: %s'
% contrail_vrouter.name)
        # A contrail vrouter will have only one member (of type interface,
        # vlan or linux_bond).
ifname = contrail_vrouter.members[0].name
data = self._add_common(contrail_vrouter)
data += "DEVICETYPE=vhost\n"
data += "TYPE=kernel_mode\n"
data += "BIND_INT=%s\n" % ifname
logger.debug('contrail data: %s' % data)
self.interface_data[contrail_vrouter.name] = data
if contrail_vrouter.routes:
self._add_routes(contrail_vrouter.name, contrail_vrouter.routes)
if contrail_vrouter.rules:
self._add_rules(contrail_vrouter.name, contrail_vrouter.rules)
def add_contrail_vrouter_dpdk(self, contrail_vrouter_dpdk):
"""Add a ContraiVrouterDpdk object to the net config object
:param contrail_vrouter_dpdk:
The ContrailVrouterDpdk object to add
"""
logger.info('adding contrail vrouter dpdk interface: %s'
% contrail_vrouter_dpdk.name)
pci_string = ",".join(
utils.translate_ifname_to_pci_address(bind_int.name, self.noop)
for bind_int in contrail_vrouter_dpdk.members)
data = self._add_common(contrail_vrouter_dpdk)
data += "DEVICETYPE=vhost\n"
data += "TYPE=dpdk\n"
data += "BIND_INT=%s\n" % pci_string
if len(contrail_vrouter_dpdk.members) > 1:
data += "BOND_MODE=%s\n" % contrail_vrouter_dpdk.bond_mode
data += "BOND_POLICY=%s\n" % contrail_vrouter_dpdk.bond_policy
data += "DRIVER=%s\n" % contrail_vrouter_dpdk.driver
data += "CPU_LIST=%s\n" % contrail_vrouter_dpdk.cpu_list
if contrail_vrouter_dpdk.vlan_id:
data += "VLAN_ID=%s\n" % contrail_vrouter_dpdk.vlan_id
logger.debug('contrail dpdk data: %s' % data)
self.interface_data[contrail_vrouter_dpdk.name] = data
if contrail_vrouter_dpdk.routes:
self._add_routes(contrail_vrouter_dpdk.name,
contrail_vrouter_dpdk.routes)
if contrail_vrouter_dpdk.rules:
self._add_rules(contrail_vrouter_dpdk.name,
contrail_vrouter_dpdk.rules)
def add_linux_tap(self, linux_tap):
"""Add a LinuxTap object to the net config object
:param linux_tap:
The LinuxTap object to add
"""
logger.info('adding Linux TAP interface: %s'
% linux_tap.name)
data = self._add_common(linux_tap)
data += "TYPE=Tap\n"
self.interface_data[linux_tap.name] = data
def generate_ivs_config(self, ivs_uplinks, ivs_interfaces):
"""Generate configuration content for ivs."""
intfs = []
for intf in ivs_uplinks:
intfs.append(' -u ')
intfs.append(intf)
uplink_str = ''.join(intfs)
intfs = []
for intf in ivs_interfaces:
intfs.append(' --internal-port=')
intfs.append(intf)
intf_str = ''.join(intfs)
data = ("DAEMON_ARGS=\"--hitless --certificate /etc/ivs "
"--inband-vlan 4092%s%s\""
% (uplink_str, intf_str))
return data
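    # Illustrative example (hypothetical names): uplinks ['em2'] and internal
    # ports ['storage'] would generate, as a single line:
    #
    #   DAEMON_ARGS="--hitless --certificate /etc/ivs --inband-vlan 4092
    #       -u em2 --internal-port=storage"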
def generate_nfvswitch_config(self, nfvswitch_ifaces,
nfvswitch_internal_ifaces):
"""Generate configuration content for nfvswitch."""
options_str = ""
if self.nfvswitch_options:
options_str = self.nfvswitch_options
ifaces = []
for iface in nfvswitch_ifaces:
ifaces.append(' -u ')
ifaces.append(iface)
iface_str = ''.join(ifaces)
ifaces = []
for iface in nfvswitch_internal_ifaces:
ifaces.append(' -m ')
ifaces.append(iface)
internal_str = ''.join(ifaces)
data = "SETUP_ARGS=\"%s%s%s\"" % (options_str, iface_str, internal_str)
return data
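    # Illustrative example (hypothetical names): interfaces ['eth1', 'eth2']
    # and internal ports ['api'] with no extra options would generate:
    #
    #   SETUP_ARGS=" -u eth1 -u eth2 -m api"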
def generate_route_table_config(self, route_tables):
"""Generate configuration content for routing tables.
This method first extracts the existing route table definitions. If
any non-default tables exist, they will be kept unless they conflict
with new tables defined in the route_tables dict.
        :param route_tables: a dict mapping route table IDs to table names
"""
custom_tables = {}
res_ids = ['0', '253', '254', '255']
res_names = ['unspec', 'default', 'main', 'local']
rt_config = utils.get_file_data(route_table_config_path()).split('\n')
rt_defaults = _ROUTE_TABLE_DEFAULT.split("\n")
data = _ROUTE_TABLE_DEFAULT
for line in (line for line in rt_config if line not in rt_defaults):
# Leave non-standard comments intact in file
if line.startswith('#') and not line.strip() in rt_defaults:
data += "%s\n" % line
# Ignore old managed entries, will be added back if in new config.
elif line.find("# os-net-config managed table") == -1:
id_name = line.split()
# Keep custom tables if there is no conflict with new tables.
if id_name[0].isdigit() and len(id_name) > 1:
if not id_name[0] in res_ids:
if not id_name[1] in res_names:
if not int(id_name[0]) in route_tables:
if not id_name[1] in route_tables.values():
# Replicate line with any comments appended
custom_tables[id_name[0]] = id_name[1]
data += "%s\n" % line
if custom_tables:
logger.debug("Existing route tables: %s" % custom_tables)
for id in sorted(route_tables):
if str(id) in res_ids:
message = "Table %s(%s) conflicts with reserved table %s(%s)" \
% (route_tables[id], id,
res_names[res_ids.index(str(id))], id)
raise os_net_config.ConfigurationError(message)
elif route_tables[id] in res_names:
message = "Table %s(%s) conflicts with reserved table %s(%s)" \
% (route_tables[id], id, route_tables[id],
res_ids[res_names.index(route_tables[id])])
raise os_net_config.ConfigurationError(message)
else:
data += "%s\t%s # os-net-config managed table\n" \
% (id, route_tables[id])
return data
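    # Illustrative example (hypothetical table): passing {200: 'custom'} keeps
    # the reserved default entries and appends a tab-separated managed line:
    #
    #   200     custom # os-net-config managed table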
def apply(self, cleanup=False, activate=True):
"""Apply the network configuration.
:param cleanup: A boolean which indicates whether any undefined
(existing but not present in the object model) interface
should be disabled and deleted.
:param activate: A boolean which indicates if the config should
be activated by stopping/starting interfaces
NOTE: if cleanup is specified we will deactivate interfaces even
if activate is false
        :returns: a dict in the format filename: data, with an entry for
            each file that was changed (or would be changed if in --noop
            mode).
            Note that noop mode is set via the constructor's noop boolean.
"""
logger.info('applying network configs...')
restart_interfaces = []
restart_vlans = []
restart_ib_childs = []
restart_bridges = []
restart_linux_bonds = []
start_linux_bonds = []
restart_linux_teams = []
restart_vpp = False
apply_interfaces = []
apply_bridges = []
apply_routes = []
apply_rules = []
update_files = {}
all_file_names = []
linux_bond_children = {}
ivs_uplinks = [] # ivs physical uplinks
ivs_interfaces = [] # ivs internal ports
nfvswitch_interfaces = [] # nfvswitch physical interfaces
nfvswitch_internal_ifaces = [] # nfvswitch internal/management ports
stop_dhclient_interfaces = []
ovs_needs_restart = False
vpp_interfaces = self.vpp_interface_data.values()
vpp_bonds = self.vpp_bond_data.values()
ipcmd = utils.iproute2_path()
for interface_name, iface_data in self.interface_data.items():
route_data = self.route_data.get(interface_name, '')
route6_data = self.route6_data.get(interface_name, '')
rule_data = self.rule_data.get(interface_name, '')
interface_path = self.root_dir + ifcfg_config_path(interface_name)
route_path = self.root_dir + route_config_path(interface_name)
route6_path = self.root_dir + route6_config_path(interface_name)
rule_path = self.root_dir + route_rule_config_path(interface_name)
all_file_names.append(interface_path)
all_file_names.append(route_path)
            all_file_names.append(route6_path)
            all_file_names.append(rule_path)
if "IVS_BRIDGE" in iface_data:
ivs_uplinks.append(interface_name)
if "NFVSWITCH_BRIDGE" in iface_data:
nfvswitch_interfaces.append(interface_name)
if utils.diff(interface_path, iface_data):
if self.ifcfg_requires_restart(interface_path, iface_data):
restart_interfaces.append(interface_name)
# Openvswitch needs to be restarted when OVSDPDKPort or
# OVSDPDKBond is added
if "OVSDPDK" in iface_data:
ovs_needs_restart = True
else:
apply_interfaces.append(
(interface_name, interface_path, iface_data))
update_files[interface_path] = iface_data
if "BOOTPROTO=dhcp" not in iface_data:
stop_dhclient_interfaces.append(interface_name)
else:
logger.info('No changes required for interface: %s' %
interface_name)
if utils.diff(route_path, route_data):
update_files[route_path] = route_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route_data))
if utils.diff(route6_path, route6_data):
update_files[route6_path] = route6_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route6_data))
if utils.diff(rule_path, rule_data):
update_files[rule_path] = rule_data
if interface_name not in restart_interfaces:
apply_rules.append((interface_name, rule_data))
for interface_name, iface_data in self.ivsinterface_data.items():
route_data = self.route_data.get(interface_name, '')
route6_data = self.route6_data.get(interface_name, '')
rule_data = self.rule_data.get(interface_name, '')
interface_path = self.root_dir + ifcfg_config_path(interface_name)
route_path = self.root_dir + route_config_path(interface_name)
route6_path = self.root_dir + route6_config_path(interface_name)
rule_path = self.root_dir + route_rule_config_path(interface_name)
all_file_names.append(interface_path)
all_file_names.append(route_path)
all_file_names.append(route6_path)
all_file_names.append(rule_path)
ivs_interfaces.append(interface_name)
if utils.diff(interface_path, iface_data):
if self.ifcfg_requires_restart(interface_path, iface_data):
restart_interfaces.append(interface_name)
else:
apply_interfaces.append(
(interface_name, interface_path, iface_data))
update_files[interface_path] = iface_data
else:
logger.info('No changes required for ivs interface: %s' %
interface_name)
if utils.diff(route_path, route_data):
update_files[route_path] = route_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route_data))
if utils.diff(route6_path, route6_data):
update_files[route6_path] = route6_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route6_data))
if utils.diff(rule_path, rule_data):
update_files[rule_path] = rule_data
if interface_name not in restart_interfaces:
apply_rules.append((interface_name, rule_data))
for iface_name, iface_data in self.nfvswitch_intiface_data.items():
route_data = self.route_data.get(iface_name, '')
route6_data = self.route6_data.get(iface_name, '')
rule_data = self.rule_data.get(iface_name, '')
iface_path = self.root_dir + ifcfg_config_path(iface_name)
route_path = self.root_dir + route_config_path(iface_name)
route6_path = self.root_dir + route6_config_path(iface_name)
rule_path = self.root_dir + route_rule_config_path(iface_name)
all_file_names.append(iface_path)
all_file_names.append(route_path)
all_file_names.append(route6_path)
all_file_names.append(rule_path)
nfvswitch_internal_ifaces.append(iface_name)
if utils.diff(iface_path, iface_data):
if self.ifcfg_requires_restart(iface_path, iface_data):
restart_interfaces.append(iface_name)
else:
apply_interfaces.append(
(iface_name, iface_path, iface_data))
update_files[iface_path] = iface_data
else:
logger.info('No changes required for nfvswitch interface: %s' %
iface_name)
if utils.diff(route_path, route_data):
update_files[route_path] = route_data
if iface_name not in restart_interfaces:
apply_routes.append((iface_name, route_data))
if utils.diff(route6_path, route6_data):
update_files[route6_path] = route6_data
if iface_name not in restart_interfaces:
apply_routes.append((iface_name, route6_data))
if utils.diff(rule_path, rule_data):
update_files[rule_path] = rule_data
if iface_name not in restart_interfaces:
apply_rules.append((iface_name, rule_data))
for bridge_name, bridge_data in self.bridge_data.items():
route_data = self.route_data.get(bridge_name, '')
route6_data = self.route6_data.get(bridge_name, '')
rule_data = self.rule_data.get(bridge_name, '')
bridge_path = self.root_dir + bridge_config_path(bridge_name)
br_route_path = self.root_dir + route_config_path(bridge_name)
br_route6_path = self.root_dir + route6_config_path(bridge_name)
br_rule_path = self.root_dir + route_rule_config_path(bridge_name)
all_file_names.append(bridge_path)
all_file_names.append(br_route_path)
all_file_names.append(br_route6_path)
all_file_names.append(br_rule_path)
if utils.diff(bridge_path, bridge_data):
if self.ifcfg_requires_restart(bridge_path, bridge_data):
restart_bridges.append(bridge_name)
# Avoid duplicate interface being added to the restart list
children = self.child_members(bridge_name)
for child in children:
if child not in restart_interfaces:
restart_interfaces.append(child)
else:
apply_bridges.append((bridge_name, bridge_path,
bridge_data))
update_files[bridge_path] = bridge_data
else:
logger.info('No changes required for bridge: %s' % bridge_name)
if utils.diff(br_route_path, route_data):
update_files[br_route_path] = route_data
if bridge_name not in restart_interfaces:
apply_routes.append((bridge_name, route_data))
if utils.diff(br_route6_path, route6_data):
update_files[br_route6_path] = route6_data
if bridge_name not in restart_interfaces:
apply_routes.append((bridge_name, route6_data))
if utils.diff(br_rule_path, rule_data):
update_files[br_rule_path] = rule_data
if bridge_name not in restart_interfaces:
apply_rules.append((bridge_name, rule_data))
for bridge_name, bridge_data in self.linuxbridge_data.items():
route_data = self.route_data.get(bridge_name, '')
route6_data = self.route6_data.get(bridge_name, '')
rule_data = self.rule_data.get(bridge_name, '')
bridge_path = self.root_dir + bridge_config_path(bridge_name)
br_route_path = self.root_dir + route_config_path(bridge_name)
br_route6_path = self.root_dir + route6_config_path(bridge_name)
br_rule_path = self.root_dir + route_rule_config_path(bridge_name)
all_file_names.append(bridge_path)
all_file_names.append(br_route_path)
all_file_names.append(br_route6_path)
all_file_names.append(br_rule_path)
if utils.diff(bridge_path, bridge_data):
if self.ifcfg_requires_restart(bridge_path, bridge_data):
restart_bridges.append(bridge_name)
# Avoid duplicate interface being added to the restart list
children = self.child_members(bridge_name)
for child in children:
if child not in restart_interfaces:
restart_interfaces.append(child)
else:
apply_bridges.append((bridge_name, bridge_path,
bridge_data))
update_files[bridge_path] = bridge_data
else:
logger.info('No changes required for bridge: %s' % bridge_name)
if utils.diff(br_route_path, route_data):
update_files[br_route_path] = route_data
if bridge_name not in restart_bridges:
apply_routes.append((bridge_name, route_data))
            if utils.diff(br_route6_path, route6_data):
                update_files[br_route6_path] = route6_data
if bridge_name not in restart_bridges:
apply_routes.append((bridge_name, route6_data))
if utils.diff(br_rule_path, rule_data):
update_files[br_rule_path] = rule_data
if bridge_name not in restart_bridges:
apply_rules.append((bridge_name, rule_data))
for team_name, team_data in self.linuxteam_data.items():
route_data = self.route_data.get(team_name, '')
route6_data = self.route6_data.get(team_name, '')
rule_data = self.rule_data.get(team_name, '')
team_path = self.root_dir + bridge_config_path(team_name)
team_route_path = self.root_dir + route_config_path(team_name)
team_route6_path = self.root_dir + route6_config_path(team_name)
team_rule_path = self.root_dir + route_rule_config_path(team_name)
all_file_names.append(team_path)
all_file_names.append(team_route_path)
all_file_names.append(team_route6_path)
all_file_names.append(team_rule_path)
if utils.diff(team_path, team_data):
if self.ifcfg_requires_restart(team_path, team_data):
restart_linux_teams.append(team_name)
# Avoid duplicate interface being added to the restart list
children = self.child_members(team_name)
for child in children:
if child not in restart_interfaces:
restart_interfaces.append(child)
else:
apply_interfaces.append(
(team_name, team_path, team_data))
update_files[team_path] = team_data
else:
logger.info('No changes required for linux team: %s' %
team_name)
if utils.diff(team_route_path, route_data):
update_files[team_route_path] = route_data
if team_name not in restart_linux_teams:
apply_routes.append((team_name, route_data))
if utils.diff(team_route6_path, route6_data):
update_files[team_route6_path] = route6_data
if team_name not in restart_linux_teams:
apply_routes.append((team_name, route6_data))
if utils.diff(team_rule_path, rule_data):
update_files[team_rule_path] = rule_data
for bond_name, bond_data in self.linuxbond_data.items():
route_data = self.route_data.get(bond_name, '')
route6_data = self.route6_data.get(bond_name, '')
rule_data = self.rule_data.get(bond_name, '')
bond_path = self.root_dir + bridge_config_path(bond_name)
bond_route_path = self.root_dir + route_config_path(bond_name)
bond_route6_path = self.root_dir + route6_config_path(bond_name)
bond_rule_path = self.root_dir + route_rule_config_path(bond_name)
all_file_names.append(bond_path)
all_file_names.append(bond_route_path)
all_file_names.append(bond_route6_path)
all_file_names.append(bond_rule_path)
children = self.child_members(bond_name)
linux_bond_children[bond_name] = children
if utils.diff(bond_path, bond_data):
if self.ifcfg_requires_restart(bond_path, bond_data):
restart_linux_bonds.append(bond_name)
# Avoid duplicate interface being added to the restart list
for child in children:
if child not in restart_interfaces:
restart_interfaces.append(child)
else:
apply_interfaces.append(
(bond_name, bond_path, bond_data))
update_files[bond_path] = bond_data
else:
logger.info('No changes required for linux bond: %s' %
bond_name)
if utils.diff(bond_route_path, route_data):
update_files[bond_route_path] = route_data
if bond_name not in restart_linux_bonds:
apply_routes.append((bond_name, route_data))
if utils.diff(bond_route6_path, route6_data):
update_files[bond_route6_path] = route6_data
if bond_name not in restart_linux_bonds:
apply_routes.append((bond_name, route6_data))
if utils.diff(bond_rule_path, rule_data):
update_files[bond_rule_path] = rule_data
# Infiniband interfaces are handled similarly to Ethernet interfaces
for interface_name, iface_data in self.ib_interface_data.items():
route_data = self.route_data.get(interface_name, '')
route6_data = self.route6_data.get(interface_name, '')
rule_data = self.rule_data.get(interface_name, '')
interface_path = self.root_dir + ifcfg_config_path(interface_name)
route_path = self.root_dir + route_config_path(interface_name)
route6_path = self.root_dir + route6_config_path(interface_name)
rule_path = self.root_dir + route_rule_config_path(interface_name)
all_file_names.append(interface_path)
all_file_names.append(route_path)
all_file_names.append(route6_path)
all_file_names.append(rule_path)
# TODO(dsneddon) determine if InfiniBand can be used with IVS
if "IVS_BRIDGE" in iface_data:
ivs_uplinks.append(interface_name)
if utils.diff(interface_path, iface_data):
if self.ifcfg_requires_restart(interface_path, iface_data):
restart_interfaces.append(interface_name)
else:
apply_interfaces.append(
(interface_name, interface_path, iface_data))
update_files[interface_path] = iface_data
else:
logger.info('No changes required for InfiniBand iface: %s' %
interface_name)
if utils.diff(route_path, route_data):
update_files[route_path] = route_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route_data))
if utils.diff(route6_path, route6_data):
update_files[route6_path] = route6_data
if interface_name not in restart_interfaces:
apply_routes.append((interface_name, route6_data))
if utils.diff(rule_path, rule_data):
update_files[rule_path] = rule_data
if interface_name not in restart_interfaces:
apply_rules.append((interface_name, rule_data))
# NOTE(hjensas): Process the VLAN's last so that we know if the vlan's
# parent interface is being restarted.
for vlan_name, vlan_data in self.vlan_data.items():
route_data = self.route_data.get(vlan_name, '')
route6_data = self.route6_data.get(vlan_name, '')
rule_data = self.rule_data.get(vlan_name, '')
vlan_path = self.root_dir + ifcfg_config_path(vlan_name)
vlan_route_path = self.root_dir + route_config_path(vlan_name)
vlan_route6_path = self.root_dir + route6_config_path(vlan_name)
vlan_rule_path = self.root_dir + route_rule_config_path(vlan_name)
all_file_names.append(vlan_path)
all_file_names.append(vlan_route_path)
all_file_names.append(vlan_route6_path)
all_file_names.append(vlan_rule_path)
restarts_concatenated = itertools.chain(restart_interfaces,
restart_bridges,
restart_linux_bonds,
restart_linux_teams)
if (self.parse_ifcfg(vlan_data).get('PHYSDEV') in
restarts_concatenated):
if vlan_name not in restart_vlans:
restart_vlans.append(vlan_name)
update_files[vlan_path] = vlan_data
elif utils.diff(vlan_path, vlan_data):
if self.ifcfg_requires_restart(vlan_path, vlan_data):
restart_vlans.append(vlan_name)
else:
apply_interfaces.append(
(vlan_name, vlan_path, vlan_data))
update_files[vlan_path] = vlan_data
else:
logger.info('No changes required for vlan interface: %s' %
vlan_name)
if utils.diff(vlan_route_path, route_data):
update_files[vlan_route_path] = route_data
if vlan_name not in restart_vlans:
apply_routes.append((vlan_name, route_data))
if utils.diff(vlan_route6_path, route6_data):
update_files[vlan_route6_path] = route6_data
if vlan_name not in restart_vlans:
apply_routes.append((vlan_name, route6_data))
if utils.diff(vlan_rule_path, rule_data):
update_files[vlan_rule_path] = rule_data
if vlan_name not in restart_vlans:
apply_rules.append((vlan_name, rule_data))
for ib_child_name, ib_child_data in self.ib_childs_data.items():
route_data = self.route_data.get(ib_child_name, '')
route6_data = self.route6_data.get(ib_child_name, '')
rule_data = self.rule_data.get(ib_child_name, '')
ib_child_path = self.root_dir + ifcfg_config_path(ib_child_name)
ib_child_route_path = \
self.root_dir + route_config_path(ib_child_name)
ib_child_route6_path = \
self.root_dir + route6_config_path(ib_child_name)
ib_child_rule_path = \
self.root_dir + route_rule_config_path(ib_child_name)
all_file_names.append(ib_child_path)
all_file_names.append(ib_child_route_path)
all_file_names.append(ib_child_route6_path)
all_file_names.append(ib_child_rule_path)
restarts_concatenated = itertools.chain(restart_interfaces,
restart_bridges,
restart_linux_bonds,
restart_linux_teams)
if (self.parse_ifcfg(ib_child_data).get('PHYSDEV') in
restarts_concatenated):
if ib_child_name not in restart_ib_childs:
restart_ib_childs.append(ib_child_name)
update_files[ib_child_path] = ib_child_data
elif utils.diff(ib_child_path, ib_child_data):
if self.ifcfg_requires_restart(ib_child_path, ib_child_data):
restart_ib_childs.append(ib_child_name)
else:
apply_interfaces.append(
(ib_child_name, ib_child_path, ib_child_data))
update_files[ib_child_path] = ib_child_data
else:
logger.info('No changes required for the ib child interface: '
'%s' % ib_child_name)
if utils.diff(ib_child_route_path, route_data):
update_files[ib_child_route_path] = route_data
if ib_child_name not in restart_ib_childs:
apply_routes.append((ib_child_name, route_data))
if utils.diff(ib_child_route6_path, route6_data):
update_files[ib_child_route6_path] = route6_data
if ib_child_name not in restart_ib_childs:
apply_routes.append((ib_child_name, route6_data))
if utils.diff(ib_child_rule_path, rule_data):
update_files[ib_child_rule_path] = rule_data
if ib_child_name not in restart_ib_childs:
apply_rules.append((ib_child_name, rule_data))
if self.vpp_interface_data or self.vpp_bond_data:
vpp_path = self.root_dir + vpp_config_path()
vpp_config = utils.generate_vpp_config(vpp_path, vpp_interfaces,
vpp_bonds)
if utils.diff(vpp_path, vpp_config):
restart_vpp = True
update_files[vpp_path] = vpp_config
else:
logger.info('No changes required for VPP')
if cleanup:
for ifcfg_file in glob.iglob(cleanup_pattern()):
if ifcfg_file not in all_file_names:
interface_name = ifcfg_file[len(cleanup_pattern()) - 1:]
if interface_name != 'lo':
logger.info('cleaning up interface: %s'
% interface_name)
self.ifdown(interface_name)
self.remove_config(ifcfg_file)
if activate:
for interface in apply_interfaces:
logger.debug('Running ip commands on interface: %s' %
interface[0])
commands = self.iproute2_apply_commands(interface[0],
interface[1],
interface[2])
for command in commands:
try:
args = command.split()
self.execute('Running ip %s' % command, ipcmd, *args)
except Exception as e:
logger.warning("Error in 'ip %s', restarting %s:\n%s" %
(command, interface[0], str(e)))
restart_interfaces.append(interface[0])
restart_interfaces.extend(
self.child_members(interface[0]))
break
for bridge in apply_bridges:
logger.debug('Running ip commands on bridge: %s' %
bridge[0])
commands = self.iproute2_apply_commands(bridge[0],
bridge[1],
bridge[2])
for command in commands:
try:
args = command.split()
self.execute('Running ip %s' % command, ipcmd, *args)
except Exception as e:
logger.warning("Error in 'ip %s', restarting %s:\n%s" %
(command, bridge[0], str(e)))
restart_bridges.append(bridge[0])
restart_interfaces.extend(
self.child_members(bridge[0]))
break
for interface in apply_routes:
logger.debug('Applying routes for interface %s' % interface[0])
filename = self.root_dir + route_config_path(interface[0])
commands = self.iproute2_route_commands(filename, interface[1])
for command in commands:
args = command.split()
try:
if len(args) > 0:
self.execute('Running ip %s' % command, ipcmd,
*args)
except Exception as e:
logger.warning("Error in 'ip %s', restarting %s:\n%s" %
(command, interface[0], str(e)))
restart_interfaces.append(interface[0])
restart_interfaces.extend(
self.child_members(interface[0]))
break
for interface in apply_rules:
logger.debug('Applying rules for interface %s' % interface[0])
filename = self.root_dir + route_rule_config_path(interface[0])
commands = self.iproute2_rule_commands(filename, interface[1])
for command in commands:
args = command.split()
try:
if len(args) > 0:
self.execute('Running ip %s' % command, ipcmd,
*args)
except Exception as e:
logger.warning("Error in 'ip %s', restarting %s:\n%s" %
(command, interface[0], str(e)))
restart_interfaces.append(interface[0])
restart_interfaces.extend(
self.child_members(interface[0]))
break
for vlan in restart_vlans:
self.ifdown(vlan)
for ib_child in restart_ib_childs:
self.ifdown(ib_child)
for interface in restart_interfaces:
self.ifdown(interface)
for bond in linux_bond_children:
if interface in linux_bond_children[bond]:
if bond not in restart_linux_bonds:
start_linux_bonds.append(bond)
for linux_bond in restart_linux_bonds:
self.ifdown(linux_bond)
for linux_team in restart_linux_teams:
self.ifdown(linux_team)
for bridge in restart_bridges:
self.ifdown(bridge, iftype='bridge')
for vpp_interface in vpp_interfaces:
self.ifdown(vpp_interface.name)
for oldname, newname in self.renamed_interfaces.items():
self.ifrename(oldname, newname)
            # DPDK initialization is done before running os-net-config, to make
            # the DPDK ports available when enabled. DPDK hotplug support is
            # available only in OvS 2.7 and later. Until then, OvS needs to be
            # restarted after adding a DPDK port. This change will be removed
            # on migration to OvS 2.7, where DPDK hotplug support is available.
if ovs_needs_restart:
msg = "Restart openvswitch"
self.execute(msg, '/usr/bin/systemctl',
'restart', 'openvswitch')
for location, data in update_files.items():
self.write_config(location, data)
if self.route_table_data:
location = route_table_config_path()
data = self.generate_route_table_config(self.route_table_data)
self.write_config(location, data)
if ivs_uplinks or ivs_interfaces:
location = ivs_config_path()
data = self.generate_ivs_config(ivs_uplinks, ivs_interfaces)
if (utils.diff(location, data)):
self.write_config(location, data)
msg = "Restart ivs"
self.execute(msg, '/usr/bin/systemctl',
'restart', 'ivs')
if nfvswitch_interfaces or nfvswitch_internal_ifaces:
location = nfvswitch_config_path()
data = self.generate_nfvswitch_config(nfvswitch_interfaces,
nfvswitch_internal_ifaces)
if (utils.diff(location, data)):
self.write_config(location, data)
msg = "Restart nfvswitch"
self.execute(msg, '/usr/bin/systemctl',
'restart', 'nfvswitch')
if activate:
for linux_team in restart_linux_teams:
self.ifup(linux_team)
for bridge in restart_bridges:
self.ifup(bridge, iftype='bridge')
# If dhclient is running and dhcp not set, stop dhclient
for interface in stop_dhclient_interfaces:
logger.debug("Calling stop_dhclient_interfaces() for %s" %
interface)
if not self.noop:
stop_dhclient_process(interface)
for interface in restart_interfaces:
self.ifup(interface)
for linux_bond in start_linux_bonds:
if linux_bond not in restart_linux_bonds:
restart_linux_bonds.append(linux_bond)
for linux_bond in restart_linux_bonds:
self.ifup(linux_bond)
for bond in self.bond_primary_ifaces:
self.ovs_appctl('bond/set-active-slave', bond,
self.bond_primary_ifaces[bond])
if ivs_uplinks or ivs_interfaces:
logger.info("Attach to ivs with "
"uplinks: %s, "
"interfaces: %s" %
(ivs_uplinks, ivs_interfaces))
for ivs_uplink in ivs_uplinks:
self.ifup(ivs_uplink)
for ivs_interface in ivs_interfaces:
self.ifup(ivs_interface)
if nfvswitch_interfaces or nfvswitch_internal_ifaces:
logger.info("Attach to nfvswitch with "
"interfaces: %s, "
"internal interfaces: %s" %
(nfvswitch_interfaces, nfvswitch_internal_ifaces))
for nfvswitch_interface in nfvswitch_interfaces:
self.ifup(nfvswitch_interface)
for nfvswitch_internal in nfvswitch_internal_ifaces:
self.ifup(nfvswitch_internal)
for ib_child in restart_ib_childs:
self.ifup(ib_child)
for vlan in restart_vlans:
self.ifup(vlan)
if not self.noop:
if restart_vpp:
logger.info('Restarting VPP')
utils.restart_vpp(vpp_interfaces)
if self.vpp_interface_data:
logger.info('Updating VPP mapping')
utils.update_vpp_mapping(vpp_interfaces, vpp_bonds)
if self.errors:
message = 'Failure(s) occurred when applying configuration'
logger.error(message)
for e in self.errors:
logger.error('stdout: %s, stderr: %s', e.stdout, e.stderr)
raise os_net_config.ConfigurationError(message)
return update_files
| apache-2.0 | -6,009,259,872,810,658,000 | 44.492348 | 79 | 0.551933 | false |
fcecin/infinitum | share/qt/extract_strings_qt.py | 1 | 2538 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/infinitumstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
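# Illustrative example (hypothetical message) of the xgettext output parsed
# above:
#
#   msgid "Error loading block database."
#   msgstr ""
#
# parse_po() would return [(['"Error loading block database."'], ['""'])].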
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *infinitum_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("infinitum-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("infinitum-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("infinitum-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("infinitum-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit | -8,116,536,709,440,369,000 | 28.172414 | 108 | 0.613869 | false |
OpenBfS/dokpool-plone | Plone/src/elan.sitrep/elan/sitrep/portlets/srmoduleedit.py | 1 | 1625 | # -*- coding: utf-8 -*-
from elan.sitrep import DocpoolMessageFactory as _
from plone.app.portlets.portlets import base
from plone.portlets.interfaces import IPortletDataProvider
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implementer
# This interface defines the configurable options (if any) for the portlet.
# It will be used to generate add and edit forms. In this case, we don't
# have an edit form, since there are no editable options.
class ISRModuleEditPortlet(IPortletDataProvider):
pass
# The assignment is a persistent object used to store the configuration of
# a particular instantiation of the portlet.
@implementer(ISRModuleEditPortlet)
class Assignment(base.Assignment):
@property
def title(self):
return _(u"SRModuleEdit")
# The renderer is like a view (in fact, like a content provider/viewlet). The
# item self.data will typically be the assignment (although it is possible
# that the assignment chooses to return a different object - see
# base.Assignment).
class Renderer(base.Renderer):
# render() will be called to render the portlet
render = ViewPageTemplateFile('srmoduleedit.pt')
@property
def available(self):
return self.isEditMode()
def isEditMode(self):
"""
"""
path = self.request.get("PATH_INFO", "")
if path.endswith("/edit") or path.endswith("/@@edit"):
return True
class AddForm(base.NullAddForm):
# This method must be implemented to actually construct the object.
def create(self):
return Assignment()
| gpl-3.0 | 2,800,916,112,963,199,500 | 25.639344 | 77 | 0.721846 | false |
egbertbouman/tribler-g | Tribler/Test/Core/Subtitles/MetadataDomainObjects/test_MetadataDTO.py | 1 | 7047 | # Written by Andrea Reale
# see LICENSE.txt for license information
import unittest
from Tribler.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
import Tribler.Core.Subtitles.MetadataDomainObjects.MetadataDTO as MDUtil
from Tribler.Core.Overlay.permid import generate_keypair
from Tribler.Core.CacheDB.sqlitecachedb import bin2str
import time
from Tribler.Core.BitTornado.bencode import bdecode
from Tribler.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
from Tribler.Core.Subtitles.MetadataDomainObjects.Languages import LanguagesProvider
from Tribler.Core.Utilities.utilities import str2bin
import os.path
from struct import pack
RES_DIR = os.path.join('..','..','..','subtitles_test_res')
test_keypair = generate_keypair()
test_perm_id = str(test_keypair.pub().get_der())
class TestMetadataDTO(unittest.TestCase):
def setUp(self):
self._srtSubs = {"eng": os.path.join(RES_DIR, "fake.srt"),"ita": os.path.join(RES_DIR,"fake1.srt"), "rus" : os.path.join(RES_DIR, "fake2.srt")}
def testMetadataDTOInit(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
self.assertFalse(dto is None)
self.assertEqual(test_perm_id,dto.channel)
self.assertEquals(badInfohash,dto.infohash)
current = time.time()
self.assertTrue(current -1 <= int(dto.timestamp) <= current)
self.assertEquals("",dto.description)
self.assertEquals({}, dto._subtitles)
self.assertTrue(dto.signature is None)
def test_packData(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
dto.description = u"Sample Description\u041f"
bla = dto._packData()
decoded = bdecode(bla)
self.assertTrue(len(decoded) == 6)
decodedChannelId = decoded[0]
decodedInfohash = decoded[1]
decodedDescription = decoded[2].decode("utf-8")
decodedTimestamp = decoded[3]
bin_decodedBitmask = decoded[4]
decodedBitmask, = unpack("!L", bin_decodedBitmask)
self.assertEquals(dto.channel, decodedChannelId)
self.assertEquals(dto.infohash, decodedInfohash)
self.assertEquals(dto.description,decodedDescription)
self.assertAlmostEquals(dto.timestamp,decodedTimestamp)
self.assertEquals(0,decodedBitmask)
self.assertEquals(0,len(decoded[5]))
def test_packDataWithSubs(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
for sub in subtitles :
sub.computeChecksum()
dto.addSubtitle(sub)
packed = dto._packData()
decoded = bdecode(packed)
self.assertTrue(len(decoded) == 6)
decodedChannelId = decoded[0]
decodedInfohash = decoded[1]
decodedDescription = decoded[2]
decodedTimestamp = decoded[3]
decodedBitmask = decoded[4]
checksums = decoded[5]
expectedMask = \
LanguagesProvider.getLanguagesInstance().langCodesToMask(self._srtSubs.keys())
        binaryExpected = pack("!L", expectedMask)
self.assertEquals(dto.channel, decodedChannelId)
self.assertEquals(dto.infohash, decodedInfohash)
self.assertEquals(dto.description,decodedDescription)
self.assertAlmostEquals(dto.timestamp,decodedTimestamp)
        self.assertEquals(binaryExpected, decodedBitmask)
self.assertEquals(3,len(checksums))
subs = dto.getAllSubtitles()
i=0
for key in sorted(subs.iterkeys()):
self.assertEquals(subs[key].checksum, checksums[i])
i += 1
def testSignature(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
dto.sign(test_keypair)
self.assertTrue(dto.verifySignature())
dto.timestamp = 2
ok = dto.verifySignature()
self.assertFalse(ok)
def testSignatureOnChecksums(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
for sub in subtitles :
sub.computeChecksum()
dto.addSubtitle(sub)
dto.sign(test_keypair)
self.assertTrue(dto.verifySignature())
dto.getSubtitle("rus").checksum = "ABCDEFGHILMOPQRS"
self.assertFalse(dto.verifySignature())
def testSerialize(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
dto.description = u"Sample Description"
dto.sign(test_keypair)
serialized = dto.serialize()
self.assertEquals(7, len(serialized))
signature = serialized[6]
self.assertEquals(dto.signature,signature)
#the rest is tested with test_packData
def testSerializeWithSubs(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
for sub in subtitles :
sub.computeChecksum()
dto.addSubtitle(sub)
dto.sign(test_keypair)
serial = dto.serialize()
decoded = serial
self.assertEquals(7, len(decoded))
signature = decoded[6]
self.assertEquals(dto.signature,signature)
#the rest is tested with test_packDataWithSubs
    def testDeserialize(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
dto.description = u"Sample Description"
dto.sign(test_keypair)
serialized = dto.serialize()
newDto = MDUtil.deserialize(serialized)
self.assertEquals(dto,newDto)
def testDeserializeWithSubs(self):
badInfohash = str2bin("GEh/o8rtTLB1wZJzFcSZSS4u9qo=")
dto = MetadataDTO(test_perm_id, badInfohash)
subtitles = [SubtitleInfo(lang,path) for lang,path in self._srtSubs.iteritems()]
for sub in subtitles :
sub.computeChecksum()
dto.addSubtitle(sub)
dto.sign(test_keypair)
serial = dto.serialize()
newDto = MDUtil.deserialize(serial)
self.assertEquals(dto,newDto)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestMetadataDTO)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testMetadataDTOInit']
unittest.main()
| lgpl-2.1 | 3,202,050,947,913,541,000 | 35.703125 | 151 | 0.645097 | false |
srusskih/SublimeJEDI | dependencies/jedi/inference/gradual/typing.py | 1 | 17752 | """
We need to somehow work with the typing objects. Since the typing objects are
pretty bare we need to add all the Jedi customizations to make them work as
values.
This file deals with all the typing.py cases.
"""
import itertools
from jedi._compatibility import unicode
from jedi import debug
from jedi.inference.compiled import builtin_from_name, create_simple_object
from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
LazyValueWrapper
from jedi.inference.lazy_value import LazyKnownValues
from jedi.inference.arguments import repack_with_argument_clinic
from jedi.inference.filters import FilterWrapper
from jedi.inference.names import NameWrapper, ValueName
from jedi.inference.value.klass import ClassMixin
from jedi.inference.gradual.base import BaseTypingValue, BaseTypingValueWithGenerics
from jedi.inference.gradual.type_var import TypeVarClass
from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
_PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split()
_TYPE_ALIAS_TYPES = {
'List': 'builtins.list',
'Dict': 'builtins.dict',
'Set': 'builtins.set',
'FrozenSet': 'builtins.frozenset',
'ChainMap': 'collections.ChainMap',
'Counter': 'collections.Counter',
'DefaultDict': 'collections.defaultdict',
'Deque': 'collections.deque',
}
_PROXY_TYPES = 'Optional Union ClassVar'.split()
class TypingModuleName(NameWrapper):
def infer(self):
return ValueSet(self._remap())
def _remap(self):
name = self.string_name
inference_state = self.parent_context.inference_state
try:
actual = _TYPE_ALIAS_TYPES[name]
except KeyError:
pass
else:
yield TypeAlias.create_cached(
inference_state, self.parent_context, self.tree_name, actual)
return
if name in _PROXY_CLASS_TYPES:
yield ProxyTypingClassValue.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name in _PROXY_TYPES:
yield ProxyTypingValue.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'runtime':
# We don't want anything here, not sure what this function is
# supposed to do, since it just appears in the stubs and shouldn't
# have any effects there (because it's never executed).
return
elif name == 'TypeVar':
yield TypeVarClass.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'Any':
yield Any.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'TYPE_CHECKING':
# This is needed for e.g. imports that are only available for type
# checking or are in cycles. The user can then check this variable.
yield builtin_from_name(inference_state, u'True')
elif name == 'overload':
yield OverloadFunction.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'NewType':
yield NewTypeFunction.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'cast':
yield CastFunction.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'TypedDict':
# TODO doesn't even exist in typeshed/typing.py, yet. But will be
# added soon.
yield TypedDictBase.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name in ('no_type_check', 'no_type_check_decorator'):
# This is not necessary, as long as we are not doing type checking.
for c in self._wrapped_name.infer(): # Fuck my life Python 2
yield c
else:
# Everything else shouldn't be relevant for type checking.
for c in self._wrapped_name.infer(): # Fuck my life Python 2
yield c
class TypingModuleFilterWrapper(FilterWrapper):
name_wrapper_class = TypingModuleName
class TypingValueWithIndex(BaseTypingValueWithGenerics):
def execute_annotation(self):
string_name = self._tree_name.value
if string_name == 'Union':
# This is kind of a special case, because we have Unions (in Jedi
# ValueSets).
return self.gather_annotation_classes().execute_annotation()
elif string_name == 'Optional':
# Optional is basically just saying it's either None or the actual
# type.
return self.gather_annotation_classes().execute_annotation() \
| ValueSet([builtin_from_name(self.inference_state, u'None')])
elif string_name == 'Type':
# The type is actually already given in the index_value
return self._generics_manager[0]
elif string_name == 'ClassVar':
# For now don't do anything here, ClassVars are always used.
return self._generics_manager[0].execute_annotation()
mapped = {
'Tuple': Tuple,
'Generic': Generic,
'Protocol': Protocol,
'Callable': Callable,
}
cls = mapped[string_name]
return ValueSet([cls(
self.parent_context,
self._tree_name,
generics_manager=self._generics_manager,
)])
def gather_annotation_classes(self):
return ValueSet.from_sets(self._generics_manager.to_tuple())
def _create_instance_with_generics(self, generics_manager):
return TypingValueWithIndex(
self.parent_context,
self._tree_name,
generics_manager
)
class ProxyTypingValue(BaseTypingValue):
index_class = TypingValueWithIndex
def with_generics(self, generics_tuple):
return self.index_class.create_cached(
self.inference_state,
self.parent_context,
self._tree_name,
generics_manager=TupleGenericManager(generics_tuple)
)
def py__getitem__(self, index_value_set, contextualized_node):
return ValueSet(
self.index_class.create_cached(
self.inference_state,
self.parent_context,
self._tree_name,
generics_manager=LazyGenericManager(
context_of_index=contextualized_node.context,
index_value=index_value,
)
) for index_value in index_value_set
)
class _TypingClassMixin(ClassMixin):
def py__bases__(self):
return [LazyKnownValues(
self.inference_state.builtins_module.py__getattribute__('object')
)]
def get_metaclasses(self):
return []
@property
def name(self):
return ValueName(self, self._tree_name)
class TypingClassValueWithIndex(_TypingClassMixin, TypingValueWithIndex):
def infer_type_vars(self, value_set, is_class_value=False):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
type_var_dict = {}
annotation_generics = self.get_generics()
if not annotation_generics:
return type_var_dict
annotation_name = self.py__name__()
if annotation_name == 'Type':
if is_class_value:
# This only applies if we are comparing something like
# List[Type[int]] with Iterable[Type[int]]. First, Jedi tries to
# match List/Iterable. After that we will land here, because
# is_class_value will be True at that point. Obviously we also
# compare below that both sides are `Type`.
for element in value_set:
element_name = element.py__name__()
if element_name == 'Type':
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self, element),
)
else:
return annotation_generics[0].infer_type_vars(
value_set,
is_class_value=True,
)
elif annotation_name == 'Callable':
if len(annotation_generics) == 2:
if is_class_value:
# This only applies if we are comparing something like
# List[Callable[..., T]] with Iterable[Callable[..., T]].
# First, Jedi tries to match List/Iterable. After that we
# will land here, because is_class_value will be True at
# that point. Obviously we also compare below that both
# sides are `Callable`.
for element in value_set:
element_name = element.py__name__()
if element_name == 'Callable':
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self, element),
)
else:
return annotation_generics[1].infer_type_vars(
value_set.execute_annotation(),
)
elif annotation_name == 'Tuple':
tuple_annotation, = self.execute_annotation()
return tuple_annotation.infer_type_vars(value_set, is_class_value)
return type_var_dict
class ProxyTypingClassValue(_TypingClassMixin, ProxyTypingValue):
index_class = TypingClassValueWithIndex
class TypeAlias(LazyValueWrapper):
def __init__(self, parent_context, origin_tree_name, actual):
self.inference_state = parent_context.inference_state
self.parent_context = parent_context
self._origin_tree_name = origin_tree_name
self._actual = actual # e.g. builtins.list
@property
def name(self):
return ValueName(self, self._origin_tree_name)
def py__name__(self):
return self.name.string_name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._actual)
def _get_wrapped_value(self):
module_name, class_name = self._actual.split('.')
if self.inference_state.environment.version_info.major == 2 and module_name == 'builtins':
module_name = '__builtin__'
# TODO use inference_state.import_module?
from jedi.inference.imports import Importer
module, = Importer(
self.inference_state, [module_name], self.inference_state.builtins_module
).follow()
classes = module.py__getattribute__(class_name)
# There should only be one, because it's code that we control.
assert len(classes) == 1, classes
cls = next(iter(classes))
return cls
def gather_annotation_classes(self):
return ValueSet([self._get_wrapped_value()])
class Callable(BaseTypingValueWithGenerics):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
class Tuple(BaseTypingValueWithGenerics):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
def infer_type_vars(self, value_set, is_class_value=False):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
from jedi.inference.gradual.base import GenericClass
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like an iterable sequence
# rather than a positional container of elements.
return self.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
py_class = element.get_annotated_class_object()
if not isinstance(py_class, GenericClass):
py_class = element
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self, py_class),
)
return type_var_dict
class Generic(BaseTypingValueWithGenerics):
pass
class Protocol(BaseTypingValueWithGenerics):
pass
class Any(BaseTypingValue):
def execute_annotation(self):
debug.warning('Used Any - returned no results')
return NO_VALUES
class OverloadFunction(BaseTypingValue):
@repack_with_argument_clinic('func, /')
def py__call__(self, func_value_set):
# Just pass arguments through.
return func_value_set
class NewTypeFunction(BaseTypingValue):
def py__call__(self, arguments):
ordered_args = arguments.unpack()
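        # the first unpacked argument is the name given to the new type; only the second (the base type) matters here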
next(ordered_args, (None, None))
_, second_arg = next(ordered_args, (None, None))
if second_arg is None:
return NO_VALUES
return ValueSet(
NewType(
self.inference_state,
contextualized_node.context,
contextualized_node.node,
second_arg.infer(),
) for contextualized_node in arguments.get_calling_nodes())
class NewType(Value):
def __init__(self, inference_state, parent_context, tree_node, type_value_set):
super(NewType, self).__init__(inference_state, parent_context)
self._type_value_set = type_value_set
self.tree_node = tree_node
def py__class__(self):
c, = self._type_value_set.py__class__()
return c
def py__call__(self, arguments):
return self._type_value_set.execute_annotation()
@property
def name(self):
from jedi.inference.compiled.value import CompiledValueName
return CompiledValueName(self, 'NewType')
class CastFunction(BaseTypingValue):
@repack_with_argument_clinic('type, object, /')
def py__call__(self, type_value_set, object_value_set):
return type_value_set.execute_annotation()
class TypedDictBase(BaseTypingValue):
"""
This class has no responsibilities and is just here to make sure that typed
dicts can be identified.
"""
class TypedDict(LazyValueWrapper):
"""Represents the instance version of ``TypedDictClass``."""
def __init__(self, definition_class):
self.inference_state = definition_class.inference_state
self.parent_context = definition_class.parent_context
self.tree_node = definition_class.tree_node
self._definition_class = definition_class
@property
def name(self):
return ValueName(self, self.tree_node.name)
def py__simple_getitem__(self, index):
if isinstance(index, unicode):
return ValueSet.from_sets(
name.infer()
for filter in self._definition_class.get_filters(is_instance=True)
for name in filter.get(index)
)
return NO_VALUES
def get_key_values(self):
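        # gather every declared key name of the TypedDict as a string value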
filtered_values = itertools.chain.from_iterable((
f.values()
for f in self._definition_class.get_filters(is_instance=True)
))
return ValueSet({
create_simple_object(self.inference_state, v.string_name)
for v in filtered_values
})
def _get_wrapped_value(self):
d, = self.inference_state.builtins_module.py__getattribute__('dict')
result, = d.execute_with_values()
return result
| mit | 1,786,058,261,746,276,600 | 36.294118 | 99 | 0.603369 | false |
ZeroCater/Eyrie | interface/migrations/0001_initial.py | 1 | 1650 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-14 21:17
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.TextField(unique=True)),
('webhook_id', models.IntegerField(blank=True, null=True)),
('is_private', models.BooleanField(default=True)),
('wiki_branch', models.TextField(default='master')),
('disabled', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['full_name'],
},
),
migrations.CreateModel(
name='UserProxy',
fields=[
],
options={
'proxy': True,
},
bases=('auth.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='repo',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repos', to='interface.UserProxy'),
),
]
| mit | 629,217,487,608,357,400 | 31.352941 | 129 | 0.533939 | false |
jeremiahyan/odoo | addons/crm/tests/test_crm_pls.py | 1 | 31898 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import tools
from odoo.addons.mail.tests.common import mail_new_test_user
from odoo.fields import Date
from odoo.tests import Form, tagged, users
from odoo.tests.common import TransactionCase
@tagged('crm_lead_pls')
class TestCRMPLS(TransactionCase):
@classmethod
def setUpClass(cls):
""" Keep a limited setup to ensure tests are not impacted by other
records created in CRM common. """
super(TestCRMPLS, cls).setUpClass()
cls.company_main = cls.env.user.company_id
cls.user_sales_manager = mail_new_test_user(
cls.env, login='user_sales_manager',
name='Martin PLS Sales Manager', email='[email protected]',
company_id=cls.company_main.id,
notification_type='inbox',
groups='sales_team.group_sale_manager,base.group_partner_manager',
)
cls.pls_team = cls.env['crm.team'].create({
'name': 'PLS Team',
})
def _get_lead_values(self, team_id, name_suffix, country_id, state_id, email_state, phone_state, source_id, stage_id):
return {
'name': 'lead_' + name_suffix,
'type': 'opportunity',
'state_id': state_id,
'email_state': email_state,
'phone_state': phone_state,
'source_id': source_id,
'stage_id': stage_id,
'country_id': country_id,
'team_id': team_id
}
def generate_leads_with_tags(self, tag_ids):
Lead = self.env['crm.lead']
team_id = self.env['crm.team'].create({
'name': 'blup',
}).id
leads_to_create = []
for i in range(150):
if i < 50: # tag 1
leads_to_create.append({
'name': 'lead_tag_%s' % str(i),
'tag_ids': [(4, tag_ids[0])],
'team_id': team_id
})
elif i < 100: # tag 2
leads_to_create.append({
'name': 'lead_tag_%s' % str(i),
'tag_ids': [(4, tag_ids[1])],
'team_id': team_id
})
else: # tag 1 and 2
leads_to_create.append({
'name': 'lead_tag_%s' % str(i),
'tag_ids': [(6, 0, tag_ids)],
'team_id': team_id
})
leads_with_tags = Lead.create(leads_to_create)
return leads_with_tags
def test_crm_lead_pls_update(self):
""" We test here that the wizard for updating probabilities from settings
        gets the correct values from the config params and that, after updating values
        from the wizard, the config params are correctly updated.
"""
# Set the PLS config
frequency_fields = self.env['crm.lead.scoring.frequency.field'].search([])
pls_fields_str = ','.join(frequency_fields.mapped('field_id.name'))
pls_start_date_str = "2021-01-01"
IrConfigSudo = self.env['ir.config_parameter'].sudo()
IrConfigSudo.set_param("crm.pls_start_date", pls_start_date_str)
IrConfigSudo.set_param("crm.pls_fields", pls_fields_str)
date_to_update = "2021-02-02"
fields_to_remove = frequency_fields.filtered(lambda f: f.field_id.name in ['source_id', 'lang_id'])
fields_after_updation_str = ','.join((frequency_fields - fields_to_remove).mapped('field_id.name'))
# Check that wizard to update lead probabilities has correct value set by default
pls_update_wizard = Form(self.env['crm.lead.pls.update'])
with pls_update_wizard:
self.assertEqual(Date.to_string(pls_update_wizard.pls_start_date), pls_start_date_str, 'Correct date is taken from config')
self.assertEqual(','.join([f.field_id.name for f in pls_update_wizard.pls_fields]), pls_fields_str, 'Correct fields are taken from config')
# Update the wizard values and check that config values and probabilities are updated accordingly
pls_update_wizard.pls_start_date = date_to_update
for field in fields_to_remove:
pls_update_wizard.pls_fields.remove(field.id)
pls_update_wizard0 = pls_update_wizard.save()
pls_update_wizard0.action_update_crm_lead_probabilities()
# Config params should have been updated
self.assertEqual(IrConfigSudo.get_param("crm.pls_start_date"), date_to_update, 'Correct date is updated in config')
self.assertEqual(IrConfigSudo.get_param("crm.pls_fields"), fields_after_updation_str, 'Correct fields are updated in config')
def test_predictive_lead_scoring(self):
""" We test here computation of lead probability based on PLS Bayes.
        We will use 3 different values for each possible variable:
country_id : 1,2,3
state_id: 1,2,3
email_state: correct, incorrect, None
phone_state: correct, incorrect, None
source_id: 1,2,3
stage_id: 1,2,3 + the won stage
And we will compute all of this for 2 different team_id
        Note: We assume here that the original Bayes computation is correct
        as we don't compute the probabilities manually."""
Lead = self.env['crm.lead']
LeadScoringFrequency = self.env['crm.lead.scoring.frequency']
state_values = ['correct', 'incorrect', None]
source_ids = self.env['utm.source'].search([], limit=3).ids
state_ids = self.env['res.country.state'].search([], limit=3).ids
country_ids = self.env['res.country'].search([], limit=3).ids
stage_ids = self.env['crm.stage'].search([], limit=3).ids
won_stage_id = self.env['crm.stage'].search([('is_won', '=', True)], limit=1).id
team_ids = self.env['crm.team'].create([{'name': 'Team Test 1'}, {'name': 'Team Test 2'}, {'name': 'Team Test 3'}]).ids
# create bunch of lost and won crm_lead
leads_to_create = []
# for team 1
for i in range(3):
leads_to_create.append(
self._get_lead_values(team_ids[0], 'team_1_%s' % str(i), country_ids[i], state_ids[i], state_values[i], state_values[i], source_ids[i], stage_ids[i]))
leads_to_create.append(
self._get_lead_values(team_ids[0], 'team_1_%s' % str(3), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[2], stage_ids[1]))
leads_to_create.append(
self._get_lead_values(team_ids[0], 'team_1_%s' % str(4), country_ids[1], state_ids[1], state_values[1], state_values[0], source_ids[1], stage_ids[0]))
# for team 2
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(5), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[1], stage_ids[2]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(6), country_ids[0], state_ids[1], state_values[0], state_values[1], source_ids[2], stage_ids[1]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(7), country_ids[0], state_ids[2], state_values[0], state_values[1], source_ids[2], stage_ids[0]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(8), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[2], stage_ids[1]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(9), country_ids[1], state_ids[0], state_values[1], state_values[0], source_ids[1], stage_ids[1]))
# for leads with no team
leads_to_create.append(
self._get_lead_values(False, 'no_team_%s' % str(10), country_ids[1], state_ids[1], state_values[2], state_values[0], source_ids[1], stage_ids[2]))
leads_to_create.append(
self._get_lead_values(False, 'no_team_%s' % str(11), country_ids[0], state_ids[1], state_values[1], state_values[1], source_ids[0], stage_ids[0]))
leads_to_create.append(
self._get_lead_values(False, 'no_team_%s' % str(12), country_ids[1], state_ids[2], state_values[0], state_values[1], source_ids[2], stage_ids[0]))
leads_to_create.append(
self._get_lead_values(False, 'no_team_%s' % str(13), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[2], stage_ids[1]))
leads = Lead.create(leads_to_create)
# assign team 3 to all leads with no teams (also take data into account).
leads_with_no_team = self.env['crm.lead'].sudo().search([('team_id', '=', False)])
leads_with_no_team.write({'team_id': team_ids[2]})
# Set the PLS config
self.env['ir.config_parameter'].sudo().set_param("crm.pls_start_date", "2000-01-01")
self.env['ir.config_parameter'].sudo().set_param("crm.pls_fields", "country_id,state_id,email_state,phone_state,source_id,tag_ids")
# set leads as won and lost
# for Team 1
leads[0].action_set_lost()
leads[1].action_set_lost()
leads[2].action_set_won()
# for Team 2
leads[5].action_set_lost()
leads[6].action_set_lost()
leads[7].action_set_won()
# Leads with no team
leads[10].action_set_won()
leads[11].action_set_lost()
leads[12].action_set_lost()
# A. Test Full Rebuild
# rebuild frequencies table and recompute automated_probability for all leads.
Lead._cron_update_automated_probabilities()
# As the cron is computing and writing in SQL queries, we need to invalidate the cache
leads.invalidate_cache()
self.assertEqual(tools.float_compare(leads[3].automated_probability, 33.49, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 7.74, 2), 0)
lead_13_team_3_proba = leads[13].automated_probability
self.assertEqual(tools.float_compare(lead_13_team_3_proba, 35.09, 2), 0)
# Probability for Lead with no teams should be based on all the leads no matter their team.
        # De-assign team 3, rebuild the frequency table and recompute.
        # Proba should be different as "no team" is not considered as a separate team.
leads_with_no_team.write({'team_id': False})
Lead._cron_update_automated_probabilities()
leads.invalidate_cache()
lead_13_no_team_proba = leads[13].automated_probability
        self.assertTrue(lead_13_team_3_proba != leads[13].automated_probability, "Probability for leads with no team should be different than if they were in their own team.")
self.assertEqual(tools.float_compare(lead_13_no_team_proba, 36.65, 2), 0)
# Test frequencies
lead_4_stage_0_freq = LeadScoringFrequency.search([('team_id', '=', leads[4].team_id.id), ('variable', '=', 'stage_id'), ('value', '=', stage_ids[0])])
lead_4_stage_won_freq = LeadScoringFrequency.search([('team_id', '=', leads[4].team_id.id), ('variable', '=', 'stage_id'), ('value', '=', won_stage_id)])
lead_4_country_freq = LeadScoringFrequency.search([('team_id', '=', leads[4].team_id.id), ('variable', '=', 'country_id'), ('value', '=', leads[4].country_id.id)])
lead_4_email_state_freq = LeadScoringFrequency.search([('team_id', '=', leads[4].team_id.id), ('variable', '=', 'email_state'), ('value', '=', str(leads[4].email_state))])
lead_9_stage_0_freq = LeadScoringFrequency.search([('team_id', '=', leads[9].team_id.id), ('variable', '=', 'stage_id'), ('value', '=', stage_ids[0])])
lead_9_stage_won_freq = LeadScoringFrequency.search([('team_id', '=', leads[9].team_id.id), ('variable', '=', 'stage_id'), ('value', '=', won_stage_id)])
lead_9_country_freq = LeadScoringFrequency.search([('team_id', '=', leads[9].team_id.id), ('variable', '=', 'country_id'), ('value', '=', leads[9].country_id.id)])
lead_9_email_state_freq = LeadScoringFrequency.search([('team_id', '=', leads[9].team_id.id), ('variable', '=', 'email_state'), ('value', '=', str(leads[9].email_state))])
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1)
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1)
self.assertEqual(lead_4_country_freq.won_count, 0.1)
self.assertEqual(lead_4_email_state_freq.won_count, 1.1)
self.assertEqual(lead_4_stage_0_freq.lost_count, 2.1)
self.assertEqual(lead_4_stage_won_freq.lost_count, 0.1)
self.assertEqual(lead_4_country_freq.lost_count, 1.1)
self.assertEqual(lead_4_email_state_freq.lost_count, 2.1)
self.assertEqual(lead_9_stage_0_freq.won_count, 1.1)
self.assertEqual(lead_9_stage_won_freq.won_count, 1.1)
self.assertEqual(lead_9_country_freq.won_count, 0.0) # frequency does not exist
self.assertEqual(lead_9_email_state_freq.won_count, 1.1)
self.assertEqual(lead_9_stage_0_freq.lost_count, 2.1)
self.assertEqual(lead_9_stage_won_freq.lost_count, 0.1)
self.assertEqual(lead_9_country_freq.lost_count, 0.0) # frequency does not exist
self.assertEqual(lead_9_email_state_freq.lost_count, 2.1)
# B. Test Live Increment
leads[4].action_set_lost()
leads[9].action_set_won()
# re-get frequencies that did not exists before
lead_9_country_freq = LeadScoringFrequency.search([('team_id', '=', leads[9].team_id.id), ('variable', '=', 'country_id'), ('value', '=', leads[9].country_id.id)])
# B.1. Test frequencies - team 1 should not impact team 2
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.won_count, 0.1) # unchanged
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_0_freq.lost_count, 3.1) # + 1
self.assertEqual(lead_4_stage_won_freq.lost_count, 0.1) # unchanged - consider stages with <= sequence when lost
self.assertEqual(lead_4_country_freq.lost_count, 2.1) # + 1
self.assertEqual(lead_4_email_state_freq.lost_count, 3.1) # + 1
self.assertEqual(lead_9_stage_0_freq.won_count, 2.1) # + 1
self.assertEqual(lead_9_stage_won_freq.won_count, 2.1) # + 1 - consider every stages when won
self.assertEqual(lead_9_country_freq.won_count, 1.1) # + 1
self.assertEqual(lead_9_email_state_freq.won_count, 2.1) # + 1
self.assertEqual(lead_9_stage_0_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_won_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_9_country_freq.lost_count, 0.1) # unchanged (did not exists before)
self.assertEqual(lead_9_email_state_freq.lost_count, 2.1) # unchanged
        # Probabilities of other leads should not be impacted as only modified leads are recomputed.
self.assertEqual(tools.float_compare(leads[3].automated_probability, 33.49, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 7.74, 2), 0)
self.assertEqual(leads[3].is_automated_probability, True)
self.assertEqual(leads[8].is_automated_probability, True)
# Restore -> Should decrease lost
leads[4].toggle_active()
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.won_count, 0.1) # unchanged
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_0_freq.lost_count, 2.1) # - 1
self.assertEqual(lead_4_stage_won_freq.lost_count, 0.1) # unchanged - consider stages with <= sequence when lost
self.assertEqual(lead_4_country_freq.lost_count, 1.1) # - 1
self.assertEqual(lead_4_email_state_freq.lost_count, 2.1) # - 1
self.assertEqual(lead_9_stage_0_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_won_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_country_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_9_email_state_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_0_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_won_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_9_country_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_9_email_state_freq.lost_count, 2.1) # unchanged
# set to won stage -> Should increase won
leads[4].stage_id = won_stage_id
self.assertEqual(lead_4_stage_0_freq.won_count, 2.1) # + 1
self.assertEqual(lead_4_stage_won_freq.won_count, 2.1) # + 1
self.assertEqual(lead_4_country_freq.won_count, 1.1) # + 1
self.assertEqual(lead_4_email_state_freq.won_count, 2.1) # + 1
self.assertEqual(lead_4_stage_0_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_4_country_freq.lost_count, 1.1) # unchanged
self.assertEqual(lead_4_email_state_freq.lost_count, 2.1) # unchanged
# Archive (was won, now lost) -> Should decrease won and increase lost
leads[4].toggle_active()
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # - 1
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # - 1
self.assertEqual(lead_4_country_freq.won_count, 0.1) # - 1
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # - 1
self.assertEqual(lead_4_stage_0_freq.lost_count, 3.1) # + 1
        self.assertEqual(lead_4_stage_won_freq.lost_count, 1.1)  # consider stages with <= sequence when lost and, as the stage is won, even won_stage lost_count is increased by 1
self.assertEqual(lead_4_country_freq.lost_count, 2.1) # + 1
self.assertEqual(lead_4_email_state_freq.lost_count, 3.1) # + 1
# Move to original stage -> Should do nothing (as lead is still lost)
leads[4].stage_id = stage_ids[0]
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.won_count, 0.1) # unchanged
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_0_freq.lost_count, 3.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.lost_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_4_email_state_freq.lost_count, 3.1) # unchanged
        # Restore -> Should decrease lost - at the end, frequencies should be like the first frequency tests (except for 0.0 -> 0.1)
leads[4].toggle_active()
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.won_count, 0.1) # unchanged
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_0_freq.lost_count, 2.1) # - 1
self.assertEqual(lead_4_stage_won_freq.lost_count, 1.1) # unchanged - consider stages with <= sequence when lost
self.assertEqual(lead_4_country_freq.lost_count, 1.1) # - 1
self.assertEqual(lead_4_email_state_freq.lost_count, 2.1) # - 1
# Probabilities should only be recomputed after modifying the lead itself.
leads[3].stage_id = stage_ids[0] # probability should only change a bit as frequencies are almost the same (except 0.0 -> 0.1)
leads[8].stage_id = stage_ids[0] # probability should change quite a lot
# Test frequencies (should not have changed)
self.assertEqual(lead_4_stage_0_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.won_count, 0.1) # unchanged
self.assertEqual(lead_4_email_state_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_4_stage_0_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_4_stage_won_freq.lost_count, 1.1) # unchanged
self.assertEqual(lead_4_country_freq.lost_count, 1.1) # unchanged
self.assertEqual(lead_4_email_state_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_0_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_won_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_country_freq.won_count, 1.1) # unchanged
self.assertEqual(lead_9_email_state_freq.won_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_0_freq.lost_count, 2.1) # unchanged
self.assertEqual(lead_9_stage_won_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_9_country_freq.lost_count, 0.1) # unchanged
self.assertEqual(lead_9_email_state_freq.lost_count, 2.1) # unchanged
# Continue to test probability computation
leads[3].probability = 40
self.assertEqual(leads[3].is_automated_probability, False)
self.assertEqual(leads[8].is_automated_probability, True)
self.assertEqual(tools.float_compare(leads[3].automated_probability, 20.87, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 2.43, 2), 0)
self.assertEqual(tools.float_compare(leads[3].probability, 40, 2), 0)
self.assertEqual(tools.float_compare(leads[8].probability, 2.43, 2), 0)
# Test modify country_id
leads[8].country_id = country_ids[1]
self.assertEqual(tools.float_compare(leads[8].automated_probability, 34.38, 2), 0)
self.assertEqual(tools.float_compare(leads[8].probability, 34.38, 2), 0)
leads[8].country_id = country_ids[0]
self.assertEqual(tools.float_compare(leads[8].automated_probability, 2.43, 2), 0)
self.assertEqual(tools.float_compare(leads[8].probability, 2.43, 2), 0)
# ----------------------------------------------
# Test tag_id frequencies and probability impact
# ----------------------------------------------
tag_ids = self.env['crm.tag'].create([
{'name': "Tag_test_1"},
{'name': "Tag_test_2"},
]).ids
# tag_ids = self.env['crm.tag'].search([], limit=2).ids
leads_with_tags = self.generate_leads_with_tags(tag_ids)
leads_with_tags[:30].action_set_lost() # 60% lost on tag 1
leads_with_tags[31:50].action_set_won() # 40% won on tag 1
leads_with_tags[50:90].action_set_lost() # 80% lost on tag 2
leads_with_tags[91:100].action_set_won() # 20% won on tag 2
leads_with_tags[100:135].action_set_lost() # 70% lost on tag 1 and 2
leads_with_tags[136:150].action_set_won() # 30% won on tag 1 and 2
# tag 1 : won = 19+14 / lost = 30+35
# tag 2 : won = 9+14 / lost = 40+35
tag_1_freq = LeadScoringFrequency.search([('variable', '=', 'tag_id'), ('value', '=', tag_ids[0])])
tag_2_freq = LeadScoringFrequency.search([('variable', '=', 'tag_id'), ('value', '=', tag_ids[1])])
self.assertEqual(tools.float_compare(tag_1_freq.won_count, 33.1, 1), 0)
self.assertEqual(tools.float_compare(tag_1_freq.lost_count, 65.1, 1), 0)
self.assertEqual(tools.float_compare(tag_2_freq.won_count, 23.1, 1), 0)
self.assertEqual(tools.float_compare(tag_2_freq.lost_count, 75.1, 1), 0)
# Force recompute - A priori, no need to do this as, for each won / lost, we increment tag frequency.
Lead._cron_update_automated_probabilities()
leads_with_tags.invalidate_cache()
lead_tag_1 = leads_with_tags[30]
lead_tag_2 = leads_with_tags[90]
lead_tag_1_2 = leads_with_tags[135]
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 33.69, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 23.51, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 28.05, 2), 0)
lead_tag_1.tag_ids = [(5, 0, 0)] # remove all tags
lead_tag_1_2.tag_ids = [(3, tag_ids[1], 0)] # remove tag 2
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 28.6, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 23.51, 2), 0) # no impact
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 33.69, 2), 0)
lead_tag_1.tag_ids = [(4, tag_ids[1])] # add tag 2
lead_tag_2.tag_ids = [(4, tag_ids[0])] # add tag 1
lead_tag_1_2.tag_ids = [(3, tag_ids[0]), (4, tag_ids[1])] # remove tag 1 / add tag 2
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 23.51, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 28.05, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 23.51, 2), 0)
# go back to initial situation
lead_tag_1.tag_ids = [(3, tag_ids[1]), (4, tag_ids[0])] # remove tag 2 / add tag 1
lead_tag_2.tag_ids = [(3, tag_ids[0])] # remove tag 1
lead_tag_1_2.tag_ids = [(4, tag_ids[0])] # add tag 1
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 33.69, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 23.51, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 28.05, 2), 0)
# set email_state for each lead and update probabilities
leads.filtered(lambda lead: lead.id % 2 == 0).email_state = 'correct'
leads.filtered(lambda lead: lead.id % 2 == 1).email_state = 'incorrect'
Lead._cron_update_automated_probabilities()
leads_with_tags.invalidate_cache()
self.assertEqual(tools.float_compare(leads[3].automated_probability, 4.21, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 0.23, 2), 0)
# remove all pls fields
self.env['ir.config_parameter'].sudo().set_param("crm.pls_fields", False)
Lead._cron_update_automated_probabilities()
leads_with_tags.invalidate_cache()
self.assertEqual(tools.float_compare(leads[3].automated_probability, 34.38, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 50.0, 2), 0)
# check if the probabilities are the same with the old param
self.env['ir.config_parameter'].sudo().set_param("crm.pls_fields", "country_id,state_id,email_state,phone_state,source_id")
Lead._cron_update_automated_probabilities()
leads_with_tags.invalidate_cache()
self.assertEqual(tools.float_compare(leads[3].automated_probability, 4.21, 2), 0)
self.assertEqual(tools.float_compare(leads[8].automated_probability, 0.23, 2), 0)
# remove tag_ids from the calculation
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 28.6, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 28.6, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 28.6, 2), 0)
lead_tag_1.tag_ids = [(5, 0, 0)] # remove all tags
lead_tag_2.tag_ids = [(4, tag_ids[0])] # add tag 1
lead_tag_1_2.tag_ids = [(3, tag_ids[1], 0)] # remove tag 2
self.assertEqual(tools.float_compare(lead_tag_1.automated_probability, 28.6, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_2.automated_probability, 28.6, 2), 0)
self.assertEqual(tools.float_compare(lead_tag_1_2.automated_probability, 28.6, 2), 0)
def test_settings_pls_start_date(self):
# We test here that settings never crash due to ill-configured config param 'crm.pls_start_date'
set_param = self.env['ir.config_parameter'].sudo().set_param
str_date_8_days_ago = Date.to_string(Date.today() - timedelta(days=8))
resConfig = self.env['res.config.settings']
set_param("crm.pls_start_date", "2021-10-10")
res_config_new = resConfig.new()
self.assertEqual(Date.to_string(res_config_new.predictive_lead_scoring_start_date),
"2021-10-10", "If config param is a valid date, date in settings should match with config param")
set_param("crm.pls_start_date", "")
res_config_new = resConfig.new()
self.assertEqual(Date.to_string(res_config_new.predictive_lead_scoring_start_date),
str_date_8_days_ago, "If config param is empty, date in settings should be set to 8 days before today")
set_param("crm.pls_start_date", "One does not simply walk into system parameters to corrupt them")
res_config_new = resConfig.new()
self.assertEqual(Date.to_string(res_config_new.predictive_lead_scoring_start_date),
str_date_8_days_ago, "If config param is not a valid date, date in settings should be set to 8 days before today")
@users('user_sales_manager')
def test_team_unlink(self):
""" Test that frequencies are sent to "no team" when unlinking a team
        in order to avoid losing too much information. """
pls_team = self.env["crm.team"].browse(self.pls_team.ids)
# clean existing data
self.env["crm.lead.scoring.frequency"].sudo().search([('team_id', '=', False)]).unlink()
# existing no-team data
no_team = [
('stage_id', '1', 20, 10),
('stage_id', '2', 0.1, 0.1),
('stage_id', '3', 10, 0),
('country_id', '1', 10, 0.1),
]
self.env["crm.lead.scoring.frequency"].sudo().create([
{'variable': variable, 'value': value,
'won_count': won_count, 'lost_count': lost_count,
'team_id': False,
} for variable, value, won_count, lost_count in no_team
])
# add some frequencies to team to unlink
team = [
('stage_id', '1', 20, 10), # existing noteam
('country_id', '1', 0.1, 10), # existing noteam
('country_id', '2', 0.1, 0), # new but void
('country_id', '3', 30, 30), # new
]
existing_plsteam = self.env["crm.lead.scoring.frequency"].sudo().create([
{'variable': variable, 'value': value,
'won_count': won_count, 'lost_count': lost_count,
'team_id': pls_team.id,
} for variable, value, won_count, lost_count in team
])
pls_team.unlink()
final_noteam = [
('stage_id', '1', 40, 20),
('stage_id', '2', 0.1, 0.1),
('stage_id', '3', 10, 0),
('country_id', '1', 10, 10),
('country_id', '3', 30, 30),
]
self.assertEqual(
existing_plsteam.exists(), self.env["crm.lead.scoring.frequency"],
'Frequencies of unlinked teams should be unlinked (cascade)')
existing_noteam = self.env["crm.lead.scoring.frequency"].sudo().search([
('team_id', '=', False),
('variable', 'in', ['stage_id', 'country_id']),
])
for frequency in existing_noteam:
stat = next(item for item in final_noteam if item[0] == frequency.variable and item[1] == frequency.value)
self.assertEqual(frequency.won_count, stat[2])
self.assertEqual(frequency.lost_count, stat[3])
self.assertEqual(len(existing_noteam), len(final_noteam))
| gpl-3.0 | 4,851,082,227,272,748,000 | 55.85918 | 179 | 0.618534 | false |
GFZ-Centre-for-Early-Warning/REM_RRVS | webapp/views.py | 1 | 11255 | '''
---------------------------
views.py
---------------------------
Created on 24.04.2015
Last modified on 15.01.2016
Author: Marc Wieland, Michael Haas
Description: The main views file setting up the flask application layout, defining all routes
----
'''
import flask
from webapp import app, db
from models import t_object, object_attribute, ve_object, dic_attribute_value, pan_imgs,gps, User, task, tasks_users
from forms import RrvsForm, LoginForm,RrvsForm_es,RrvsForm_ar
from flask.ext.security import login_required, login_user, logout_user, current_user
import geoalchemy2.functions as func
import json
from geojson import Feature, FeatureCollection, dumps
import time
########################################################
# REST interface getting task related buildings as json
########################################################
#@app.route("/bdgs/api/<int:taskid>",methods=["GET"])
#def get_task(taskid):
# geom = ve_object.query.filter_by(gid=taskid).first().the_geom
# #geom_json= json.loads(db.session.scalar(geoalchemy2.functions.ST_AsGeoJSON(geom)))
# geom_json = json.loads(db.session.scalar(func.ST_AsGeoJSON(geom)))
# geom_json["gid"]=taskid
# print geom_json
# return flask.jsonify(geom_json['coordinates'][0])
#def byteify(input):
# if isinstance(input, dict):
# return {byteify(key):byteify(value) for key,value in input.iteritems()}
# elif isinstance(input, list):
# return [byteify(element) for element in input]
# elif isinstance(input, unicode):
# return input.encode('utf-8')
# else:
# return input
@app.before_request
def check_login():
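    # deny access to static files for users that are not logged in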
    if flask.request.endpoint == 'static' and not current_user.is_authenticated():
        flask.abort(401)
return None
#######################################
# Login landing page
#######################################
@app.route("/", methods=["GET", "POST"])
def login():
"""For GET requests, display the login form. For POSTS, login the current user
by processing the form and storing the taskid."""
msg=''
form = LoginForm()
if form.validate_on_submit():
try:
user = User.query.get(int(form.userid.data))
except:
user = False
#check if task id belongs to user
try:
task_id = tasks_users.query.filter_by(task_id=form.taskid.data).first()
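            # division by zero raises here (and is caught below) if the task is not assigned to this user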
1/(task_id.user_id==user.id)
except:
user = False
if user:
user.authenticated = True
db.session.add(user)
db.session.commit()
#log the user in
login_user(user, remember=True)
#set up task
flask.session['taskid']=form.taskid.data
#get all buildings gids from task and storing in session
flask.session['bdg_gids'] = task.query.filter_by(id=flask.session['taskid']).first().bdg_gids
#get all img gids from task and storing in session
flask.session['img_gids'] = task.query.filter_by(id=flask.session['taskid']).first().img_ids
#flags for screened buildings
flask.session['screened'] = [False]*len(flask.session['bdg_gids'])
#language is set in babel locale in __init__.py
        #get gids of attribute values as defined in dic_attribute_value as a python dictionary
dic_attribute_val_query=dic_attribute_value.query.all()#.options(load_only("gid","attribute_value"))
dic_attribute_val_py={}
for attribute in dic_attribute_val_query:
dic_attribute_val_py[attribute.attribute_value] = attribute.gid
flask.session['dic_attribute_val_py']=dic_attribute_val_py
return flask.redirect(flask.url_for("main"))
else:
msg='Wrong combination of UserID and TaskID'
return flask.render_template("index.htm", form=form,msg=msg)
@app.route("/logout", methods=["GET"])
def logout():
"""Logout the current user."""
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
return flask.render_template("logout.html")
######################################
# Data entry/ Visualization interface
#####################################
@app.route('/main')
@login_required
def main():
"""
    This will render a template that holds the main page layout.
"""
return flask.render_template('main.htm')
@app.route('/map')
@login_required
def map():
"""
This will render a template that holds the map.
    It displays the buildings whose gids are contained in the task.
"""
#get bdg_gids
bdg_gids = flask.session['bdg_gids']
#get FeatureCollection with corresponding building footprints
rows = object_attribute.query.filter(db.and_(object_attribute.object_id.in_(bdg_gids),object_attribute.attribute_type_code=='RRVS_STATUS')).all()
bdgs = []
for row in rows:
geom = t_object.query.filter_by(gid=row.object_id).first().the_geom
geometry = json.loads(db.session.scalar(func.ST_AsGeoJSON(geom)))
feature = Feature(id=row.object_id,geometry=geometry,properties={"gid":row.object_id, "rrvs_status":row.attribute_value})
bdgs.append(feature)
bdgs_json = dumps(FeatureCollection(bdgs))
#get img_gids
img_gids = flask.session['img_gids']
#get metadata related to these images
image_rows = pan_imgs.query.filter(pan_imgs.gid.in_(img_gids)).all()
gps_ids = [row.gps for row in image_rows]
gps_rows = gps.query.filter(gps.gid.in_(gps_ids)).all()
#create a json object
img_gps = []
for i,image in enumerate(image_rows):
geometry = json.loads(db.session.scalar(func.ST_AsGeoJSON(gps_rows[i].the_geom)))
feature = Feature(id=image.gid,geometry=geometry,properties={"img_id":image.gid,"repository":image.repository,"filename":image.filename,"frame_id":image.frame_id,"azimuth":gps_rows[i].azimuth})
img_gps.append(feature)
gps_json = dumps(FeatureCollection(img_gps))
return flask.render_template('map.html',bdgs=bdgs_json,gps=gps_json)
@app.route('/pannellum')
@login_required
def pannellum():
"""
This will render a template that holds the panoimage viewer.
"""
return flask.render_template('pannellum.htm')
@app.route('/_update_rrvsform')
@login_required
def update_rrvsform():
"""
This updates the values of the rrvsform fields using jQuery. The function sends a json
string with all values to the rrvsform.html template for populating the fields.
Note that for QuerySelectFields the gid of the attribute_value needs to be returned by the function.
"""
# get building gid value for queries
gid_val = flask.request.args.get('gid_val', 0, type=int)
dic_attribute_val_py = flask.session['dic_attribute_val_py']
#query attribute values for select fields
rows = object_attribute.query.filter_by(object_id=gid_val).all()
height_fields = ['height','height2']
age_fields = ['yr_built']
text_fields = ['comment','rrvs_status']
attribute_vals = {}
for row in rows:
key = str(row.attribute_type_code)
if key.lower() in height_fields:
#convert to integer and store separately
attribute_vals['{}_1_val'.format(key.lower())]=int(row.attribute_numeric_1)
#also take value for type of int value
gid = dic_attribute_val_py[row.attribute_value]
elif key.lower() in age_fields:
#convert to integer and store separately
attribute_vals['year_1_val']=int(row.attribute_numeric_1)
#also take value for type of int value
gid = dic_attribute_val_py[row.attribute_value]
elif key.lower() in text_fields:
#keep string
attribute_vals['{}_val'.format(key.lower())]= row.attribute_value
gid = None
else:
#return gid to corresponding value in table dic_attribute_val
try:
gid = dic_attribute_val_py[row.attribute_value]
except KeyError:
gid = None
#add to dictionary
if gid != None:
attribute_vals['{}_gid'.format(key.lower())]=gid
return flask.jsonify(**attribute_vals)
@app.route('/rrvsform', methods=['GET', 'POST'])
@login_required
def rrvsform():
"""
This renders a template that displays all of the form objects if it's
a Get request. If the user is attempting to Post then this view will push
the data to the database.
"""
lang=flask.session['lang']
if lang == 'es':
rrvs_form = RrvsForm_es()
elif lang == 'ar':
rrvs_form = RrvsForm_ar()
else:
rrvs_form = RrvsForm()
if flask.request.method == 'POST' and rrvs_form.validate():
print 'UPDATE: Building {} updated!'.format(rrvs_form.gid_field.data)
# check if checkbox for rrvs status is ticked and assign values to be used for database update
if rrvs_form.rrvs_status_field.data == True:
rrvs_status_val = 'COMPLETED'
else:
rrvs_status_val = 'MODIFIED'
# update database with form content
rows = object_attribute.query.filter_by(object_id=rrvs_form.gid_field.data)
height_fields = ['height','height2']
age_fields = ['yr_built']
text_fields = ['comment']
for row in rows:
key = str(row.attribute_type_code).lower()
if key not in ['build_type','build_subtype']:#not implemented
if key in height_fields:
row.attribute_value = rrvs_form.__dict__[key+'_field'].data.attribute_value
row.attribute_numeric_1 = rrvs_form.__dict__[key+'_1_val_field'].data
elif key in age_fields:
row.attribute_value = rrvs_form.__dict__[key+'_field'].data.attribute_value
row.attribute_numeric_1 = rrvs_form.__dict__['year_1_val_field'].data
elif key in text_fields:
row.attribute_value = rrvs_form.__dict__[key+'_field'].data
elif key == 'rrvs_status':
row.attribute_value = rrvs_status_val
else:
row.attribute_value = rrvs_form.__dict__[key+'_field'].data.attribute_value
db.session.commit()
#update session variable for screened buildings
flask.session['screened'][flask.session['bdg_gids'].index(int(rrvs_form.gid_field.data))]=True
    # if no post request is sent the template is rendered normally showing numbers of completed bdgs
# get the data for the rrvsFormTable from the database
bdg_gids = flask.session['bdg_gids']
rows = object_attribute.query.filter(db.and_(object_attribute.object_id.in_(bdg_gids),object_attribute.attribute_type_code=='RRVS_STATUS')).all()
bdgs = []
for row in rows:
data = [str(row.object_id), str(row.attribute_value)]
bdgs.append(data)
return flask.render_template(template_name_or_list='rrvsform.html',
rrvs_form=rrvs_form,
bdgs=bdgs,
n=len(flask.session['bdg_gids']),
c=len([x for x in flask.session['screened'] if x==True]))
| bsd-3-clause | 3,528,388,904,670,477,000 | 41.794677 | 201 | 0.616526 | false |
mivade/qCamera | viewer/ring_buffer_viewer.py | 1 | 1959 | """Ring buffer viewer"""
from qcamera.ring_buffer import RingBuffer
from PyQt4 import QtGui
from guiqwt.builder import make
from ui_ring_buffer_viewer import Ui_RingBufferViewer
from util import get_image_item, get_rect_item
class RingBufferViewer(QtGui.QDialog, Ui_RingBufferViewer):
def __init__(self, rbuffer, parent):
QtGui.QDialog.__init__(self, parent=parent)
assert isinstance(rbuffer, RingBuffer)
# Stop recording
self.rbuffer = rbuffer
self.was_recording = self.rbuffer.recording
self.rbuffer.recording = False
# Setup UI
self.setupUi(self)
self.show()
self.closeButton.clicked.connect(self.finished)
max_ = self.parent().cam.rbuffer.get_current_index() - 1
self.indexBox.setRange(0, max_)
self.indexSlider.setRange(0, max_)
# Connect signals
self.indexBox.valueChanged.connect(self.update)
self.update()
def update(self):
"""Show the currently selected image from the ring buffer."""
# Get the specified image data and ROI
img_data = self.rbuffer.read(self.indexBox.value())
roi = self.rbuffer.get_roi(self.indexBox.value())
# Update the viewer
plot = self.imageWidget.get_plot()
img = get_image_item(self.imageWidget)
rect = get_rect_item(self.imageWidget)
if img is None:
img = make.image(img_data, colormap=str(self.parent().colormapBox.currentText()))
plot.add_item(img)
else:
img.set_data(img_data)
if rect is None:
rect = make.rectangle(roi[0], roi[1], roi[2], roi[3])
plot.add_item(rect)
else:
rect.set_rect(roi[0], roi[1], roi[2], roi[3])
plot.replot()
def finished(self):
"""Resume recording and quit."""
if self.was_recording:
self.rbuffer.recording = True
self.done(0)
| bsd-2-clause | -6,084,053,276,708,281,000 | 32.220339 | 93 | 0.614599 | false |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/reportlab/lib/abag.py | 1 | 1171 | #Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/abag.py
__version__='3.3.0'
__doc__='''Data structure to hold a collection of attributes, used by styles.'''
class ABag:
"""
'Attribute Bag' - a trivial BAG class for holding attributes.
This predates modern Python. Doing this again, we'd use a subclass
of dict.
You may initialize with keyword arguments.
a = ABag(k0=v0,....,kx=vx,....) ==> getattr(a,'kx')==vx
c = a.clone(ak0=av0,.....) copy with optional additional attributes.
"""
def __init__(self,**attr):
self.__dict__.update(attr)
def clone(self,**attr):
n = self.__class__(**self.__dict__)
if attr: n.__dict__.update(attr)
return n
def __repr__(self):
D = self.__dict__
K = list(D.keys())
K.sort()
return '%s(%s)' % (self.__class__.__name__,', '.join(['%s=%r' % (k,D[k]) for k in K]))
if __name__=="__main__":
AB = ABag(a=1, c="hello")
CD = AB.clone()
print(AB)
print(CD)
| agpl-3.0 | 7,076,339,163,037,442,000 | 30.527778 | 100 | 0.557643 | false |
raony/pugpexxix | blackart.py | 1 | 3057 | # Hijacking third party libs
# now things are going to get serious.
# imagine you need to put some code inside a third party lib. Let's say
# it is some logging feature so you can understand some wacky behavior.
# you don't know where this will lead you, and you are exploring
# alternatives to learn more about the code without having to alter it in
# some way that it's gonna be hard to keep it up to date with its new versions.
# that's when black arts come in handy, it is **exploratory** and
# **temporary**, it unleashes the depths of the system's inner workings in order
# to give you insight. Please, clean up after use.
# this is how it is supposed to work:
# >>> import blackart
# >>> blackart.insert_finder() # hook into sys.meta_path
# >>> import math # (almost) any lib will do
# >>> math.ceil(1.2)
# *** ceil
# 2.0
# this is actually a partial implementation of the idea, it lacks
# the ability to successfully intercept dotted imports. It will leave
# untouched classes that already have a __metaclass__, and also those that
# live in C code.
# but you get the idea.
from functools import wraps
import imp
import inspect
import sys
def logall(f):
# my simple decorator to log functions
@wraps(f)
def _f(*args, **kwargs):
print '*** %s' % f.__name__
return f(*args, **kwargs)
return _f
class logallmeta(type):
# my metaclass that decorates all the class methods
def __new__(cls, name, bases, clsdict):
for attr, attrvalue in clsdict.items():
if callable(attrvalue):
clsdict[attr] = logall(attrvalue)
return type.__new__(cls, name, bases, clsdict)
class baselogall(object):
# my base class that uses the metaclass, so is just throw it at
# "bases" list
__metaclass__ = logallmeta
def insert_finder():
# hijacking the import system!
sys.meta_path.append(module_finder())
class module_finder(object):
# first each import will call the find_module and expect a loader
def find_module(self, fullname, path=None):
return module_loader(*imp.find_module(fullname, path))
class module_loader(object):
def __init__(self, file, pathname, description):
self.file = file
self.pathname = pathname
self.description = description
def load_module(self, fullname):
# then the import will try to load_module and expect the mod
try:
mod = imp.load_module(fullname, self.file, self.pathname, self.description)
finally:
if self.file:
self.file.close()
dct = {'__module__':mod.__name__}
for key, val in mod.__dict__.items():
if inspect.isclass(val):
try:
# recreate all classes with the logging base class
setattr(mod, key, type(key,(val,baselogall),dct))
except TypeError, e:
print e
elif callable(val):
# decorate all the callables
setattr(mod, key, logall(val))
return mod
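# A tiny, illustrative demo (a sketch mirroring the transcript at the top of
# this file, not part of the original module): hook the import machinery,
# import a module and watch its callables announce themselves.
if __name__ == '__main__':
    insert_finder()
    import math           # (almost) any lib will do
    print math.ceil(1.2)  # expect "*** ceil" followed by 2.0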
| gpl-2.0 | -2,859,481,613,356,873,000 | 31.870968 | 87 | 0.636572 | false |
lebauce/artub | projectproperties.py | 1 | 2568 | # Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
import wx
def create(parent):
return OptionsWindow(parent)
[wxID_WXOPTIONSDIALOG, wxID_WXOPTIONSNOTEBOOK, wxID_WXDIALOG1BUTTON1, wxID_WXDIALOG1BUTTON2
] = map(lambda _init_ctrls: wx.NewId(), range(4))
class ProjectProperties(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, id=wxID_WXOPTIONSDIALOG, name='', parent=parent,
pos=wx.Point(256, 167), size=wx.Size(385, 330),
style=wx.DEFAULT_DIALOG_STYLE, title=_("Options"))
self.artub = parent
self.notebook = wx.Notebook(id=wxID_WXOPTIONSNOTEBOOK, name='notebook1',
parent=self, pos=wx.Point(8, 8), style=0)
self.create_plugins_pages()
self.CenterOnScreen()
def create_plugins_pages(self):
sizer = wx.BoxSizer(wx.VERTICAL)
from builder.optionspage import BuilderOptionsPage
page = BuilderOptionsPage(self.notebook, wx.GetApp().artub_frame.project)
self.builder_page = page
self.notebook.AddPage(page, _("Builder"))
sizer.Add(self.notebook, 0, wx.ALL, 5)
sizer2 = wx.BoxSizer(wx.HORIZONTAL)
okbutton = wx.Button(id=wx.ID_OK, label=_("Ok"),
name='button1', parent=self, style=0)
wx.EVT_BUTTON(okbutton, wx.ID_OK, self.OnOk)
cancelbutton = wx.Button(id=wx.ID_CANCEL, label=_("Cancel"),
name='button2', parent=self, style=0)
sizer2.Add(okbutton, 0, wx.ALIGN_CENTER)
sizer2.Add(cancelbutton, 0, wx.ALIGN_CENTER)
sizer.Add(sizer2, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER | wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def OnOk(self, evt):
self.builder_page.OnOk()
evt.Skip()
| gpl-2.0 | 1,089,999,112,614,734,500 | 39.761905 | 91 | 0.641745 | false |
saifulazad/myApp | app/auth.py | 1 | 7116 | import os
from flask import Flask, url_for, redirect, render_template, request
from flask_sqlalchemy import SQLAlchemy
from wtforms import form, fields, validators
import flask_admin as admin
import flask_login as login
from flask_admin.contrib import sqla
from flask_admin import helpers, expose
from werkzeug.security import generate_password_hash, check_password_hash
# Create Flask application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model.
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(100))
last_name = db.Column(db.String(100))
login = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120))
password = db.Column(db.String(64))
# Flask-Login integration
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
# Required for administrative interface
def __unicode__(self):
        return self.login
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
login = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
        # we're comparing the plaintext pw with the hash from the db
if not check_password_hash(user.password, self.password.data):
# to compare plain text passwords use
# if user.password != self.password.data:
raise validators.ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(login=self.login.data).first()
class RegistrationForm(form.Form):
login = fields.TextField(validators=[validators.required()])
email = fields.TextField()
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
if db.session.query(User).filter_by(login=self.login.data).count() > 0:
raise validators.ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.init_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqla.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Create customized index view class that handles login & registration
class MyAdminIndexView(admin.AdminIndexView):
@expose('/')
def index(self):
if not login.current_user.is_authenticated():
return redirect(url_for('.login_view'))
return super(MyAdminIndexView, self).index()
@expose('/login/', methods=('GET', 'POST'))
def login_view(self):
# handle user login
form = LoginForm(request.form)
if helpers.validate_form_on_submit(form):
user = form.get_user()
login.login_user(user)
if login.current_user.is_authenticated():
return redirect(url_for('.index'))
link = '<p>Don\'t have an account? <a href="' + url_for('.register_view') + '">Click here to register.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/register/', methods=('GET', 'POST'))
def register_view(self):
form = RegistrationForm(request.form)
if helpers.validate_form_on_submit(form):
user = User()
form.populate_obj(user)
            # we hash the user's password to avoid saving it as plaintext in the db,
# remove to use plain text:
user.password = generate_password_hash(form.password.data)
db.session.add(user)
db.session.commit()
login.login_user(user)
return redirect(url_for('.index'))
link = '<p>Already have an account? <a href="' + url_for('.login_view') + '">Click here to log in.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/logout/')
def logout_view(self):
login.logout_user()
return redirect(url_for('.index'))
# Flask views
@app.route('/')
def index():
return render_template('base.html')
# Initialize flask-login
init_login()
# Create admin
admin = admin.Admin(app, 'Example: Auth', index_view=MyAdminIndexView(), base_template='my_master.html')
# Add view
admin.add_view(MyModelView(User, db.session))
def build_sample_db():
"""
Populate a small db with some example entries.
"""
import string
import random
db.drop_all()
db.create_all()
# passwords are hashed, to use plaintext passwords instead:
# test_user = User(login="test", password="test")
test_user = User(login="test", password=generate_password_hash("test"))
db.session.add(test_user)
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
for i in range(len(first_names)):
user = User()
user.first_name = first_names[i]
user.last_name = last_names[i]
user.login = user.first_name.lower()
user.email = user.login + "@example.com"
user.password = generate_password_hash(''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(10)))
db.session.add(user)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = os.path.realpath(os.path.dirname(__file__))
database_path = os.path.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True) | bsd-3-clause | 609,638,215,565,379,500 | 30.651376 | 129 | 0.621979 | false |
davidsoncolin/IMS | GCore/State.py | 1 | 12941 | #!/usr/bin/env python
import IO
def freeze(v): return str(IO.encode(v))
def thaw(s): ret,e = IO.decode(s); assert(e==len(s)); return ret
g_state = {}
g_dirty = set() # keys that have been modified
from re import compile as re_compile
g_reLastNum = re_compile(r'(?:[^\d]*(\d+)[^\d]*)+')
g_undos,g_redos,g_undo = [],[],{}
primarySelection = None # TODO, sort this out properly!
# State is a registry of key/value pairs
# Leaf keys hold a value which is frozen as a string
# Other ('FieldDict') keys hold a dict of key/value pairs where the key is a field and the value is the key for that data
# g_state is a dict of key/values. the values are strings, dicts or lists
# values are encoded by being frozen to strings
# dicts have their values encoded as keys
# lists have their elements encoded as keys
# there is a root dict at key ''; other keys start with '/'
# # How to:
# addKey(whatever) # commands ...
# setKey(whatever)
# delKey(whatever)
# push('user description')
# # the added commands form a fragment until the next push; then the user description appears in the undo stack
# # if the same keys are set multiple times, only the last value is stored (to prevent huge accumulations)
# # if the same command is repeated, it is concatenated with the previous command (so that undo undoes both)
## Utility Function ##
# TODO rethink this; we could store only a <set> or <list nub> and generate the dict/list from the keys
# after State.setKey('/images',[0])
# currently: {'':{'images':'/images'}, '/images':['/images/0'], '/images/0':0}
# future: {'':set('images'), '/images':list, '/images/0':0}
def keyType(v):
if isinstance(v,dict): return 1
if isinstance(v,list): return 2
return 0
def getSel(): global primarySelection; return primarySelection
def setSel(sel): global primarySelection; primarySelection = sel
def version(): return getKey('/version/version')
def appName(): return getKey('/version/app')
def uniqueKeys(): global g_state; return g_state[''].viewkeys() # the root key, contains all the root objects
def hasKey(k): global g_state; return k in g_state
def allKeys(): global g_state; return g_state.viewkeys()
def refKey(path, create=False):
'''return reference information for a particular path'''
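    # Returns (key, parent, field, parent_key), where 'parent' is the dict or
    # list container that holds 'key' under 'field'.  With create=True, any
    # missing intermediate containers are created on the fly and recorded in
    # g_undo so the creation can be undone.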
global g_state, g_undo
path = path.strip('/').split('/')
key = '' # NB the root key is the empty string
for f in path:
parent_key = key
if create and parent_key not in g_state:
g_state[parent_key] = {}
g_undo.setdefault('del',set()).add(parent_key)
g_undo.setdefault('add',{}).pop(parent_key,None) # in one or the other
parent = g_state[parent_key]
if isinstance(parent,list):
f = int(f)
key = parent_key+'/'+str(f)
if create and f == len(parent):
parent.append(key) # allow appends to lists!
g_undo.setdefault('del',set()).add(key)
g_undo.setdefault('add',{}).pop(key,None) # in one or the other
elif isinstance(parent,dict):
key = parent_key+'/'+f
if create and f not in parent:
parent[f] = key
g_undo.setdefault('del',set()).add(key)
g_undo.setdefault('add',{}).pop(key,None) # in one or the other
else:
print 'State.refKey what?',path,create,f,type(parent)
assert False
return key, parent, f, parent_key
def getKeys(l):
return [getKey(k) for k in l]
def getKey(k, default=Exception, depth=0):
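    # Thaw and return the value stored at key k, recursively expanding dict
    # and list keys back into plain Python dicts/lists (up to 10 levels deep).
    # Missing keys fail with an assertion unless a 'default' value is given.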
if depth == 10: return default
if k is None: return None
global g_state
k = '/'+k.strip('/')
if k == '/': k = '' # allow the root key
v = g_state.get(k,None)
if v is None:
assert default is not Exception,'missing key '+k
return default
t = keyType(v)
if t == 0: return thaw(v)
if t == 1: return dict([(k,getKey(vv,default=default,depth=depth+1)) for k,vv in v.iteritems()])
if t == 2: return [getKey(vv,default=default,depth=depth+1) for vv in v]
def subkeys(k, default=Exception):
'''return the subkeys for a particular key. returns None if the key is a value.'''
if k is None: return None
global g_state
k = '/'+k.strip('/')
if k == '/': k = '' # allow the root key
v = g_state.get(k,None)
if v is None:
assert default is not Exception,'missing key '+k
return default
t = keyType(v)
if t == 0: return None
if t == 1: return v.keys()
if t == 2: return range(len(v))
def nextKey(k):
'''Generate the next numbered key'''
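    # e.g. if 'cube' is free it is returned unchanged; if 'cube1' is taken,
    # 'cube2', 'cube3', ... are tried in turn (the last run of digits in the
    # key is incremented, or a digit is appended if there is none).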
global g_state, g_reLastNum
if not g_state.has_key(k) and not g_state.has_key('/' + k): return k
ki,kj,kv = len(k),len(k),1
match = g_reLastNum.search(k)
if match:
ki, kj = match.span(1)
kv = int(k[ki:kj])
ki = max(ki, kj - len(str(kv))) # in case of leading zeros
ks = k[:ki]+'%d'+k[kj:]
while True:
nk = (ks % kv)
if not g_state.has_key(nk): return nk
kv += 1
## Undo Functions
def getUndoCmd():
global g_undos
if g_undo: return g_undo.get('cmd','fragment')
if not g_undos: return None
return g_undos[-1].get('cmd','whoops')
def push(cmd):
'''Name the command and push onto the undo stack.'''
global g_undo, g_undos, g_redos
#print 'pushing',cmd
#if not g_undo: print 'warning: nothing to push'
if g_undo:
g_undo['cmd'] = cmd
        # test for concatenation
# concatenate repeated commands that only modify the same keys (eg dragging a slider)
if g_undos:
tmp = g_undos[-1]
# sometimes commands set other keys...
if set(tmp.viewkeys()) == set(['cmd','set']) and tmp.viewkeys() == g_undo.viewkeys() and tmp['cmd'] == cmd:
g_undo['set'].update(tmp['set'])
g_undos.pop()
g_undos.append(g_undo)
g_undo = {}
g_redos = []
def undo():
global g_undo, g_undos, g_redos
if not g_undo and g_undos: g_undo = g_undos.pop()
if g_undo:
g_redos.append(repeat(g_undo))
g_undo = {}
def getRedoCmd():
'''peek at the redo cmd; return None if there is none'''
global g_redos
if not g_redos: return None
return g_redos[-1].get('cmd','whoops')
def redo():
global g_undo, g_undos, g_redos
if g_undo:
print 'state.redo warning'
g_undos.append(g_undo)
g_undo = {}
if not g_redos: return None
g_undos.append(repeat(g_redos.pop()))
def repeat(undo):
'''redo a command while generating the undo command'''
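    # An undo/redo record is a dict shaped like
    #   {'cmd': label, 'set': {key: frozen value}, 'add': {key: frozen value},
    #    'del': set(keys)}
    # Deletions are applied children-before-parents, then additions, then
    # plain sets, and the inverse of every step is accumulated into the
    # returned record.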
global g_state, g_dirty
redo = {'cmd':undo.get('cmd','fragment')}
g_dirty.update(undo.get('del',[]))
dels = sorted(undo.get('del',[]))
for k in dels[::-1]:
if k in g_state:
redo.setdefault('add',{})[k] = _delKey(k)
g_dirty.update(undo.get('add',{}).viewkeys())
adds = undo.get('add',{})
for k in sorted(adds.viewkeys()):
redo.setdefault('del',set()).add(k)
_setKey(k, adds[k], do_freeze=False)
g_dirty.update(undo.get('set',{}).viewkeys())
for k,v in undo.get('set',{}).iteritems():
#print 'set',k, k in g_state
redo.setdefault('set',{})[k] = _delKey(k,False)
_setKey(k, v, do_freeze=False)
#g_state[k] = v
return redo
## Key Creation and Editing
def addKey(k,v):
'''Add a key/value pair to the dictionary. NOTE the actual key is returned, which may be different from the requested one.'''
return setKey(nextKey(k),v)
def setKey(k,v):
'''Update the value for a given key, or add a new key if it doesn't exist.'''
assert isinstance(k,str),'k should be a str not '+str(type(k))
global g_state, g_undo
k = '/'+k.strip('/')
if k == '/': k = '' # allow modifying the root key, for experts only!
has_k = hasKey(k)
k = refKey(k, create=True)[0]
if has_k: g_undo.setdefault('set',{}).setdefault(k,_delKey(k, False))
else:
g_undo.setdefault('add',{}).pop(k,None)
g_undo.setdefault('del',set()).add(k)
_setKey(k, v)
return k
def _setKey(k, v, do_freeze=True):
# an internal function that doesn't touch the undo stack. set a key
global g_state, g_dirty
t = keyType(v)
k, parent, f, parent_key = refKey(k)
if isinstance(parent,list) and f == len(parent): parent.append(k)
if isinstance(parent,dict) and f not in parent: parent[f] = k
# only dicts where all keys are strings are deep-indexed
if t == 1 and not all(isinstance(vk,str) for vk in v.iterkeys()):
#print 'WARNING: %s is a dict with keys of type %s' % (k,type(v.keys()[0]))
t = 0
if t == 1:
dct = {}
g_state[k] = dct
for vk,vv in v.items():
assert isinstance(vk,str),'should be a str, not '+str(type(vk))
kk = k+'/'+vk
dct[vk] = kk
_setKey(kk,vv,do_freeze)
elif t == 2:
dct = []
g_state[k] = dct
for vk,vv in enumerate(v):
kk = k+'/'+str(vk)
dct.append(kk)
_setKey(kk,vv,do_freeze)
else:
g_state[k] = freeze(v) if do_freeze else v
g_dirty.add(k)
## Key Deletion
def delKey(k):
global g_undo
k = '/'+k.strip('/')
g_undo.setdefault('add',{})[k] = _delKey(k)
g_undo.setdefault('del',set()).discard(k) # in one or the other
def _delKey(key, unlink_parent=True):
'''
:param key: The Key in the State dict to remove
:param unlink_parent: Whether to remove the link from the parent (delete, must not be mid-list) or not
:return: the value of the key
    A private function that removes a key from the dict but does not add anything to the undo stack; whether the parent's reference to the key is removed is controlled by unlink_parent.
'''
global g_state, g_dirty
if unlink_parent:
key, parent, field, parent_key = refKey(key)
g_dirty.add(parent_key)
pt = keyType(parent)
if pt == 1: parent.pop(field,None)
elif pt == 2:
field = int(field)
# TODO this could go wrong if you extended a list by multiple keys in a single command
assert field == len(parent)-1,'can only remove the last key in a list: '+repr(field)+"!="+repr(len(parent)-1)+': '+key
parent.pop()
v = g_state.pop(key)
g_dirty.add(key)
t = keyType(v)
if t == 0: return v # NB not thawed here
if t == 1: return dict([(k,_delKey(vv, False)) for k,vv in v.iteritems()])
if t == 2: return [_delKey(vv, False) for vv in v]
## Save and Load
def save(filename):
print 'saving',filename
global g_state
IO.save(filename, g_state)
push('save')
def load(filename):
print 'loading',filename
tmp = IO.load(filename)[1]
load_version = thaw(tmp['/version/version'])
load_app = thaw(tmp['/version/app'])
# don't load bad version or app
assert(load_version == version()),'file has a bad version:'+str(load_version)
assert(load_app == appName()),'file has a bad app name:'+str(load_app)
new(tmp)
def clearUndoStack():
global g_undos, g_redos,g_undo
g_undos,g_redos,g_undo = [],[],{}
def new(state=None):
global g_state,g_dirty
g_dirty.update(g_state.viewkeys()) # all the keys have changed
g_state = {} if state is None else state
g_dirty.update(g_state.viewkeys()) # all the keys have changed
setKey('/version',{'type':'note','version':'0.0', 'app':'Imaginarium App'})
clearUndoStack()
setSel(None)
new()
if __name__ == "__main__":
test_range = 10 # Set the number of keys to test
verbose = True
print '-- test undo/redo --'
state = dict([('key'+str(v),{'v':[1,2,3,'value'+str(v)]}) for v in range(10)])
print state
setKey('/test',state)
push('set keys')
assert getKey('/test') == state
setKey('/test/key5/v/2', 'yoyo')
state['key5']['v'][2] = 'yoyo'
push('set key5')
assert getKey('/test') == state
delKey('/test/key5')
key5 = state.pop('key5')
push('del key 5')
assert getKey('/test') == state
undo()
state['key5'] = key5
assert getKey('/test') == state
redo()
key5 = state.pop('key5')
assert getKey('/test') == state
undo()
state['key5'] = key5
undo()
state['key5']['v'][2] = 3
assert getKey('/test') == state
new()
## Test Insertion ##
print ("-- Testing insertion --")
test_keys = []
for i in xrange(test_range):
test_key = 'test/key'+str(i)
test_value = 'test_value ' + str(i)
test_keys.append(addKey(test_key,test_value))
key_found = hasKey(test_keys[i])
state_value = getKey(test_keys[i])
if verbose:
print ("Adding key: {} - With value: {} - Key found: {} - Key value: {}".format(
test_key, test_value, key_found, state_value
))
assert key_found, "Test key {} not found\nFailed Key: {}".format(i, test_keys[i])
assert state_value == test_value, 'Value {} was not expected value\nExpected: {} - Received: {}'.format(
i,test_value,state_value
)
key, parent, f, parent_key = refKey(test_keys[i])
has_child_key = parent[f] == test_keys[i]
print ('Parent: {} - Parent has child key: {}'.format(parent_key, has_child_key))
print ("\nInsertion completed successfully - No issues found\n")
## Test Value Updates
# TODO Try setting different data types
print ("-- Testing Value Updates --")
for ki, key in enumerate(test_keys):
old_val = getKey(key)
new_val = old_val[::-1]
setKey(key,new_val)
state_value = getKey(key)
if verbose:
print ("From: {} - To: {} - Received: {}".format(old_val,new_val,state_value))
assert new_val == state_value, 'Key update {} failed on key: {}'.format(ki, key)
print ("\nUpdates completed successfully - No issues found\n")
## Test Key Deletion
print ("-- Testing Key Deletion --")
for ki, key in enumerate(test_keys):
delKey(key)
key_delete = not hasKey(key)
if verbose:
print ("Deleting key: {} - Key deleted: {}".format(key,key_delete))
assert key_delete, 'Deletion {} failed on key: {}'.format(ki,key)
print ("\nDeletions completed successfully - No issues found\n")
| mit | 7,403,516,768,357,741,000 | 30.640587 | 126 | 0.653582 | false |
flask-restful/flask-restful | tests/test_accept.py | 1 | 7700 | import unittest
from flask import Flask
import flask_restful
from werkzeug import exceptions
class AcceptTestCase(unittest.TestCase):
def test_accept_default_application_json(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'application/json')
def test_accept_no_default_match_acceptable(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'application/json')
def test_accept_default_override_accept(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'text/plain')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'application/json')
def test_accept_default_any_pick_first(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', '*/*')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'application/json')
def test_accept_no_default_no_match_not_acceptable(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'text/plain')])
self.assertEqual(res.status_code, 406)
self.assertEqual(res.content_type, 'application/json')
def test_accept_no_default_custom_repr_match(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.representations = {}
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'text/plain')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'text/plain')
def test_accept_no_default_custom_repr_not_acceptable(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.representations = {}
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json')])
self.assertEqual(res.status_code, 406)
self.assertEqual(res.content_type, 'text/plain')
def test_accept_no_default_match_q0_not_acceptable(self):
"""
q=0 should be considered NotAcceptable,
but this depends on werkzeug >= 1.0 which is not yet released
so this test is expected to fail until we depend on werkzeug >= 1.0
"""
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json; q=0')])
self.assertEqual(res.status_code, 406)
self.assertEqual(res.content_type, 'application/json')
def test_accept_no_default_accept_highest_quality_of_two(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json; q=0.1, text/plain; q=1.0')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'text/plain')
def test_accept_no_default_accept_highest_quality_of_three(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
@api.representation('text/html')
@api.representation('text/plain')
def text_rep(data, status_code, headers=None):
resp = app.make_response((str(data), status_code, headers))
return resp
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'application/json; q=0.1, text/plain; q=0.3, text/html; q=0.2')])
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content_type, 'text/plain')
def test_accept_no_default_no_representations(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype=None)
api.representations = {}
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'text/plain')])
self.assertEqual(res.status_code, 406)
self.assertEqual(res.content_type, 'text/plain')
def test_accept_invalid_default_no_representations(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app, default_mediatype='nonexistant/mediatype')
api.representations = {}
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/', headers=[('Accept', 'text/plain')])
self.assertEqual(res.status_code, 500)
| bsd-3-clause | 4,420,775,435,837,858,000 | 31.083333 | 119 | 0.574026 | false |
Agnishom/agnishomsudoku | sudoku-solver.py | 1 | 6193 | #!/usr/bin/env python
def initiate():
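    # Precompute, for every 3x3 box, row and column, the list of flat grid
    # indices (0..80) it covers; valid() checks candidate digits against these.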
box.append([0, 1, 2, 9, 10, 11, 18, 19, 20])
box.append([3, 4, 5, 12, 13, 14, 21, 22, 23])
box.append([6, 7, 8, 15, 16, 17, 24, 25, 26])
box.append([27, 28, 29, 36, 37, 38, 45, 46, 47])
box.append([30, 31, 32, 39, 40, 41, 48, 49, 50])
box.append([33, 34, 35, 42, 43, 44, 51, 52, 53])
box.append([54, 55, 56, 63, 64, 65, 72, 73, 74])
box.append([57, 58, 59, 66, 67, 68, 75, 76, 77])
box.append([60, 61, 62, 69, 70, 71, 78, 79, 80])
for i in range(0, 81, 9):
row.append(range(i, i+9))
for i in range(9):
column.append(range(i, 80+i, 9))
def valid(n, pos):
current_row = pos/9
current_col = pos%9
current_box = (current_row/3)*3 + (current_col/3)
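    # e.g. position 40 (the centre cell) -> row 4, column 4, box 4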
for i in row[current_row]:
if (grid[i] == n):
return False
for i in column[current_col]:
if (grid[i] == n):
return False
for i in box[current_box]:
if (grid[i] == n):
return False
return True
def solve():
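    # Plain backtracking over the 81 cells: advance while some digit can be
    # legally placed, step back (proceed == 0) when a cell is exhausted.
    # Pre-filled 'given' cells are skipped in both directions; 'diff' counts
    # loop iterations as a rough measure of how hard the grid was.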
i = 0
proceed = 1
diff = 0.0
while(i < 81):
if given[i]:
if proceed:
i += 1
else:
i -= 1
else:
n = grid[i]
prev = grid[i]
while(n < 9):
if (n < 9):
n += 1
if valid(n, i):
grid[i] = n
proceed = 1
break
if (grid[i] == prev):
grid[i] = 0
proceed = 0
if proceed:
i += 1
else:
i -=1
diff = diff + 1.0
return diff
def gotonext():
global now_selected
now_selected = (now_selected+1) % 81
def checkall():
for i in xrange(81):
if not valid(grid[i],i):
return False
return True
grid = [0]*81
given = [False]*81
box = []
row = []
column = []
initiate()
diff = 0.0
import pygame
import sys
pygame.init()
size = 600, 732
black = 0, 0, 0
screen = pygame.display.set_mode(size)
pygame.display.set_caption('Sudoku Solver')
template = pygame.image.load("SUDOKU.jpg")
rect = template.get_rect()
font = pygame.font.Font('Font.ttf', 48)
now_selected = 0
running = True
while running:
screen.blit(template, rect)
for i in range(81):
if grid[i]:
if given[i]:
color = (0,0,255)
else:
color = (0,0,0)
text = font.render(str(grid[i]), 1, color)
ro = i/9
col = i%9
textpos = text.get_rect(centerx = col*66 + 33, centery = ro*66 + 33)
screen.blit(text, textpos)
try:
screen.blit(font.render('_', 1, (150,150,150)), font.render('O', 1, (100,100,100)).get_rect(centerx = (now_selected%9)*66 + 33, centery = (now_selected/9)*66 + 33))
except:
pass
for event in pygame.event.get():
if event.type == pygame.QUIT:
            running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
x,y = pygame.mouse.get_pos()
col_edit = x/66
ro_edit = y/66
last_selected = now_selected
now_selected = ro_edit*9 + col_edit
if (now_selected > 80):
gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_1 or event.key == pygame.K_KP1) :
if valid(1,now_selected): grid[now_selected] = 1;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_2 or event.key == pygame.K_KP2) :
if valid(2,now_selected): grid[now_selected] = 2;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_3 or event.key == pygame.K_KP3) :
if valid(3,now_selected): grid[now_selected] = 3;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_4 or event.key == pygame.K_KP4) :
if valid(4,now_selected): grid[now_selected] = 4;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_5 or event.key == pygame.K_KP5) :
if valid(5,now_selected): grid[now_selected] = 5;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_6 or event.key == pygame.K_KP6) :
if valid(6,now_selected): grid[now_selected] = 6;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_7 or event.key == pygame.K_KP7) :
if valid(7,now_selected): grid[now_selected] = 7;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_8 or event.key == pygame.K_KP8) :
if valid(8,now_selected): grid[now_selected] = 8;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_9 or event.key == pygame.K_KP9) :
if valid(9,now_selected): grid[now_selected] = 9;given[now_selected]=True;gotonext()
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_0 or event.key == pygame.K_BACKSPACE) :
grid[now_selected] = 0;given[now_selected]=False;
elif event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
now_selected = (now_selected-9) % 81
elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
now_selected = (now_selected+9) % 81
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_RIGHT or event.key == pygame.K_TAB):
now_selected = (now_selected+1) % 81
elif event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:
now_selected = (now_selected-1) % 81
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
for i in xrange(81):
if not given[i]:
grid[i]=0
solve()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_DELETE:
grid = [0]*81
given = [False]*81
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
try:
pygame.display.update()
except:
pass
pygame.quit()
| mit | 6,588,004,776,784,848,000 | 36.949686 | 166 | 0.550783 | false |
richbrowne/f5-cccl | f5_cccl/test/test_exceptions.py | 1 | 3099 | #!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5_cccl import exceptions
import pytest
def test_create_f5ccclerror_nomsg():
"""Test the creation of F5CcclError without message."""
e = exceptions.F5CcclError()
assert e
assert not e.msg
assert "{}".format(e) == "F5CcclError"
def test_create_f5ccclerror_msg():
"""Test the creation of F5CcclError with message."""
error_msg = "Test CCCL Error"
e = exceptions.F5CcclError(error_msg)
assert e
assert e.msg == error_msg
assert "{}".format(e) == "F5CcclError - Test CCCL Error"
def test_raise_f5ccclerror():
"""Test raising a F5CcclError."""
with pytest.raises(exceptions.F5CcclError):
def f():
raise exceptions.F5CcclError()
f()
def test_raise_f5cccl_resource_create_error():
"""Test raising a F5CcclResourceCreateError."""
with pytest.raises(exceptions.F5CcclResourceCreateError):
def f():
raise exceptions.F5CcclResourceCreateError()
f()
def test_raise_f5cccl_resource_conflict_error():
"""Test raising a F5CcclConflictError."""
with pytest.raises(exceptions.F5CcclResourceConflictError):
def f():
raise exceptions.F5CcclResourceConflictError()
f()
def test_raise_f5cccl_resource_notfound_error():
"""Test raising a F5CcclResourceNotFoundError."""
with pytest.raises(exceptions.F5CcclResourceNotFoundError):
def f():
raise exceptions.F5CcclResourceNotFoundError()
f()
def test_raise_f5cccl_resource_request_error():
"""Test raising a F5CcclResourceRequestError."""
with pytest.raises(exceptions.F5CcclResourceRequestError):
def f():
raise exceptions.F5CcclResourceRequestError()
f()
def test_raise_f5cccl_resource_update_error():
"""Test raising a F5CcclResourceUpdateError."""
with pytest.raises(exceptions.F5CcclResourceUpdateError):
def f():
raise exceptions.F5CcclResourceUpdateError()
f()
def test_raise_f5cccl_resource_delete_error():
"""Test raising a F5CcclResourceDeleteError."""
with pytest.raises(exceptions.F5CcclResourceDeleteError):
def f():
raise exceptions.F5CcclResourceDeleteError()
f()
def test_raise_f5cccl_configuration_read_error():
"""Test raising a F5CcclConfigurationReadError."""
with pytest.raises(exceptions.F5CcclConfigurationReadError):
def f():
raise exceptions.F5CcclConfigurationReadError()
f()
| apache-2.0 | 1,642,826,834,951,448,000 | 27.431193 | 74 | 0.690223 | false |
rr-/drill | drillsrs/cmd/list_tags.py | 1 | 1333 | import argparse
from typing import Any
import sqlalchemy as sa
from drillsrs import db, util
from drillsrs.cmd.command_base import CommandBase
def _print_single_tag(session: Any, index: int, tag: db.Tag) -> None:
tag_usages = (
session.query(sa.func.count(db.CardTag.tag_id))
.filter(db.CardTag.tag_id == tag.id)
.scalar()
) or 0
print("Tag #%d" % (index + 1))
print("Name: %s" % tag.name)
print("Color: %s" % tag.color)
print("Preview: [%s]" % util.format_card_tag(tag))
print("Usages: %d" % tag_usages)
print()
class ListTagsCommand(CommandBase):
names = ["list-tags"]
description = "print all tags in a deck"
def decorate_arg_parser(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument("deck", nargs="?", help="choose the deck name")
def run(self, args: argparse.Namespace) -> None:
deck_name: str = args.deck
with db.session_scope() as session:
deck = db.get_deck_by_name(session, deck_name)
tags = (
session.query(db.Tag).filter(db.Tag.deck_id == deck.id).all()
)
if not tags:
print("No tags to show.")
return
for i, tag in enumerate(tags):
_print_single_tag(session, i, tag)
| mit | 5,066,354,452,943,796,000 | 28.622222 | 77 | 0.581395 | false |
xuleiboy1234/autoTitle | tensorflow/tensorflow/python/eager/tensor_node.py | 1 | 8327 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorNode for autograd tracing of computations with Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autograd import core as ag_core
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager import tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
@ag_core.primitive
def _tensor_numpy(t):
return t.numpy()
@ag_core.primitive
def _as_gpu_tensor(t, index=0):
return t.as_gpu_tensor(gpu_index=index)
_as_gpu_tensor.defvjp(
lambda g, ans, vs, gvs, t, index: g.as_cpu_tensor(), argnum=0)
@ag_core.primitive
def _as_cpu_tensor(t):
return t.as_cpu_tensor()
_as_cpu_tensor.defvjp(
lambda g, ans, vs, gvs, t: g.as_gpu_tensor(), argnum=0)
class TensorNode(ag_core.Node):
"""A TensorFlow Tensor."""
__slots__ = []
def __getitem__(self, idx):
return array_ops._SliceHelper(self, idx) # pylint: disable=protected-access
shape = property(lambda self: self.value.shape)
dtype = property(lambda self: self.value.dtype)
def numpy(self):
return _tensor_numpy(self)
def _shape_tuple(self):
    return self.value._shape_tuple()  # pylint: disable=protected-access
def as_cpu_tensor(self):
return _as_cpu_tensor(self)
def as_gpu_tensor(self, gpu_index=0):
return _as_gpu_tensor(self, gpu_index)
def __bool__(self):
return self.value.__bool__() # pylint: disable=protected-access
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return len(self.value)
def __neg__(self):
return math_ops.negative(self)
def __abs__(self):
return math_ops.abs(self) # pylint: disable=protected-access
def __invert__(self):
return self.value.__invert__()
def __hash__(self):
return id(self)
def __add__(self, other):
if isinstance(self.value, tensor.LazyZero):
return other
if isinstance(other, tensor.LazyZero):
return self
return math_ops.add(self, other)
def __radd__(self, other):
if isinstance(self.value, tensor.LazyZero):
return other
if isinstance(ag_core.getval(other), tensor.LazyZero):
return self
return math_ops.add(other, self)
def __sub__(self, other):
return math_ops.subtract(self, other)
def __rsub__(self, other):
return math_ops.subtract(other, self)
def __mul__(self, other):
return math_ops.multiply(self, other)
def __rmul__(self, other):
return math_ops.multiply(other, self)
def __mod__(self, other):
return math_ops.floormod(self, other)
def __rmod__(self, other):
return math_ops.floormod(other, self)
def __pow__(self, other):
return math_ops.pow(self, other)
def __rpow__(self, other):
return math_ops.pow(other, self)
def __div__(self, other):
return math_ops._div_python2(self, other) # pylint: disable=protected-access
def __rdiv__(self, other):
return math_ops._div_python2(other, self) # pylint: disable=protected-access
def __truediv__(self, other):
return math_ops._truediv_python3(self, other) # pylint: disable=protected-access
def __rtruediv__(self, other):
return math_ops._truediv_python3(other, self) # pylint: disable=protected-access
def __floordiv__(self, other):
return math_ops.floordiv(self, other)
def __rfloordiv__(self, other):
return math_ops.floordiv(other, self)
def __eq__(self, other):
return control_flow_ops.equal(self, other) # pylint: disable=protected-access
def __ne__(self, other):
return control_flow_ops.not_equal(self, other) # pylint: disable=protected-access
def __gt__(self, other):
return math_ops.greater(self, other)
def __ge__(self, other):
return math_ops.greater_equal(self, other)
def __lt__(self, other):
return math_ops.less(self, other)
def __le__(self, other):
return math_ops.less_equal(self, other)
ag_core.register_node(TensorNode, tensor.Tensor)
ag_core.register_node(TensorNode, tf_ops.Tensor)
def _zeros(shape, dtype):
with context.device("cpu:0"):
shape = tensor.Tensor(shape, dtype=dtypes.int32)
return array_ops.fill(shape, tensor.Tensor(0, dtype=dtype))
def _ones(shape, dtype):
return array_ops.fill(tensor.Tensor(shape, dtype=dtypes.int32),
tensor.Tensor(1, dtype=dtype))
def _lazy_zero_tensor(zero):
return _zeros(zero.shape, zero.dtype)
tensor.LazyZero.tensor = _lazy_zero_tensor
def _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):
del as_ref, name, dtype
return _zeros(lazy_zero.shape, lazy_zero.dtype)
tf_ops.register_tensor_conversion_function(tensor.LazyZero,
_lazy_zero_to_tensor)
def _indexed_slices_to_tensor(value):
"""Converts an IndexedSlices object `value` to a Tensor.
Args:
value: An ops.IndexedSlices object.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
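  For example, values=[[1., 2.]], indices=[3], dense_shape=[5, 2] densifies
  to a 5x2 tensor whose row 3 is [1., 2.] and every other row is zero.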
"""
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0])
class TensorVSpace(ag_core.VSpace):
"""VSpace for tf/tfe Tensors in autograd."""
def __init__(self, value):
if isinstance(value, tensor.IndexedSlices):
self.shape = tensor_shape.TensorShape(value.dense_shape.numpy())
self.dtype = value.values.dtype
else:
self.shape = value.shape
self.dtype = value.dtype
self.size = self.shape.num_elements()
# TODO(apassos) put gradients on the same device as ops.
def __eq__(self, other):
if isinstance(other, tape.NoneVSpace):
return True
if self.dtype == dtypes.resource or other.dtype == dtypes.resource:
return True
return (type(self) == type(other) # pylint: disable=unidiomatic-typecheck
and self.dtype == other.dtype)
def __ne__(self, other):
return not self.__eq__(other)
def zeros(self):
return tensor.LazyZero(self.shape, self.dtype)
def ones(self):
return _ones(self.shape, self.dtype)
def standard_basis(self):
raise NotImplementedError
def flatten(self, value):
return array_ops.reshape(value, tensor.Tensor(-1))
def unflatten(self, value):
return array_ops.reshape(value, tensor.Tensor(self.shape))
def mut_add(self, x, y):
"""Add wrapper safe for IndexedSlices and LazyZero."""
if isinstance(ag_core.getval(x), tensor.LazyZero):
return y
if isinstance(ag_core.getval(y), tensor.LazyZero):
return x
if isinstance(x, tensor.IndexedSlices):
x = _indexed_slices_to_tensor(x)
if isinstance(y, tensor.IndexedSlices):
y = _indexed_slices_to_tensor(y)
return math_ops.add(x, y)
ag_core.register_vspace(TensorVSpace, tensor.Tensor)
ag_core.register_vspace(TensorVSpace, tf_ops.Tensor)
ag_core.register_vspace(TensorVSpace, tensor.IndexedSlices)
ag_core.register_vspace(TensorVSpace, tensor.LazyZero)
ag_core.register_node(TensorNode, tensor.LazyZero)
def _node_to_tensor(value, dtype=None, name=None, as_ref=False):
del as_ref
return tf_ops.convert_to_tensor(value.value, dtype=dtype, name=name)
tf_ops.register_tensor_conversion_function(TensorNode, _node_to_tensor)
| mit | 6,730,232,823,244,881,000 | 28.528369 | 86 | 0.68416 | false |
utah-scs/RAMCloud | scripts/clusterperf.py | 1 | 43425 | #!/usr/bin/env python
# Copyright (c) 2011-2017 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Runs one or more cluster benchmarks for RAMCloud, using cluster.py to
set up the cluster and ClusterPerf.cc to implement the details of the
benchmark.
"""
# TO ADD A NEW BENCHMARK:
# 1. Decide on a symbolic name for the new test.
# 2. Write code for the test in ClusterPerf.cc using the same test name (see
# instructions in ClusterPerf.cc for details).
# 3. If needed, create a driver function for the test (named after the test)
# in the "driver functions" section below. Many tests can just use the
# function "default". If you need to provide special arguments to
# cluster.run for your tests, or if the running of your test is unusual
# in some way (e.g., you call cluster.run several times or collect
# results from unusual places) then you'll need to write a test-specific
# driver function.
# 4. Create a new Test object in one of the tables simple_tests or
# graph_tests below, depending on the kind of test.
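# As an illustrative sketch (the test name "readNotFound" below is
# hypothetical, not one of the real tests): a benchmark whose output needs no
# special handling would add a matching test in ClusterPerf.cc plus a single
# entry such as
#     Test("readNotFound", default)
# in the simple_tests table, while a test that needs special cluster.run
# arguments would also get its own driver function modeled on basic() or
# broadcast() below.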
from __future__ import division, print_function
from common import *
import cluster
import config
import log
import glob
import os
import pprint
import re
import sys
import time
from optparse import OptionParser
# Each object of the following class represents one test that can be
# performed by this program.
class Test:
def __init__(self,
name, # Symbolic name for the test, used on the
# command line to run the test. This same
# name is normally used for the
# corresponding test in ClusterPerf.cc.
function # Python driver function for the test.
):
"""
Construct a Test object.
"""
self.name = name
self.function = function
def flatten_args(args):
"""
Given a dictionary of arguments, produce a string suitable for inclusion
in a command line, such as "--name1 value1 --name2 value2"
"""
return " ".join(["%s %s" % (name, value)
for name, value in args.iteritems()])
def get_client_log(
index = 1 # Client index (1 for first client,
# which is usually the one that's wanted)
):
"""
Given the index of a client, read the client's log file
from the current log directory and return its contents,
ignoring RAMCloud log messages (what's left should be a
    summary of the results from a test).
"""
globResult = glob.glob('%s/latest/client%d.*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
result = "";
for line in open(globResult[0], 'r'):
if not re.match('([0-9]+\.[0-9]+) ', line):
result += line
return result
def print_cdf_from_log(
        index = 1                      # Client index (1 for first client,
# which is usually the one that's wanted)
):
"""
Given the index of a client, print in gnuplot format a cumulative
distribution of the data in the client's log file (where "data" consists
of comma-separated numbers stored in all of the lines of the log file
that are not RAMCloud log messages). Each line in the printed output
will contain a fraction and a number, such that the given fraction of all
numbers in the log file have values less than or equal to the given number.
"""
# Read the log file into an array of numbers.
numbers = []
globResult = glob.glob('%s/latest/client%d.*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
result = "";
leader = '>>> '
for line in open(globResult[0], 'r'):
if re.match(leader, line):
continue
if not re.match('([0-9]+\.[0-9]+) ', line):
for value in line.split(","):
try:
numbers.append(float(value))
except ValueError, e:
print("Skipping, couldn't parse %s" % line)
# Generate a CDF from the array.
numbers.sort()
result = []
print("%8.2f %8.3f" % (0.0, 0.0))
print("%8.2f %8.3f" % (numbers[0], 1/len(numbers)))
for i in range(1, 100):
print("%8.2f %8.3f" % (numbers[int(len(numbers)*i/100)], i/100))
print("%8.2f %8.3f" % (numbers[int(len(numbers)*999/1000)], .999))
print("%8.2f %9.4f" % (numbers[int(len(numbers)*9999/10000)], .9999))
print("%8.2f %8.3f" % (numbers[-1], 1.0))
def print_rcdf_from_log(
index = 1 # Client index (1 for first client,
# which is usually the one that's wanted)
):
"""
Given the index of a client, print in gnuplot format a reverse cumulative
distribution of the data in the client's log file (where "data" consists
of comma-separated numbers stored in all of the lines of the log file
that are not RAMCloud log messages). Each line in the printed output
will contain a fraction and a number, such that the given fraction of all
numbers in the log file have values less than or equal to the given number.
"""
# Read the log file into an array of numbers.
numbers = []
globResult = glob.glob('%s/latest/client%d.*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
result = "";
leader = '>>> '
for line in open(globResult[0], 'r'):
if re.match(leader, line):
continue
if not re.match('([0-9]+\.[0-9]+) ', line):
for value in line.split(","):
try:
numbers.append(float(value))
except ValueError, e:
print("Skipping, couldn't parse %s" % line)
# Generate a RCDF from the array.
numbers.sort()
result = []
print("%8.2f %11.6f" % (numbers[0], 1.0))
for i in range(1, len(numbers)-1):
if (numbers[i] != numbers[i-1] or numbers[i] != numbers[i+1]):
print("%8.2f %11.6f" % (numbers[i], 1-(i/len(numbers))))
print("%8.2f %11.6f" % (numbers[-1], 1/len(numbers)))
def print_samples_from_log(
outfile = sys.stdout,
index = 1 # Client index (1 for first client,
# which is usually the one that's wanted)
):
"""
    Given the index of a client, find all lines starting with '>>> ', strip
    the leader off, and print them to outfile (stdout by default). Mostly
    used to extract samples/tables
to be passed on to R for postprocessing.
"""
# Read the log file into an array of numbers.
numbers = []
globResult = glob.glob('%s/latest/client%d.*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
leader = '>>> '
n = len(leader)
for line in open(globResult[0], 'r'):
if re.match(leader, line):
print(line[n:].strip(), file=outfile)
def print_rcdf_from_log_samples(
outfile = sys.stdout,
index = 1 # Client index (1 for first client,
# which is usually the one that's wanted)
):
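    # Like print_rcdf_from_log, but builds the reverse CDF from the duration
    # field (the third space-separated column) of the '>>> ' sample lines
    # emitted by the client, writing the result to outfile.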
# Read the log file into an array of numbers.
numbers = []
globResult = glob.glob('%s/latest/client%d.*.log' %
(options.log_dir, index))
if len(globResult) == 0:
raise Exception("couldn't find log file for client %d" % (index))
leader = '>>> '
n = len(leader)
for line in open(globResult[0], 'r'):
if re.match(leader, line):
line = line[n:].strip()
durationNs = line.split(' ')[2]
try:
numbers.append(float(durationNs))
except ValueError, e:
print("Skipping, couldn't parse %s" % line, file=sys.stderr)
if len(numbers) == 0:
return
# Generate a RCDF from the array.
numbers.sort()
result = []
print("%8.2f %11.6f" % (numbers[0], 1.0), file=outfile)
for i in range(1, len(numbers)-1):
if (numbers[i] != numbers[i-1] or numbers[i] != numbers[i+1]):
print("%8.2f %11.6f" % (numbers[i], 1-(i/len(numbers))),
file=outfile)
print("%8.2f %11.6f" % (numbers[-1], 1/len(numbers)), file=outfile)
def run_test(
test, # Test object describing the test to run.
options # Command-line options.
):
"""
Run a given test. The main value provided by this function is to
prepare a candidate set of options for cluster.run and another set
for the ClusterPerf clients, based on the command-line options.
"""
if options.seconds and options.timeout < options.seconds:
options.timeout = options.seconds * 2
cluster_args = {
'debug': options.debug,
'log_dir': options.log_dir,
'log_level': options.log_level,
'backup_disks_per_server': options.backup_disks_per_server,
'num_servers': options.num_servers,
'replicas': options.replicas,
'timeout': options.timeout,
'share_hosts': True,
'transport': options.transport,
'disjunct': options.disjunct,
'verbose': options.verbose,
'superuser': options.superuser
}
# Provide a default value for num_servers here. This is better
# than defaulting it in the OptionParser below, because tests can
# see whether or not an actual value was specified and provide a
# test-specific default.
if cluster_args['num_servers'] == None:
# Make sure there are enough servers to meet replica requirements.
cluster_args['num_servers'] = options.replicas+1
if options.num_clients != None:
cluster_args['num_clients'] = options.num_clients
if options.master_args != None:
cluster_args['master_args'] = options.master_args
if options.dpdk_port != None:
cluster_args['dpdk_port'] = options.dpdk_port
client_args = {}
if options.count != None:
client_args['--count'] = options.count
if options.size != None:
client_args['--size'] = options.size
if options.numObjects != None:
client_args['--numObjects'] = options.numObjects
if options.numTables != None:
client_args['--numTables'] = options.numTables
if options.warmup != None:
client_args['--warmup'] = options.warmup
if options.workload != None:
client_args['--workload'] = options.workload
if options.targetOps != None:
client_args['--targetOps'] = options.targetOps
if options.txSpan != None:
client_args['--txSpan'] = options.txSpan
if options.asyncReplication != None:
client_args['--asyncReplication'] = options.asyncReplication
if options.numIndexlet != None:
client_args['--numIndexlet'] = options.numIndexlet
if options.numIndexes != None:
client_args['--numIndexes'] = options.numIndexes
if options.numVClients != None:
client_args['--numVClients'] = options.numVClients
if options.migratePercentage != None:
client_args['--migratePercentage'] = options.migratePercentage
if options.spannedOps != None:
client_args['--spannedOps'] = options.spannedOps
if options.fullSamples:
client_args['--fullSamples'] = ''
if options.seconds:
client_args['--seconds'] = options.seconds
test.function(test.name, options, cluster_args, client_args)
#-------------------------------------------------------------------
# Driver functions follow below. These functions are responsible for
# invoking ClusterPerf via cluster.py, and they collect and print
# result data. Simple tests can just use the "default" driver function.
#-------------------------------------------------------------------
def default(
name, # Name of this test; passed through
# to ClusterPerf verbatim.
options, # The full set of command-line options.
cluster_args, # Proposed set of arguments to pass to
# cluster.run (extracted from options).
# Individual tests can override as
# appropriate for the test.
client_args, # Proposed set of arguments to pass to
# ClusterPerf (via cluster.run).
# Individual tests can override as
# needed for the test.
):
"""
This function is used as the invocation function for most tests;
it simply invokes ClusterPerf via cluster.run and prints the result.
"""
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def basic(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 4000'
if cluster_args['timeout'] < 250:
cluster_args['timeout'] = 250
default(name, options, cluster_args, client_args)
def broadcast(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 10
default(name, options, cluster_args, client_args)
def echo(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 4000'
if cluster_args['timeout'] < 250:
cluster_args['timeout'] = 250
cluster_args['replicas'] = 0
if options.num_servers == None:
cluster_args['num_servers'] = 1
default(name, options, cluster_args, client_args)
def indexBasic(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
if cluster_args['timeout'] < 200:
cluster_args['timeout'] = 200
# Ensure at least 5 hosts for optimal performance
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
default(name, options, cluster_args, client_args)
def indexRange(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
if cluster_args['timeout'] < 360:
cluster_args['timeout'] = 360
if '--numObjects' not in client_args:
client_args['--numObjects'] = 1000
if '--warmup' not in client_args:
client_args['--warmup'] = 10
if '--count' not in client_args:
client_args['--count'] = 90
# Ensure at least 5 hosts for optimal performance
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
default(name, options, cluster_args, client_args)
def indexMultiple(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 2'
if cluster_args['timeout'] < 360:
cluster_args['timeout'] = 360
    # Ensure at least 15 hosts for optimal performance
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
# use a maximum of 10 secondary keys
if len(getHosts()) <= 10:
# Hack until synchronization bug in write RPC handler
# in MasterService is resolved. This bug prevents us from using more
# than 1 MasterSerivice thread. However, we need to use more than 1
# service thread, otherwise if a tablet and its corresponding
# indexlet end up on the same server, we will have a deadlock.
# For now, make sure that we never wrap around the server list
# Once the bug is resolved, we should be able to use len(getHosts())
# for numIndexes
client_args['--numIndexes'] = len(getHosts()) - 1
else:
client_args['--numIndexes'] = 10
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name), **cluster_args)
print(get_client_log(), end='')
def indexScalability(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 3'
if cluster_args['timeout'] < 360:
cluster_args['timeout'] = 360
cluster_args['backup_disks_per_server'] = 0
cluster_args['replicas'] = 0
# Number of concurrent rpcs to do per indexlet
if '--count' not in client_args:
client_args['--count'] = 20
# Number of objects per read request
if '--numObjects' not in client_args:
client_args['--numObjects'] = 1
# Ensure at least 15 hosts for optimal performance
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 10
default(name, options, cluster_args, client_args)
def indexWriteDist(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
if cluster_args['timeout'] < 200:
cluster_args['timeout'] = 200
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
if '--count' not in client_args:
client_args['--count'] = 10000
if '--numObjects' not in client_args:
client_args['--numObjects'] = 1000000
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name), **cluster_args)
print("# Cumulative distribution of time for a single client to write\n"
"# %d %d-byte objects to a table with one index and %d\n"
"# initial objects. Each object has two 30-byte keys and a 100\n"
"# byte value. Each line indicates that a given fraction of all\n"
"# reads took at most a given time to complete.\n"
"#\n"
"# Generated by 'clusterperf.py readDist'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% (client_args['--count'], options.size, client_args['--numObjects'] ))
print_cdf_from_log()
def indexReadDist(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
if cluster_args['timeout'] < 200:
cluster_args['timeout'] = 200
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
if '--count' not in client_args:
client_args['--count'] = 10000
if '--numObjects' not in client_args:
client_args['--numObjects'] = 1000000
if '--warmup' not in client_args:
client_args['--warmup'] = 100
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name), **cluster_args)
print("# Cumulative distribution of time for a single client to read\n"
"# %d %d-byte objects to a table with one index and %d\n"
"# initial objects. Each object has two 30-byte keys and a 100\n"
"# byte value. Each line indicates that a given fraction of all\n"
"# reads took at most a given time to complete.\n"
"#\n"
"# Generated by 'clusterperf.py readDist'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% (client_args['--count'], options.size, client_args['--numObjects'] ))
print_cdf_from_log()
def transactionDist(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 2000'
if options.numTables == None:
client_args['--numTables'] = 1
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
print("# Cumulative distribution of time for a single client to commit a\n"
"# transactional read-write on a single %d-byte object from a\n"
"# single server. Each line indicates that a given fraction of all\n"
"# commits took at most a given time to complete.\n"
"# Generated by 'clusterperf.py %s'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% (options.size, name))
if (options.rcdf):
print_rcdf_from_log()
else:
print_cdf_from_log()
def transactionThroughput(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 2000'
if cluster_args['timeout'] < 250:
cluster_args['timeout'] = 250
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = len(getHosts()) - cluster_args['num_servers']
if cluster_args['num_clients'] < 2:
print("Not enough machines in the cluster to run the '%s' benchmark"
% name)
print("Need at least 2 machines in this configuration")
return
if options.numTables == None:
client_args['--numTables'] = 1
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name), **cluster_args)
for i in range(1, cluster_args['num_clients'] + 1):
print(get_client_log(i), end='')
def multiOp(name, options, cluster_args, client_args):
if cluster_args['timeout'] < 100:
cluster_args['timeout'] = 100
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
print(get_client_log(), end='')
def netBandwidth(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 2*len(config.getHosts())
if options.num_servers == None:
cluster_args['num_servers'] = cluster_args['num_clients']
if cluster_args['num_servers'] > len(config.getHosts()):
cluster_args['num_servers'] = len(config.getHosts())
if options.size != None:
client_args['--size'] = options.size
else:
        client_args['--size'] = 1024*1024
default(name, options, cluster_args, client_args)
def readAllToAll(name, options, cluster_args, client_args):
cluster_args['backup_disks_per_server'] = 0
cluster_args['replicas'] = 0
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = len(getHosts())
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
default(name, options, cluster_args, client_args)
def readDist(name, options, cluster_args, client_args):
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
print("# Cumulative distribution of time for a single client to read a\n"
"# single %d-byte object from a single server. Each line indicates\n"
"# that a given fraction of all reads took at most a given time\n"
"# to complete.\n"
"# Generated by 'clusterperf.py readDist'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% options.size)
print_cdf_from_log()
def readDistRandom(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 1000'
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
print("# Cumulative distribution of time for a single client to read a\n"
"# random %d-byte object from a single server. Each line indicates\n"
"# that a given fraction of all reads took at most a given time\n"
"# to complete.\n"
"# Generated by 'clusterperf.py readDist'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% options.size)
if (options.rcdf):
print_rcdf_from_log()
else:
print_cdf_from_log()
def readLoaded(name, options, cluster_args, client_args):
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 20
default(name, options, cluster_args, client_args)
def readRandom(name, options, cluster_args, client_args):
cluster_args['backup_disks_per_server'] = 0
cluster_args['replicas'] = 0
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 16
if options.num_servers == None:
cluster_args['num_servers'] = 1
    client_args['--numTables'] = cluster_args['num_servers']
default(name, options, cluster_args, client_args)
# This method is also used for multiReadThroughput and
# linearizableWriteThroughput
def readThroughput(name, options, cluster_args, client_args):
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 2000'
if cluster_args['timeout'] < 250:
cluster_args['timeout'] = 250
if 'num_clients' not in cluster_args:
# Clients should not share a machine with coordinator by default.
cluster_args['num_clients'] = len(getHosts()) - \
cluster_args['num_servers'] - 1
if cluster_args['num_clients'] < 2:
print("Not enough machines in the cluster to run the '%s' benchmark"
% name)
print("Need at least 2 machines in this configuration")
return
default(name, options, cluster_args, client_args)
def txCollision(name, options, cluster_args, client_args):
if cluster_args['timeout'] < 100:
cluster_args['timeout'] = 100
if options.num_servers == None:
cluster_args['num_servers'] = len(getHosts())
#client_args['--numTables'] = cluster_args['num_servers'];
if 'num_clients' not in cluster_args:
cluster_args['num_clients'] = 5
default(name, options, cluster_args, client_args)
def writeDist(name, options, cluster_args, client_args):
if cluster_args['timeout'] < 40:
cluster_args['timeout'] = 40
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 2000'
cluster_args['disjunct'] = True
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
print("# Cumulative distribution of time for a single client to write a\n"
"# single %d-byte object from a single server. Each line indicates\n"
"# that a given fraction of all writes took at most a given time\n"
"# to complete.\n"
"# Generated by 'clusterperf.py %s'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% (options.size, name))
if (options.rcdf):
print_rcdf_from_log()
else:
print_cdf_from_log()
def workloadDist(name, options, cluster_args, client_args):
if not options.extract:
if 'master_args' not in cluster_args:
cluster_args['master_args'] = '-t 2000'
cluster_args['disjunct'] = True
cluster.run(client='%s/apps/ClusterPerf %s %s' %
(config.hooks.get_remote_obj_path(),
flatten_args(client_args), name),
**cluster_args)
if options.fullSamples:
import gzip
with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
print_rcdf_from_log_samples(rcdf_file)
with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
print_samples_from_log(samples_file)
else:
print("# Cumulative distribution latencies for operations specified by\n"
"# the benchmark.\n#\n"
"# Generated by 'clusterperf.py %s'\n#\n"
"# Time (usec) Cum. Fraction\n"
"#---------------------------"
% (name))
if (options.rcdf):
print_rcdf_from_log()
else:
print_cdf_from_log()
def defaultTo(config, field, value):
"""If the field is already in the config dict, do nothing, else set field
in the dict to value. """
if field not in config:
config[field] = value
def calculatePerClientTarget(workload, clients, percentage):
"""Given a workload 'YCSB-A', etc. and a count of client return the
targetOps rate that each client should run to keep a single server at
percentage of peak load.
"""
peak = 0
if workload == 'YCSB-A':
peak = 300 * 1000
elif workload == 'YCSB-B':
peak = 815 * 1000
elif workload == 'YCSB-C':
peak = 1024 * 1000
else:
raise Exception('Unknown peak rate for workload %s' % workload)
return int(peak * (percentage / 100.0) / int(clients))
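# Illustrative example (values assumed from the peak table above): with the
# YCSB-A workload, 16 clients and a 90% target load, each client is asked to
# run at int(300000 * 0.90 / 16) = 16875 operations per second.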
def migrateLoaded(name, options, cluster_args, client_args):
if not options.extract:
clients = options.num_clients
servers = options.num_servers # len(getHosts()) - clients - 1
if servers < 4:
raise Exception('Not enough servers: only %d left' % servers)
if clients < 16:
print('!!! WARNING !!! Use 16 clients to ensure enough load for ' +
'real experiments !!! WARNING !!!', file=sys.stderr)
cluster_args['num_servers'] = servers
# Use two backups per server for more disk bandwidth.
defaultTo(cluster_args, 'backup_disks_per_server', 2)
# Need lots of mem for big workload and migration.
defaultTo(cluster_args, 'master_args',
'-t 18000 --segmentFrames 8192')
# Sixteen clients to try to generate enough load to keep things at
# about 90% load.
cluster_args['num_clients'] = clients
# Can take awhile due to fillup and migration.
if cluster_args['timeout'] < 300:
cluster_args['timeout'] = 300
# We're really interested in jitter on servers; better keep the clients
# off the server machines.
cluster_args['disjunct'] = True
        # Can't use defaultTo for --workload because the command line
        # already supplies a default value for it.
# 1 million * 100 B ~= 100 MB table
defaultTo(client_args, '--numObjects', 1 * 1000 * 1000)
# Set clients up to keep server at 90% load.
defaultTo(client_args, '--targetOps',
calculatePerClientTarget(
client_args['--workload'], clients,
options.loadPct))
# Turn on timestamps on latency samples.
defaultTo(client_args, '--fullSamples', '')
name = 'readDistWorkload'
        cluster.run(client='%s/apps/ClusterPerf %s %s' %
                    (config.hooks.get_remote_obj_path(),
                     flatten_args(client_args), name),
                    **cluster_args)
import gzip
with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
print_rcdf_from_log_samples(rcdf_file)
with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
print_samples_from_log(samples_file)
#-------------------------------------------------------------------
# End of driver functions.
#-------------------------------------------------------------------
# The following tables define all of the benchmarks supported by this program.
# The benchmarks are divided into two groups:
# * simple_tests describes tests that output one or more individual
# performance metrics
# * graph_tests describe tests that generate one graph per test; the graph
# output is in gnuplot format with comments describing the data.
simple_tests = [
Test("basic", basic),
Test("broadcast", broadcast),
Test("echo_basic", echo),
Test("echo_incast", echo),
Test("multiRead_colocation", default),
Test("netBandwidth", netBandwidth),
Test("readAllToAll", readAllToAll),
Test("readNotFound", default),
]
graph_tests = [
Test("indexBasic", indexBasic),
Test("indexRange", indexRange),
Test("indexMultiple", indexMultiple),
Test("indexScalability", indexScalability),
Test("indexReadDist", indexReadDist),
Test("indexWriteDist", indexWriteDist),
Test("multiRead_general", multiOp),
Test("multiRead_generalRandom", multiOp),
Test("multiRead_oneMaster", multiOp),
Test("multiRead_oneObjectPerMaster", multiOp),
Test("multiReadThroughput", readThroughput),
Test("multiWrite_oneMaster", multiOp),
Test("readDist", readDist),
Test("readDistRandom", readDistRandom),
Test("readDistWorkload", workloadDist),
Test("readInterference", default),
Test("readLoaded", readLoaded),
Test("readRandom", readRandom),
Test("readThroughput", readThroughput),
Test("readVaryingKeyLength", default),
Test("transaction_collision", txCollision),
Test("transaction_oneMaster", multiOp),
Test("transactionContention", transactionThroughput),
Test("transactionDistRandom", transactionDist),
Test("transactionThroughput", transactionThroughput),
Test("writeAsyncSync", default),
Test("writeVaryingKeyLength", default),
Test("writeDist", writeDist),
Test("writeDistRandom", writeDist),
Test("writeDistWorkload", workloadDist),
Test("writeInterference", default),
Test("writeThroughput", readThroughput),
Test("workloadThroughput", readThroughput),
Test("migrateLoaded", migrateLoaded),
]
if __name__ == '__main__':
parser = OptionParser(description=
'Run one or more performance benchmarks on a RAMCloud cluster. Each '
'test argument names one test to run (default: run a selected subset '
'of useful benchmarks; "all" means run all benchmarks). Not all options '
'are used by all benchmarks.',
usage='%prog [options] test test ...',
conflict_handler='resolve')
parser.add_option('-n', '--clients', type=int,
metavar='N', dest='num_clients',
help='Number of instances of the client application '
'to run')
parser.add_option('-c', '--count', type=int,
metavar='N', dest='count',
help='Number of times to perform the operation')
parser.add_option('--disjunct', action='store_true', default=False,
metavar='True/False',
help='Do not colocate clients on a node (servers are never '
'colocated, regardless of this option)')
parser.add_option('--debug', action='store_true', default=False,
help='Pause after starting servers but before running '
'clients to enable debugging setup')
parser.add_option('-d', '--logDir', default='logs', metavar='DIR',
dest='log_dir',
help='Top level directory for log files; the files for '
'each invocation will go in a subdirectory.')
parser.add_option('-l', '--logLevel', default='NOTICE',
choices=['DEBUG', 'NOTICE', 'WARNING', 'ERROR', 'SILENT'],
metavar='L', dest='log_level',
help='Controls degree of logging in servers')
parser.add_option('-b', '--numBackupDisks', type=int, default=2,
metavar='N', dest='backup_disks_per_server',
help='Number of backup disks to use on each server host '
'(0, 1, or 2)')
parser.add_option('-r', '--replicas', type=int, default=3,
metavar='N',
help='Number of disk backup copies for each segment')
parser.add_option('--servers', type=int,
metavar='N', dest='num_servers',
help='Number of hosts on which to run servers')
parser.add_option('-s', '--size', type=int, default=100,
help='Object size in bytes')
parser.add_option('--numObjects', type=int,
help='Number of objects per operation.')
parser.add_option('--numTables', type=int,
help='Number of tables involved.')
parser.add_option('-t', '--timeout', type=int, default=30,
metavar='SECS',
help="Abort if the client application doesn't finish within "
'SECS seconds')
parser.add_option('-m', '--masterArgs', metavar='mARGS',
dest='master_args',
help='Additional command-line arguments to pass to '
'each master')
parser.add_option('--dpdkPort', type=int, dest='dpdk_port',
help='Ethernet port that the DPDK driver should use')
parser.add_option('-T', '--transport', default='basic+infud',
help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Print progress messages')
parser.add_option('-w', '--warmup', type=int,
            help='Number of times to execute the operation before '
'starting measurements')
parser.add_option('--workload', default='YCSB-A',
choices=['YCSB-A', 'YCSB-B', 'YCSB-C', 'WRITE-ONLY'],
help='Name of workload to run on extra clients to generate load')
parser.add_option('--targetOps', type=int,
help='Operations per second that each load generating client '
'will try to achieve')
parser.add_option('--txSpan', type=int,
            help='Number of servers a transaction should span.')
parser.add_option('--asyncReplication',
            help='Send update RPCs that do not wait for replication.')
parser.add_option('-i', '--numIndexlet', type=int,
help='Number of indexlets for measuring index scalability ')
parser.add_option('-k', '--numIndexes', type=int,
help='Number of secondary keys/object to measure index operations')
parser.add_option('--numVClients', type=int,
metavar='N', dest='numVClients',
help='Number of virtual clients each client instance should '
'simulate')
parser.add_option('--rcdf', action='store_true', default=False,
dest='rcdf',
help='Output reverse CDF data instead.')
parser.add_option('--migratePercentage', type=int, dest='migratePercentage',
help='For readDistWorkload and writeDistWorkload, the percentage '
                 'of the first table to migrate in the middle of the '
'benchmark. If 0 (the default), then no migration is done.')
parser.add_option('--spannedOps', type=int, dest='spannedOps',
help='Number of objects per multiget that should come from '
'different servers than the rest for multiRead_colocation.')
parser.add_option('--seconds', type=int, default=10, dest='seconds',
help='For doWorkload based workloads, exit benchmarks after about '
'this many seconds.')
parser.add_option('--parse', action='store_true', default=False,
dest='parse',
help='Just output CDF data from latest client log without running '
'anything.')
parser.add_option('--extract',
action='store_true', default=False, dest='extract',
help='For some experiments skip re-running, just parse the '
'latest output file and dump the results.')
parser.add_option('--loadPct', type=int, default=90, dest='loadPct',
            help='For doWorkload based workloads, the percentage of peak load '
                 'at which each server should be driven.')
parser.add_option('--fullSamples',
action='store_true', default=False, dest='fullSamples',
help='Run with alternate sample format that includes sample '
'timestamps along with their durations.')
parser.add_option('--superuser', action='store_true', default=False,
help='Start the cluster and clients as superuser')
(options, args) = parser.parse_args()
if options.parse:
if options.rcdf:
print_rcdf_from_log()
else:
print_cdf_from_log()
raise SystemExit()
# Invoke the requested tests (run all of them if no tests were specified)
try:
if len(args) == 1 and args[0] == 'all':
# Run all of the tests.
for test in simple_tests:
run_test(test, options)
for test in graph_tests:
run_test(test, options)
else:
if len(args) == 0:
# Provide a default set of tests to run (the most useful ones).
args = ["basic",
"echo_basic",
"multiRead_oneMaster",
"multiRead_oneObjectPerMaster",
"multiReadThroughput",
"multiWrite_oneMaster",
"readDistRandom",
"writeDistRandom",
"readThroughput",
"readVaryingKeyLength",
"writeVaryingKeyLength",
"indexBasic",
"indexMultiple",
"transaction_oneMaster"
]
for name in args:
for test in simple_tests:
if test.name == name:
run_test(test, options)
break
else:
for test in graph_tests:
if test.name == name:
run_test(test, options)
break
else:
print("No clusterperf test named '%s'" % (name))
finally:
logInfo = log.scan("%s/latest" % (options.log_dir),
["WARNING", "ERROR"],
["starting new cluster from scratch",
"Ping timeout to server"])
if len(logInfo) > 0:
print(logInfo, file=sys.stderr)
| isc | -7,458,043,584,022,874,000 | 41.783251 | 86 | 0.59468 | false |
sarielsaz/sarielsaz | test/functional/txn_clone.py | 1 | 7604 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
class TxnMallTest(SarielsazTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 BTC serialized is 00286bee00000000
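        # (Illustrative breakdown, counting in hex characters: pos0 =
        # 2 * (4-byte version + 1-byte input count + 36-byte outpoint +
        # 1-byte empty scriptSig + 4-byte sequence + 1-byte output count) = 94;
        # output_len below covers the 8-byte value, the script-length byte and
        # the scriptPubKey itself.)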
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit | 991,336,729,252,938,200 | 48.058065 | 111 | 0.595476 | false |
kluge-iitk/Project-Euler | 58_spiral_primes.py | 1 | 1635 | """
Problem 58:
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right
diagonal, but what is more interesting is that 8 out of the 13 numbers
lying along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above,
a square spiral with side length 9 will be formed.
If this process is continued, what is the side length of the square spiral
for which the ratio of primes along both diagonals first falls below 10%?
"""
# stuff to note
# lower right diagonal - squares of odd integers (3^2, 5^2)
# lower left diagonal - squares of even integers + that even integer + 1 (2^2+3)
# upper left diagonal - squares of even integers + 1 (2^2 + 1)
# upper right diagonal - squares of odd integers + that odd integer + 1 (1^2 + 2)
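# Worked check (illustrative): the loop below uses row = 1 for the ring of side
# length 3, giving corners 1^2+1+1 = 3, 2^2+1 = 5 and 2^2+1+2 = 7; the fourth
# corner, 3^2 = 9, is an odd square and never prime, so it is not tested.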
import time
def is_prime(n):
if n < 2:
return False
if n == 2:
return True
if n%2 == 0:
return False
for j in range(3, round(n**0.5)+1, 2):
if n%j == 0:
return False
return True
start = time.time()
nprimes = 0
ntotal = 1
row = 1
while True:
new_entries = (row**2 + row + 1, (row+1)**2 + 1, (row+1)**2 + row + 2)
nprimes += len(tuple(filter(None, map(is_prime, new_entries))))
ntotal += 4
if nprimes/ntotal < 0.1:
break
row += 2
print(row+2)
print(time.time() - start)
| mit | 5,847,286,381,652,083,000 | 26.677966 | 81 | 0.652786 | false |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/tests/saver_test.py | 1 | 4093 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Save and restore tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
class SaverTest(tf.test.TestCase):
"""Save and restore tests."""
def testIris(self):
path = tf.test.get_temp_dir() + '/tmp.saver'
random.seed(42)
iris = datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = learn.LinearClassifier(
feature_columns=cont_features, n_classes=3, model_dir=path)
classifier.fit(iris.data, iris.target, steps=200)
# TODO(ipolosukhin): Remove or restore.
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testCustomModel(self):
path = tf.test.get_temp_dir() + '/tmp.saver2'
random.seed(42)
iris = datasets.load_iris()
def _custom_model(x, y):
return learn.models.logistic_regression(x, y)
classifier = learn.TensorFlowEstimator(model_fn=_custom_model, n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
# TODO(ipolosukhin): Remove or restore.
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testDNN(self):
random.seed(42)
iris = datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = learn.DNNClassifier(feature_columns=cont_features,
hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target, max_steps=100)
# TODO(ipolosukhin): Remove or restore.
# path = tf.test.get_temp_dir() + '/tmp_saver3'
# classifier.save(path)
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testNoFolder(self):
with self.assertRaises(ValueError):
learn.TensorFlowEstimator.restore('no_model_path')
def testNoCheckpoints(self):
random.seed(42)
iris = datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = learn.DNNClassifier(feature_columns=cont_features,
hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target, max_steps=100)
# TODO(ipolosukhin): Remove or restore.
# path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
# classifier.save(path)
# os.remove(os.path.join(path, 'checkpoint'))
# with self.assertRaises(NotImplementedError):
# learn.TensorFlowEstimator.restore(path)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 724,406,720,324,615,200 | 39.127451 | 80 | 0.664794 | false |
thetreerat/WaterPump | WaterPumps/leds.py | 1 | 4201 | # Author: Harold Clark
# Copyright Harold Clark 2017
#
try:
import lib.uasyncio as asyncio
except ImportError:
import uasyncio as asyncio
from utime import time
import machine
from WaterPumps.events import Event
class led(object):
def __init__(self,ledPin=0, name='Not Defined'):
"""Init a single color led object"""
self.powerPin = machine.Pin(ledPin, machine.Pin.OUT)
self._name = name
self.ledOnEvent = Event()
self.ledOffEvent = Event()
def name(self):
return self._name
async def monitorLED(self, debug=False):
print('''%s - %s: monitorLED Started''' % (self.name(), time()))
while True:
            if self.ledOnEvent.is_set():
                self.powerPin.value(True)
                self.ledOnEvent.clear()
            if self.ledOffEvent.is_set():
                self.powerPin.value(False)
                self.ledOffEvent.clear()
await asyncio.sleep_ms(80)
def registerOnEvent(self):
return self.ledOnEvent
def registerOffEvent(self):
return self.ledOffEvent
class triLed(object):
LED_BLUE = (True, False, True)
LED_RED = (False, True, True)
LED_GREEN = (True, True, False)
LED_YELLOW = (False, True, False)
LED_ORANGE = (False, False, True)
LED_UNKNOWN = (True, False, False)
LED_WHITE = (False, False, False)
LED_OFF = (True, True, True)
def __init__(self, redpin, bluepin, greenpin,name='Test', startColor=None):
"""Init a Tri color led object"""
self.redPin = machine.Pin(redpin, machine.Pin.OUT)
self.bluePin = machine.Pin(bluepin, machine.Pin.OUT)
self.greenPin = machine.Pin(greenpin, machine.Pin.OUT)
self.lastColor = None
self.ledServerList = []
self._name = name
self.flashEvent = Event()
if startColor==None:
self.setStartColor(self.LED_OFF)
else:
self.setStartColor(startColor)
def name(self):
return self._name
def setStartColor(self, color):
R, B, G = color
self.redPin.value(R)
self.bluePin.value(B)
self.greenPin.value(G)
#maybe replaced need to look into hed 6/17/17
def registerLedClient(self, testTuple, index=0, debug=False):
if len(testTuple)==4:
self.ledServerList.insert(index, testTuple)
if debug:
print(len(self.ledServerList))
print(self.ledServerList[-1])
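    # Illustrative usage (names assumed, not part of this module): register a
    # rule that turns the LED red while some error event is set and clears a
    # trigger event once the rule fires:
    #   led.registerLedClient((((error_event.is_set, True),),
    #                          led.setColor, triLed.LED_RED, trigger_event))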
async def monitorLED(self, debug=False):
"""coroutine for monitor event to change the LED color"""
print('''%s - %s: monitorLED Started''' % (self.name(), time()))
mainLoop = asyncio.get_event_loop()
mainLoop.create_task(self.setColor(self.LED_BLUE))
while True:
for pair, func, color, clear in self.ledServerList:
v = True
for event, test in pair:
v = event()==test and v
if v:
if debug:
print('''%s - %s: v is: %s, last color is: %s, current color is: %s''' % (self._name, time(), v, self.lastColor, color))
if self.lastColor!=color:
self.lastColor = color
mainLoop.create_task(func(color))
print('''%s - %s: added task to loop, color: %s''' % (self._name, time(), color))
if clear!=None:
clear.clear()
break
await asyncio.sleep_ms(80)
async def setColor(self, color):
"""set TriColor LED to pass color (RBG)"""
R, B, G = color
self.redPin.value(R)
self.bluePin.value(B)
self.greenPin.value(G)
async def flash(self, color=None):
import uasyncio as asyncio
self.flashEvent.set()
if color==None:
color=self.LED_RED
while self.flashEvent.is_set():
            await self.setColor(color)
await asyncio.sleep_ms(100)
            await self.setColor(self.LED_OFF)
await asyncio.sleep_ms(100) | mit | 5,557,994,721,407,910,000 | 33.442623 | 144 | 0.541776 | false |
Scille/parsec-cloud | tests/core/mountpoint/test_file_operations.py | 1 | 3497 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import os
import sys
import pytest
from hypothesis.stateful import RuleBasedStateMachine, initialize, rule, run_state_machine_as_test
from hypothesis import strategies as st
# Just an arbitrary value to limit the size of data hypothesis generates
# for read/write operations
BALLPARK = 10000
@pytest.mark.slow
@pytest.mark.mountpoint
@pytest.mark.skipif(sys.platform == "darwin", reason="TODO: Infinitely looping on macOS")
def test_file_operations(tmpdir, caplog, hypothesis_settings, mountpoint_service_factory):
tentative = 0
class FileOperationsStateMachine(RuleBasedStateMachine):
@initialize()
def init(self):
nonlocal tentative
tentative += 1
caplog.clear()
wpath = None
async def _bootstrap(user_fs, mountpoint_manager):
nonlocal wpath
wid = await user_fs.workspace_create("w")
wpath = await mountpoint_manager.mount_workspace(wid)
self.mountpoint_service = mountpoint_service_factory(_bootstrap)
self.oracle_file_path = str(tmpdir / f"oracle-test-{tentative}")
self.file_path = str(wpath / "bar.txt")
self.oracle_fd = os.open(self.oracle_file_path, os.O_RDWR | os.O_CREAT)
self.fd = os.open(self.file_path, os.O_RDWR | os.O_CREAT)
def teardown(self):
self.mountpoint_service.stop()
@rule(size=st.integers(min_value=0, max_value=BALLPARK))
def read(self, size):
expected_data = os.read(self.oracle_fd, size)
data = os.read(self.fd, size)
assert data == expected_data
@rule(content=st.binary(max_size=BALLPARK))
def write(self, content):
expected_ret = os.write(self.oracle_fd, content)
ret = os.write(self.fd, content)
assert ret == expected_ret
@rule(
length=st.integers(min_value=-BALLPARK, max_value=BALLPARK),
seek_type=st.one_of(st.just(os.SEEK_SET), st.just(os.SEEK_CUR), st.just(os.SEEK_END)),
)
def seek(self, length, seek_type):
if seek_type != os.SEEK_END:
length = abs(length)
try:
pos = os.lseek(self.fd, length, seek_type)
except OSError:
# Invalid length/seek_type couple
with pytest.raises(OSError):
os.lseek(self.oracle_fd, length, seek_type)
else:
expected_pos = os.lseek(self.oracle_fd, length, seek_type)
assert pos == expected_pos
@rule(length=st.integers(min_value=0, max_value=BALLPARK))
def truncate(self, length):
os.ftruncate(self.fd, length)
os.ftruncate(self.oracle_fd, length)
@rule()
def sync(self):
os.fsync(self.fd)
os.fsync(self.oracle_fd)
@rule()
def stat(self):
stat = os.fstat(self.fd)
oracle_stat = os.fstat(self.oracle_fd)
assert stat.st_size == oracle_stat.st_size
@rule()
def reopen(self):
os.close(self.fd)
self.fd = os.open(self.file_path, os.O_RDWR)
os.close(self.oracle_fd)
self.oracle_fd = os.open(self.oracle_file_path, os.O_RDWR)
run_state_machine_as_test(FileOperationsStateMachine, settings=hypothesis_settings)
| agpl-3.0 | -9,145,865,238,727,756,000 | 34.323232 | 98 | 0.594796 | false |
google/meterstick | confidence_interval_display_test.py | 1 | 20694 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for confidence_interval_display."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import confidence_interval_display
import numpy as np
import pandas as pd
from pandas import testing
import unittest
DF_WITH_DIMENSIONS = pd.DataFrame({
'CI_Lower': [None, None, -5.035, 0.73],
'CI_Upper': [None, None, 2.235, 4.85],
'Control_Id': ['expr_foo', 'expr_foo', 'expr_foo', 'expr_foo'],
'Control_Value': [None, None, 0.780933, 0.21599],
'Country': ['GB', 'US', 'GB', 'US'],
'Type': ['WEB', 'WEB', 'WEB', 'WEB'],
'Experiment_Id': ['expr_foo', 'expr_foo', 42, 42],
'Is_Control': [True, True, False, False],
'Metric': ['PLA_CONV_CVR', 'PLA_CONV_CVR', 'PLA_CONV_CVR', 'PLA_CONV_CVR'],
'Ratio': [None, None, -1.4, 2.79],
'Value': [0.787, 0.216, 0.77, 0.222026]
})
DF_NO_DIMENSION = pd.DataFrame({
'CI_Lower': [None, 5],
'CI_Upper': [None, 15.000],
'Control_Id': ['expr_foo', 'expr_foo'],
'Control_Value': [None, 1],
'Experiment_Id': [2, 'expr_bar'],
'Is_Control': [True, False],
'Metric': ['metric_foo', 'metric_foo'],
'Ratio': [None, 10],
'Value': [1, 1.10]
})
LINE_BREAK = confidence_interval_display.LINE_BREAK
class DisplayMetricsTest(unittest.TestCase):
def test_normal(self):
expected = pd.DataFrame(
{
'Country': [
'<div>GB</div>',
'<div>GB</div>',
'<div>US</div>',
'<div>US</div>',
],
'Experiment_Id': [
'<div>expr_foo</div>',
'<div>42</div>',
'<div>expr_foo</div>',
'<div>42</div>',
],
'PLA_CONV_CVR': [
'<div class="ci-display-cell">0.7870</div>',
LINE_BREAK.join((
'<div class="ci-display-cell"><div>0.7700',
'<span class="ci-display-ratio">-1.4000</span>',
'<span class="ci-display-ci-range">[-5.0350, 2.2350]</span>'
'</div></div>')),
'<div class="ci-display-cell">0.2160</div>',
LINE_BREAK.join(
('<div class="ci-display-good-change ci-display-cell">'
'<div>0.2220',
'<span class="ci-display-ratio">2.7900</span>',
'<span class="ci-display-ci-range">[0.7300, 4.8500]</span>'
'</div></div>')),
],
},
columns=['Country', 'Experiment_Id', 'PLA_CONV_CVR'])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country'],
aggregate_dimensions=False,
show_control=True,
ctrl_id='expr_foo',
auto_add_description=False)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_add_control_rows_on_no_cotrol_rows(self):
expected = pd.DataFrame({
'CI_Lower': [5.0, np.nan],
'CI_Upper': [15.0, np.nan],
'Control_Id': ['expr_foo', 'expr_foo'],
'Control_Value': [1.0, np.nan],
'Experiment_Id': ['expr_bar', 'expr_foo'],
'Is_Control': [False, True],
'Metric': ['metric_foo', 'metric_foo'],
'Ratio': [10.0, np.nan],
'Value': [1.1, 1.0]
})
actual = confidence_interval_display.add_control_rows(
DF_NO_DIMENSION[~DF_NO_DIMENSION.Is_Control], []).reset_index(drop=True)
testing.assert_frame_equal(expected, actual)
def test_add_control_rows_on_partial_cotrol_rows(self):
expected = pd.DataFrame({
'CI_Lower': [-5.035, 0.73, np.nan, np.nan],
'CI_Upper': [2.235, 4.85, np.nan, np.nan],
'Control_Id': ['expr_foo'] * 4,
'Control_Value': [0.780933, 0.21599, np.nan, np.nan],
'Country': ['GB', 'US', 'US', 'GB'],
'Experiment_Id': [42, 42, 'expr_foo', 'expr_foo'],
'Is_Control': [False, False, True, True],
'Metric': ['PLA_CONV_CVR'] * 4,
'Ratio': [-1.4, 2.79, np.nan, np.nan],
'Type': ['WEB'] * 4,
'Value': [0.77, 0.222026, 0.216, 0.780933]
})
actual = confidence_interval_display.add_control_rows(
DF_WITH_DIMENSIONS[1:], ['Country', 'Type']).reset_index(drop=True)
# Merging with nonoverlapping dfs returns different orders in PY2 and 3.
testing.assert_frame_equal(expected, actual, check_like=True)
def test_add_control_rows_on_full_cotrol_rows(self):
expected = pd.concat([
DF_WITH_DIMENSIONS[~DF_WITH_DIMENSIONS.Is_Control],
DF_WITH_DIMENSIONS[DF_WITH_DIMENSIONS.Is_Control]
])
actual = confidence_interval_display.add_control_rows(
DF_WITH_DIMENSIONS, ['Country'])
testing.assert_frame_equal(expected, actual)
def test_display_df_with_dimensions(self):
expected = pd.DataFrame(
{
'Dimensions': [{
'Dim_1': 'GB',
'Experiment_Id': 'expr_foo',
'Is_Control': True,
'Dim_2': 'WEB'
}, {
'Dim_1': 'GB',
'Experiment_Id': 42,
'Is_Control': False,
'Dim_2': 'WEB'
}, {
'Dim_1': 'US',
'Experiment_Id': 'expr_foo',
'Is_Control': True,
'Dim_2': 'WEB'
}, {
'Dim_1': 'US',
'Experiment_Id': 42,
'Is_Control': False,
'Dim_2': 'WEB'
}],
'PLA_CONV_CVR': [(0.787, None, None, None),
(0.77, -1.4, -5.035, 2.235),
(0.216, None, None, None),
(0.222026, 2.79, 0.73, 4.85)]
},
columns=['Dimensions', 'PLA_CONV_CVR'])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country', 'Type'],
aggregate_dimensions=True,
show_control=True,
ctrl_id='expr_foo',
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_flip_color(self):
expected = pd.DataFrame(
{
'Country': [
'<div>GB</div>',
'<div>GB</div>',
'<div>US</div>',
'<div>US</div>',
],
'Experiment_Id': [
'<div>expr_foo</div>',
'<div>42</div>',
'<div>expr_foo</div>',
'<div>42</div>',
],
'PLA_CONV_CVR': [
'<div class="ci-display-cell">0.7870</div>',
LINE_BREAK.join(
('<div class="ci-display-cell"><div>0.7700',
'<span class="ci-display-ratio">-1.4000</span>',
'<span class="ci-display-ci-range">[-5.0350, 2.2350]'
'</span></div></div>')),
'<div class="ci-display-cell">0.2160</div>',
LINE_BREAK.join(
('<div class="ci-display-bad-change ci-display-cell"><div>'
'0.2220', '<span class="ci-display-ratio">2.7900</span>',
'<span class="ci-display-ci-range">[0.7300, 4.8500]</span>'
'</div></div>')),
],
},
columns=['Country', 'Experiment_Id', 'PLA_CONV_CVR'])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country'],
aggregate_dimensions=False,
flip_color=['PLA_CONV_CVR'],
show_control=True,
ctrl_id='expr_foo',
auto_add_description=False)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_expr_info(self):
expected = pd.DataFrame(
{
'Country': ['GB', 'GB', 'US', 'US'],
'Control_Id': ['expr_foo'] * 4,
'Experiment_Id': ['expr_foo', 42, 'expr_foo', 42],
'Is_Control': [True, False, True, False],
'Type': ['WEB', 'WEB', 'WEB', 'WEB'],
'PLA_CONV_CVR': [(0.787, None, None, None),
(0.77, -1.4, -5.035, 2.235),
(0.216, None, None, None),
(0.222026, 2.79, 0.73, 4.85)]
},
columns=[
'Country', 'Type', 'Control_Id', 'Is_Control', 'Experiment_Id',
'PLA_CONV_CVR'
])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=True,
ctrl_id={'expr_foo': [42, 'not existed']},
display_expr_info=True,
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_auto_add_description(self):
expected = pd.DataFrame(
{
'Country': ['GB', 'GB', 'US', 'US'],
'Control_Id': ['expr_foo'] * 4,
'Experiment_Id': ['expr_foo', 42, 'expr_foo', 42],
'Is_Control': [True, False, True, False],
'Type': ['WEB', 'WEB', 'WEB', 'WEB'],
'Description': ['Control', 'Not Control'] * 2,
'PLA_CONV_CVR': [(0.787, None, None, None),
(0.77, -1.4, -5.035, 2.235),
(0.216, None, None, None),
(0.222026, 2.79, 0.73, 4.85)]
},
columns=[
'Country', 'Type', 'Control_Id', 'Is_Control', 'Experiment_Id',
'Description', 'PLA_CONV_CVR'
])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=True,
ctrl_id={'expr_foo': [42, 'not existed']},
display_expr_info=True,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_df_with_dimensions_aggregate_dimensions_false(self):
expected = pd.DataFrame(
{
'Country': ['GB', 'GB', 'US', 'US'],
'Experiment_Id': ['expr_foo', 42, 'expr_foo', 42],
'Type': ['WEB', 'WEB', 'WEB', 'WEB'],
'PLA_CONV_CVR': [(0.787, None, None, None),
(0.77, -1.4, -5.035, 2.235),
(0.216, None, None, None),
(0.222026, 2.79, 0.73, 4.85)]
},
columns=['Country', 'Type', 'Experiment_Id', 'PLA_CONV_CVR'])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=True,
ctrl_id={'expr_foo': [42, 'not existed']},
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_df_with_dimensions_show_control_false(self):
expected = pd.DataFrame(
{
'PLA_CONV_CVR': [(None, -1.4, -5.035, 2.235),
(None, 2.79, 0.73, 4.85)],
'Country': ['GB', 'US'],
'Experiment_Id': [42, 42],
'Type': ['WEB', 'WEB']
},
columns=['Country', 'Type', 'Experiment_Id', 'PLA_CONV_CVR'])
actual = confidence_interval_display.get_formatted_df(
DF_WITH_DIMENSIONS,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=False,
ctrl_id='expr_foo',
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_df_no_dimension(self):
expected = pd.DataFrame(
{
'Dimensions': [{
'Experiment_Id': 2,
'Is_Control': True
}, {
'Experiment_Id': 'expr_bar',
'Is_Control': False
}],
'metric_foo': [(1.0, None, None, None), (1.1, 10.0, 5.0, 15.0)]
},
columns=['Dimensions', 'metric_foo'])
actual = confidence_interval_display.get_formatted_df(
DF_NO_DIMENSION,
dims=['Country', 'Type'],
aggregate_dimensions=True,
show_control=True,
ctrl_id=2,
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_using_ci_range(self):
expected = confidence_interval_display.get_formatted_df(
DF_NO_DIMENSION,
dims=['Country', 'Type'],
aggregate_dimensions=True,
show_control=True,
ctrl_id=2,
auto_add_description=False,
return_pre_agg_df=True)
df_no_ci = DF_NO_DIMENSION.copy()
df_no_ci['CI_Range'] = df_no_ci['CI_Upper'] - df_no_ci['CI_Lower']
del df_no_ci['CI_Upper'], df_no_ci['CI_Lower']
actual = confidence_interval_display.get_formatted_df(
df_no_ci,
dims=['Country', 'Type'],
aggregate_dimensions=True,
show_control=True,
ctrl_id=2,
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual)
def test_display_df_no_dimension_aggregate_dimensions_false(self):
expected = pd.DataFrame(
{
'Experiment_Id': [2, 'expr_bar'],
'metric_foo': [(1.0, None, None, None), (1.1, 10.0, 5.0, 15.0)]
},
columns=['Experiment_Id', 'metric_foo'])
actual = confidence_interval_display.get_formatted_df(
DF_NO_DIMENSION,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=True,
ctrl_id=2,
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_display_df_no_dimension_show_control_false(self):
expected = pd.DataFrame(
{
'Experiment_Id': ['expr_bar'],
'metric_foo': [(None, 10.0, 5.0, 15.0)]
},
columns=['Experiment_Id', 'metric_foo'])
actual = confidence_interval_display.get_formatted_df(
DF_NO_DIMENSION,
dims=['Country', 'Type'],
aggregate_dimensions=False,
show_control=False,
ctrl_id=2,
auto_add_description=False,
return_pre_agg_df=True)
testing.assert_frame_equal(expected, actual, check_names=False)
def test_metric_formatter_with_no_ratio(self):
expected = '<div class="ci-display-cell">1.2000</div>'
actual = confidence_interval_display.MetricFormatter()(
(1.2, None, None, None))
self.assertEqual(expected, actual)
def test_metric_formatter_with_no_value(self):
expected = LINE_BREAK.join(
('<div class="ci-display-good-change ci-display-cell"><div>'
'<div>-</div>', '<span class="ci-display-ratio">1.2000</span>',
'<span class="ci-display-ci-range">[1.0000, 1.4000]</span></div>'
'</div>'
))
actual = confidence_interval_display.MetricFormatter()((None, 1.2, 1, 1.4))
self.assertEqual(expected, actual)
def test_metric_formatter_with_percentage_value_and_absolute_ratio(self):
expected = ('<div class="ci-display-good-change ci-display-cell"><div>'
'24.44%' + LINE_BREAK +
'<span class="ci-display-ratio">1.2000</span>' + LINE_BREAK +
'<span class="ci-display-ci-range">[1.0000, 1.4000]</span>' +
'</div></div>')
formats = {'Ratio': 'absolute', 'Value': 'percent'}
actual = confidence_interval_display.MetricFormatter(formats)(
(0.24444, 1.2, 1, 1.4))
self.assertEqual(expected, actual)
def test_metric_formatter_with_percentage_value_and_pp_ratio(self):
expected = LINE_BREAK.join((
'<div class="ci-display-good-change ci-display-cell"><div>24.44%',
'<span class="ci-display-ratio">1.20pp</span>',
'<span class="ci-display-ci-range">[1.00, 1.40] pp</span></div></div>'))
formats = {'Ratio': 'pp', 'Value': 'percent'}
actual = confidence_interval_display.MetricFormatter(formats)(
(0.24444, 1.2, 1, 1.4))
self.assertEqual(expected, actual)
def test_metric_formatter_with_custom_formatting(self):
expected = LINE_BREAK.join(
('<div class="ci-display-good-change ci-display-cell"><div>constant',
'<span class="ci-display-ratio">1</span>',
'<span class="ci-display-ci-range">[1, 1]</span></div></div>'))
formats = {'Ratio': '{:.0f}', 'Value': 'constant'}
actual = confidence_interval_display.MetricFormatter(formats)(
(0.24444, 1.2, 1, 1.4))
self.assertEqual(expected, actual)
def test_metric_formatter_with_positive_ci(self):
expected = LINE_BREAK.join(
('<div class="ci-display-good-change ci-display-cell"><div>1.2000',
'<span class="ci-display-ratio">1.1000</span>',
'<span class="ci-display-ci-range">[1.0200, 1.1800]</span></div></div>'
))
actual = confidence_interval_display.MetricFormatter()(
(1.2, 1.1, 1.02, 1.18))
self.assertEqual(expected, actual)
def test_metric_formatter_with_negative_ci(self):
expected = LINE_BREAK.join((
'<div class="ci-display-bad-change ci-display-cell"><div>1.2000',
'<span class="ci-display-ratio">-1.1000</span>',
'<span class="ci-display-ci-range">[-1.0200, -1.1800]</span></div>'
'</div>'
))
actual = confidence_interval_display.MetricFormatter()(
(1.2, -1.1, -1.02, -1.18))
self.assertEqual(expected, actual)
def test_metric_formatter_with_na_cis(self):
expected = LINE_BREAK.join(
('<div class="ci-display-cell"><div>24.44%',
'<span class="ci-display-ratio">1.2000</span>',
'<span class="ci-display-ci-range">[N/A, N/A]</span></div></div>'))
formats = {'Ratio': 'absolute', 'Value': 'percent'}
actual = confidence_interval_display.MetricFormatter(formats)(
(0.24444, 1.2, None, None))
self.assertEqual(expected, actual)
def test_metric_formatter_with_positive_ci_flip_color(self):
expected = LINE_BREAK.join(
('<div class="ci-display-bad-change ci-display-cell"><div>1.2000',
'<span class="ci-display-ratio">1.1000</span>',
'<span class="ci-display-ci-range">[1.0200, 1.1800]</span></div></div>'
))
actual = confidence_interval_display.MetricFormatter(if_flip_color=True)(
(1.2, 1.1, 1.02, 1.18))
self.assertEqual(expected, actual)
def test_metric_formatter_with_negative_ci_flip_color(self):
expected = LINE_BREAK.join((
'<div class="ci-display-good-change ci-display-cell"><div>1.2000',
'<span class="ci-display-ratio">-1.1000</span>',
'<span class="ci-display-ci-range">[-1.0200, -1.1800]</span></div>'
'</div>'
))
actual = confidence_interval_display.MetricFormatter(if_flip_color=True)(
(1.2, -1.1, -1.02, -1.18))
self.assertEqual(expected, actual)
def test_dimension_formatter(self):
x = {
'Dim_1': 'Mobile',
'Dim_2': 'WEB',
'Description': 'foo',
'Experiment_Id': 42
}
expected = LINE_BREAK.join(
('<div><div><span class="ci-display-description-text">foo</span>',
'<span class="ci-display-experiment-id">42</span>',
'<span class="ci-display-dimension">Mobile * WEB</span></div></div>'))
actual = confidence_interval_display.dimension_formatter(x)
self.assertEqual(expected, actual)
def test_dimension_formatter_with_missing_field(self):
x = {
'Dim_1': 'Mobile',
'Dim_2': 'WEB',
'Experiment_Id': 42
} # No description.
expected = LINE_BREAK.join(
('<div><div><span class="ci-display-experiment-id">42</span>',
'<span class="ci-display-dimension">Mobile * WEB</span></div></div>'))
actual = confidence_interval_display.dimension_formatter(x)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,961,730,074,671,474,000 | 38.417143 | 80 | 0.543346 | false |
pythonsingapore/pythonsingapore | website/webapps/django/myproject/myproject/tests/mixins.py | 1 | 3801 | """
Generally useful mixins for tests of any project.
This should be open sourced in the future, so it must be project agnostic.
"""
from django.core.urlresolvers import reverse
from myproject.tests.factories import UserFactory
class ViewTestsMixin(object):
"""Mixin that provides commonly tested assertions."""
def get_view_name(self):
"""
        Returns the name of this view, which is resolved to a URL via ``reverse``.
You must implement this when inheriting this mixin.
"""
        raise NotImplementedError
def get_view_args(self):
"""
Returns the view's args, if necessary.
If the URL of this view is constructed via args, you can override this
method and return the proper args for the test.
"""
return None
def get_view_kwargs(self):
"""
Returns the view's kwargs, if necessary.
If the URL of this view is constructed via kwargs, you can override
this method and return the proper args for the test.
"""
return None
def get_url(self, view_name=None, view_args=None, view_kwargs=None):
"""
Returns the request params for this view.
        When calling ``self.client.get`` we usually need three parameters:
* The URL, which we construct from the view name using ``reverse``
* The args
* The kwargs
In most cases ``args`` and ``kwargs`` are ``None``, so this method will
help to return the proper URL by calling instance methods that can
be overridden where necessary.
"""
if view_name is None:
view_name = self.get_view_name()
if view_args is None:
view_args = self.get_view_args()
if view_kwargs is None:
view_kwargs = self.get_view_kwargs()
return reverse(view_name, args=view_args, kwargs=view_kwargs)
def login(self, user):
"""Performs a login for the given user."""
self.client.login(username=user.username, password='test123')
def should_redirect_to_login_when_anonymous(
self, view_name=None, view_args=None, view_kwargs=None):
"""Tests if the view redirects to login when the user is anonymous."""
url = self.get_url(view_name, view_args, view_kwargs)
resp = self.client.get(url)
self.assertRedirects(
resp, '{0}?next={1}'.format(reverse('auth_login'), url))
return resp
def should_be_callable_when_anonymous(
self, view_name=None, view_args=None, view_kwargs=None):
"""Tests if the view returns 200 when the user is anonymous."""
url = self.get_url(view_name, view_args, view_kwargs)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
return resp
def should_be_callable_when_authenticated(
self, user, view_name=None, view_args=None, view_kwargs=None):
"""Tests if the view returns 200 when the user is logged in."""
url = self.get_url(view_name, view_args, view_kwargs)
self.login(user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
return resp
def should_be_callable_when_has_correct_permissions(
self, user, view_name=None, view_args=None, view_kwargs=None):
"""Tests if the view returns 200 when the user has permissions."""
url = self.get_url(view_name, view_args, view_kwargs)
user_no_permissions = UserFactory()
self.login(user_no_permissions)
resp = self.client.get(url)
self.assertRedirects(
resp, '{0}?next={1}'.format(reverse('auth_login'), url))
self.login(user)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
| mit | 4,344,034,524,574,619,000 | 33.87156 | 79 | 0.622205 | false |
wbkang/rpi-repo | rpiweather/temppressure.py | 1 | 3102 | #!/usr/bin/env python3
import time
import smbus2 as smbus
import logging
import threading
from collections import deque
from rpiweather.sampler import Sampler
from rpiweather.data import insert_data
from rpiweather import config
import rpiweather.data
import datetime
import pytz
logger = logging.getLogger(__name__)
SAMPLE_INTERVAL = config.temppressure['sample_interval']
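# I2C bus and device address of the pressure/temperature sensor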
bus = smbus.SMBus(1)
addr = 0x60
def configure(sample_interval):
global SAMPLE_INTERVAL
SAMPLE_INTERVAL = sample_interval
def read():
"""
Read a datapoint.
read() -> {'temperature': C, 'pressure':kPa}
Code from https://www.raspberrypi.org/forums/viewtopic.php?t=91185&p=827348
"""
# a0: 16 bits - 1 sign, 12 int, 3 frac
a0 = (bus.read_byte_data(addr, 0x04) << 8) | \
bus.read_byte_data(addr, 0x05)
if a0 & 0x8000:
a0d = -((~a0 & 0xffff) + 1)
else:
a0d = a0
a0f = float(a0d) / 8.0
logger.debug("a0 = 0x%4x %5d %4.3f" % (a0, a0d, a0f))
# b1: 16 bits - 1 sign, 2 int, 13 frac
b1 = (bus.read_byte_data(addr, 0x06) << 8) | \
bus.read_byte_data(addr, 0x07)
if b1 & 0x8000:
b1d = -((~b1 & 0xffff) + 1)
else:
b1d = b1
b1f = float(b1d) / 8192.0
logger.debug("b1 = 0x%4x %5d %1.5f" % (b1, b1d, b1f))
# b2: 16 bits - 1 sign, 1 int, 14 frac
b2 = (bus.read_byte_data(addr, 0x08) << 8) | \
bus.read_byte_data(addr, 0x09)
if b2 & 0x8000:
b2d = -((~b2 & 0xffff) + 1)
else:
b2d = b2
b2f = float(b2d) / 16384.0
logger.debug("b2 = 0x%4x %5d %1.5f" % (b2, b2d, b2f))
# c12: 14 bits - 1 sign, 0 int, 13 frac
# (Documentation in the datasheet is poor on this.)
c12 = (bus.read_byte_data(addr, 0x0a) << 8) | \
bus.read_byte_data(addr, 0x0b)
if c12 & 0x8000:
c12d = -((~c12 & 0xffff) + 1)
else:
c12d = c12
c12f = float(c12d) / 16777216.0
logger.debug("c12 = 0x%4x %5d %1.5f" % (c12, c12d, c12f))
# Start conversion and wait 3mS
bus.write_byte_data(addr, 0x12, 0x0)
time.sleep(0.003)
rawpres = (bus.read_byte_data(addr, 0x00) << 2) | \
(bus.read_byte_data(addr, 0x01) >> 6)
rawtemp = (bus.read_byte_data(addr, 0x02) << 2) | \
(bus.read_byte_data(addr, 0x03) >> 6)
logger.debug("\nRaw pres = 0x%3x %4d" % (rawpres, rawpres))
logger.debug("Raw temp = 0x%3x %4d" % (rawtemp, rawtemp))
pcomp = a0f + (b1f + c12f * rawtemp) * rawpres + b2f * rawtemp
pkpa = pcomp / 15.737 + 50
logger.debug("Pres = %3.2f kPa" % pkpa)
temp = 25.0 - (rawtemp - 498.0) / 5.35
logger.debug("Temp = %3.2f" % temp)
# add 1.63kPa based on the comparison with official data
pkpa += 1.63
return {'temperature': temp, 'pressure': pkpa}
def sample():
record = read()
now = datetime.datetime.now(pytz.utc)
insert_data(now, "temperature2", record['temperature'])
insert_data(now, "pressure", record['pressure'])
sampler = Sampler(SAMPLE_INTERVAL, sample)
def start_recording():
logger.info("Start sampling")
sampler.start()
| mit | -1,262,101,671,438,950,400 | 27.458716 | 79 | 0.591554 | false |
Flamacue/pretix | src/pretix/base/views/metrics.py | 1 | 1363 | import hmac
from django.conf import settings
from django.http import HttpResponse
from .. import metrics
def unauthed_response():
content = "<html><title>Forbidden</title><body>You are not authorized to view this page.</body></html>"
response = HttpResponse(content, content_type="text/html")
response["WWW-Authenticate"] = 'Basic realm="metrics"'
response.status_code = 401
return response
def serve_metrics(request):
if not settings.METRICS_ENABLED:
return unauthed_response()
# check if the user is properly authorized:
if "HTTP_AUTHORIZATION" not in request.META:
return unauthed_response()
method, credentials = request.META["HTTP_AUTHORIZATION"].split(" ", 1)
if method.lower() != "basic":
return unauthed_response()
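    # Basic auth credentials are a base64-encoded "user:passphrase" pair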
    user, passphrase = base64.b64decode(
        credentials.strip()).decode().split(":", 1)
if not hmac.compare_digest(user, settings.METRICS_USER):
return unauthed_response()
if not hmac.compare_digest(passphrase, settings.METRICS_PASSPHRASE):
return unauthed_response()
# ok, the request passed the authentication-barrier, let's hand out the metrics:
m = metrics.metric_values()
output = []
for metric, value in m:
output.append("{} {}".format(metric, str(value)))
content = "\n".join(output)
return HttpResponse(content)
| apache-2.0 | 8,437,268,350,399,311,000 | 29.288889 | 107 | 0.682318 | false |
Julian/home-assistant | homeassistant/components/binary_sensor/homematic.py | 1 | 3515 | """
Support for Homematic binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.homematic/
"""
import logging
from homeassistant.const import STATE_UNKNOWN
from homeassistant.components.binary_sensor import BinarySensorDevice
import homeassistant.components.homematic as homematic
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
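# Map pyhomematic device class names to Home Assistant sensor classes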
SENSOR_TYPES_CLASS = {
"Remote": None,
"ShutterContact": "opening",
"Smoke": "smoke",
"SmokeV2": "smoke",
"Motion": "motion",
"MotionV2": "motion",
"RemoteMotion": None
}
def setup_platform(hass, config, add_callback_devices, discovery_info=None):
"""Setup the Homematic binary sensor platform."""
if discovery_info is None:
return
return homematic.setup_hmdevice_discovery_helper(HMBinarySensor,
discovery_info,
add_callback_devices)
class HMBinarySensor(homematic.HMDevice, BinarySensorDevice):
"""Representation of a binary Homematic device."""
@property
def is_on(self):
"""Return true if switch is on."""
if not self.available:
return False
return bool(self._hm_get_state())
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
if not self.available:
return None
        # If state is MOTION (works only for RemoteMotion)
if self._state == "MOTION":
return "motion"
return SENSOR_TYPES_CLASS.get(self._hmdevice.__class__.__name__, None)
def _check_hm_to_ha_object(self):
"""Check if possible to use the HM Object as this HA type."""
from pyhomematic.devicetypes.sensors import HMBinarySensor\
as pyHMBinarySensor
# Check compatibility from HMDevice
if not super(HMBinarySensor, self)._check_hm_to_ha_object():
return False
        # check if the Homematic device is correct for this HA device
if not isinstance(self._hmdevice, pyHMBinarySensor):
_LOGGER.critical("This %s can't be use as binary", self._name)
return False
        # a user-specified state value must be a valid binary node
        if self._state and self._state not in self._hmdevice.BINARYNODE:
            _LOGGER.critical("This %s has no binary node %s", self._name,
                             self._state)
return False
# only check and give a warning to the user
if self._state is None and len(self._hmdevice.BINARYNODE) > 1:
_LOGGER.critical("%s have multiple binary params. It use all "
"binary nodes as one. Possible param values: %s",
self._name, str(self._hmdevice.BINARYNODE))
return False
return True
def _init_data_struct(self):
"""Generate a data struct (self._data) from the Homematic metadata."""
super(HMBinarySensor, self)._init_data_struct()
        # the device has exactly one binary node, use it as the state
if self._state is None and len(self._hmdevice.BINARYNODE) == 1:
for value in self._hmdevice.BINARYNODE:
self._state = value
# add state to data struct
if self._state:
_LOGGER.debug("%s init datastruct with main node '%s'", self._name,
self._state)
self._data.update({self._state: STATE_UNKNOWN})
| mit | -9,173,895,971,348,694,000 | 34.15 | 79 | 0.607397 | false |
pyfarm/pyfarm-master | pyfarm/master/user_interface/jobgroups.py | 1 | 11572 | # No shebang line, this module is meant to be imported
#
# Copyright 2015 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from httplib import BAD_REQUEST
except ImportError: # pragma: no cover
from http.client import BAD_REQUEST
from flask import render_template, request
from sqlalchemy import func, or_, and_, distinct, desc, asc
from pyfarm.core.enums import WorkState, AgentState
from pyfarm.master.application import db
from pyfarm.models.job import Job
from pyfarm.models.jobtype import JobType, JobTypeVersion
from pyfarm.models.task import Task
from pyfarm.models.agent import Agent
from pyfarm.models.user import User
from pyfarm.models.jobgroup import JobGroup
def jobgroups():
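    """Render the job groups overview page, aggregating job, task and
    active agent counts for every job group."""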
agent_count_query = db.session.query(
Job.job_group_id,
func.count(distinct(Task.agent_id)).label('agent_count')).\
join(Task, Task.job_id == Job.id).\
filter(Job.job_group_id != None).\
filter(Task.agent_id != None, or_(Task.state == None,
Task.state == WorkState.RUNNING),
Task.agent.has(Agent.state != AgentState.OFFLINE)).\
group_by(Job.job_group_id).subquery()
submit_time_query = db.session.query(
Job.job_group_id,
func.min(Job.time_submitted).label('time_submitted')).\
group_by(Job.job_group_id).subquery()
jobs_queued_query = db.session.query(
Job.job_group_id, func.count('*').label('j_queued')).\
filter(Job.state == None).group_by(Job.job_group_id).subquery()
jobs_paused_query = db.session.query(
Job.job_group_id, func.count('*').label('j_paused')).\
filter(Job.state == WorkState.PAUSED).\
group_by(Job.job_group_id).subquery()
jobs_running_query = db.session.query(
Job.job_group_id, func.count('*').label('j_running')).\
filter(Job.state == WorkState.RUNNING).\
group_by(Job.job_group_id).subquery()
jobs_done_query = db.session.query(
Job.job_group_id, func.count('*').label('j_done')).\
filter(Job.state == WorkState.DONE).\
group_by(Job.job_group_id).subquery()
jobs_failed_query = db.session.query(
Job.job_group_id, func.count('*').label('j_failed')).\
filter(Job.state == WorkState.FAILED).\
group_by(Job.job_group_id).subquery()
tasks_queued_query = db.session.query(
func.count(Task.id).label("t_queued"), Job.job_group_id).\
join(Job, Task.job_id == Job.id).\
filter(Task.state == None).group_by(Job.job_group_id).subquery()
tasks_running_query = db.session.query(
func.count(Task.id).label('t_running'), Job.job_group_id).\
join(Job, Task.job_id == Job.id).\
filter(Task.state == WorkState.RUNNING).\
group_by(Job.job_group_id).subquery()
tasks_done_query = db.session.query(
func.count(Task.id).label('t_done'), Job.job_group_id).\
join(Job, Task.job_id == Job.id).\
filter(Task.state == WorkState.DONE).\
group_by(Job.job_group_id).subquery()
tasks_failed_query = db.session.query(
func.count(Task.id).label('t_failed'), Job.job_group_id).\
join(Job, Task.job_id == Job.id).\
filter(Task.state == WorkState.FAILED).\
group_by(Job.job_group_id).subquery()
jobgroups_query = db.session.query(JobGroup,
User.username,
JobType.name.label("main_jobtype_name"),
func.coalesce(
agent_count_query.c.agent_count,
0).label("agent_count"),
submit_time_query.c.time_submitted.\
label("time_submitted"),
func.coalesce(
tasks_queued_query.c.t_queued,
0).label("t_queued"),
func.coalesce(
tasks_running_query.c.t_running,
0).label("t_running"),
func.coalesce(
tasks_done_query.c.t_done,
0).label("t_done"),
func.coalesce(
tasks_failed_query.c.t_failed,
0).label("t_failed")
).\
join(JobType, JobGroup.main_jobtype_id == JobType.id).\
outerjoin(jobs_queued_query,
JobGroup.id == jobs_queued_query.c.job_group_id).\
outerjoin(jobs_paused_query,
JobGroup.id == jobs_paused_query.c.job_group_id).\
outerjoin(jobs_running_query,
JobGroup.id == jobs_running_query.c.job_group_id).\
outerjoin(jobs_done_query,
JobGroup.id == jobs_done_query.c.job_group_id).\
outerjoin(jobs_failed_query,
JobGroup.id == jobs_failed_query.c.job_group_id).\
outerjoin(tasks_queued_query,
JobGroup.id == tasks_queued_query.c.job_group_id).\
outerjoin(tasks_running_query,
JobGroup.id == tasks_running_query.c.job_group_id).\
outerjoin(tasks_done_query,
JobGroup.id == tasks_done_query.c.job_group_id).\
outerjoin(tasks_failed_query,
JobGroup.id == tasks_failed_query.c.job_group_id).\
outerjoin(User, JobGroup.user_id == User.id).\
outerjoin(agent_count_query,
JobGroup.id == agent_count_query.c.job_group_id).\
outerjoin(submit_time_query,
JobGroup.id == submit_time_query.c.job_group_id)
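    # State filters are passed as boolean query-string arguments from the UI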
filters = {
"st_queued": ("st_queued" in request.args and
request.args["st_queued"].lower() == "true"),
"st_paused": ("st_paused" in request.args and
request.args["st_paused"].lower() == "true"),
"st_running": ("st_running" in request.args and
request.args["st_running"].lower() == "true"),
"st_failed": ("st_failed" in request.args and
request.args["st_failed"].lower() == "true"),
"st_any_done": ("st_any_done" in request.args and
request.args["st_any_done"].lower() == "true"),
"st_all_done": ("st_all_done" in request.args and
request.args["st_all_done"].lower() == "true")}
no_state_filters = True
if (any(filters.values())):
no_state_filters = False
conditions = []
if filters["st_queued"]:
conditions.append(and_(jobs_running_query.c.j_running == None,
jobs_paused_query.c.j_paused == None,
jobs_done_query.c.j_done == None,
jobs_failed_query.c.j_failed == None))
if filters["st_paused"]:
conditions.append(jobs_paused_query.c.j_paused != None)
if filters["st_running"]:
conditions.append(jobs_running_query.c.j_running != None)
if filters["st_failed"]:
conditions.append(jobs_failed_query.c.j_failed != None)
if filters["st_any_done"]:
conditions.append(jobs_done_query.c.j_done != None)
if filters["st_all_done"]:
conditions.append(and_(jobs_queued_query.c.j_queued == None,
jobs_running_query.c.j_running == None,
jobs_paused_query.c.j_paused == None,
jobs_failed_query.c.j_failed == None))
jobgroups_query = jobgroups_query.filter(or_(*conditions))
filters["no_user"] = ("no_user" in request.args and
request.args["no_user"].lower == "true")
if "u" in request.args or filters["no_user"]:
user_ids = request.args.getlist("u")
user_ids = [int(x) for x in user_ids]
jobgroups_query = jobgroups_query.filter(JobGroup.user_id.in_(user_ids))
filters["u"] = user_ids
if "jt" in request.args:
jobtype_ids = request.args.getlist("jt")
jobtype_ids = [int(x) for x in jobtype_ids]
jobgroups_query = jobgroups_query.filter(
JobGroup.main_jobtype_id.in_(jobtype_ids))
filters["jt"] = jobtype_ids
exists_query = jobgroups_query.filter(Job.job_group_id == JobGroup.id).\
exists()
jobs_query = db.session.query(Job.job_group_id,
Job,
JobType.name.label('jobtype_name')).\
join(JobTypeVersion, Job.jobtype_version_id == JobTypeVersion.id).\
join(JobType, JobTypeVersion.jobtype_id == JobType.id).\
filter(exists_query)
jobs_by_group = {}
for group_id, job, jobtype_name in jobs_query:
if group_id in jobs_by_group:
            jobs_by_group[group_id].append((job, jobtype_name))
else:
jobs_by_group[group_id] = [(job, jobtype_name)]
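    # Validate the requested sort column and direction before applying them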
order_dir = "desc"
order_by = "time_submitted"
if "order_by" in request.args:
order_by = request.args.get("order_by")
if order_by not in ["title", "time_submitted", "username",
"main_jobtype_name", "agent_count", "j_queued",
"j_running", "j_failed", "j_done", "t_queued",
"t_running", "t_failed", "t_done"]:
return (render_template(
"pyfarm/error.html",
error="Unknown order key %r. Options are 'title', "
"'main_jobtype_name' 'time_submitted', 'username', "
"'agent_count', 'j_queued', 'j_running', 'j_failed', "
"'j_done', 't_queued', 't_running', 't_failed', 't_done'" %
order_by),
BAD_REQUEST)
if "order_dir" in request.args:
order_dir = request.args.get("order_dir")
if order_dir not in ["asc", "desc"]:
return (render_template(
"pyfarm/error.html",
error="Unknown order direction %r. Options are 'asc' or 'desc'" %
order_dir),
BAD_REQUEST)
jobgroups_query = jobgroups_query.order_by("%s %s" % (order_by, order_dir))
users_query = User.query.order_by(User.username)
jobtypes_query = JobType.query
return render_template("pyfarm/user_interface/jobgroups.html",
jobgroups=jobgroups_query,
jobs_by_group=jobs_by_group, filters=filters,
order_dir=order_dir, order_by=order_by,
users=users_query, jobtypes=jobtypes_query)
| apache-2.0 | 526,505,934,287,155,460 | 48.033898 | 80 | 0.538714 | false |
Swaraj1998/MyCode | ML-Workshop/day5/analysis3.py | 1 | 1513 | __author__ = 'Ashish'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('max_columns', 50)
# pass in column names for each CSV
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv('Data\ml-100k\u.user', sep='|', names=u_cols)
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv('Data\ml-100k\u.data', sep='\t', names=r_cols)
# the movies file contains columns indicating the movie's genres
# let's only load the first five columns of the file with usecols
m_cols = ['movie_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
movies = pd.read_csv('Data\ml-100k\u.item', sep='|', names=m_cols, usecols=range(5))
# create one merged DataFrame
movie_ratings = pd.merge(movies, ratings)
lens = pd.merge(movie_ratings, users)
lens.reset_index('movie_id', inplace=True)
pivoted = lens.pivot_table(rows=['movie_id', 'title'],
cols=['sex'],
values='rating',
fill_value=0)
print pivoted.head()
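# Limit the disagreement plot to the 50 most-rated movies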
most_50 = lens.groupby('movie_id').size().order(ascending=False)[:50]
pivoted['diff'] = pivoted.M - pivoted.F
pivoted.reset_index('movie_id', inplace=True)
disagreements = pivoted[pivoted.movie_id.isin(most_50.index)]['diff']
disagreements.order().plot(kind='barh', figsize=[5, 5])
plt.title('Male vs. Female Avg. Ratings\n(Difference > 0 = Favored by Men)')
plt.ylabel('Title')
plt.xlabel('Average Rating Difference')
plt.show()
| mit | 109,061,741,188,661,280 | 33.386364 | 84 | 0.658956 | false |
ucarion/git-code-debt | tests/metrics/imports_test.py | 1 | 2077 | from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from git_code_debt.file_diff_stat import FileDiffStat
from git_code_debt.metric import Metric
from git_code_debt.metrics.imports import CheetahTemplateImportCount
from git_code_debt.metrics.imports import is_python_import
from git_code_debt.metrics.imports import is_template_import
from git_code_debt.metrics.imports import PythonImportCount
@pytest.mark.parametrize(('line', 'expected'), (
(b'import collections', True),
(b'from collections import defaultdict', True),
(b'from foo import bar as baz', True),
(b'import bar as baz', True),
(b'#import foo as bar', False),
(b'from with nothing', False),
(b' import foo', True),
(b'herpderp', False),
))
def test_python_imports(line, expected):
assert is_python_import(line) == expected
@pytest.mark.parametrize(('line', 'expected'), (
(b'#import foo', True),
(b'#from foo import bar', True),
(b'#from foo import bar as baz', True),
(b'#import bar as baz', True),
(b' #import foo', True),
(b'## Nothing to import from here', False),
(b'herpderp', False),
))
def test_template_imports(line, expected):
assert is_template_import(line) == expected
def test_python_import_parser():
parser = PythonImportCount()
input_stats = [
FileDiffStat(
b'test.py',
[b'import collections', b'from os import path'],
[b'import os.path', b'nothing'],
None,
),
]
metrics = list(parser.get_metrics_from_stat(input_stats))
assert metrics == [Metric('PythonImportCount', 1)]
def test_template_import_parser():
parser = CheetahTemplateImportCount()
input_stats = [
FileDiffStat(
b'test.tmpl',
[b'#import collections', b'#from os import path'],
[b'#import os.path', b'nothing'],
None,
),
]
metrics = list(parser.get_metrics_from_stat(input_stats))
assert metrics == [Metric('CheetahTemplateImportCount', 1)]
| mit | 2,708,372,159,157,328,400 | 29.544118 | 68 | 0.64131 | false |
radiasoft/radtrack | experimental/LSD_crash/pyqt_undoCommands.py | 1 | 1895 | from PyQt4.QtGui import QUndoCommand, QTreeWidgetItem
# QUndoCommand for creating a new THING and adding its name to the
# QTreeWidget
class LoadThings(QUndoCommand):
def __init__(self, widget, thing):
super(LoadThings, self).__init__()
self.widget = widget
self.createdThing = thing
self.item = QTreeWidgetItem()
self.item.setText(0, thing.name)
typeName = type(thing).__name__
for i in range(self.widget.ui.treeWidget.topLevelItemCount()):
group = self.widget.ui.treeWidget.topLevelItem(i)
if group.text(0) == typeName:
self.group = group
break
else: # group not found
self.group = QTreeWidgetItem()
self.group.setText(0, typeName)
def redo(self):
if self.widget.ui.treeWidget.indexOfTopLevelItem(self.group) == -1:
self.widget.ui.treeWidget.addTopLevelItem(self.group)
self.group.addChild(self.item)
self.group.setExpanded(True)
self.widget.ui.treeWidget.setCurrentItem(self.item)
def undo(self):
self.group.takeChild(self.group.childCount()-1)
if self.group.childCount() == 0:
parentIndex = self.widget.ui.treeWidget.indexOfTopLevelItem(self.group)
self.widget.ui.treeWidget.takeTopLevelItem(parentIndex)
# QUndoCommand for adding a THING's name to the QListWidget
class Add2List(QUndoCommand):
def __init__(self, widget):
super(Add2List, self).__init__()
self.widget = widget
self.previousList = self.widget.preListSave[:]
self.nextList = self.widget.postListSave[:]
def undo(self):
self.widget.ui.listw.clear()
self.widget.ui.listw.addItems(self.previousList)
def redo(self):
self.widget.ui.listw.clear()
self.widget.ui.listw.addItems(self.nextList)
| apache-2.0 | -3,442,286,966,883,006,500 | 35.442308 | 83 | 0.638522 | false |