repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 distinct values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
adamewing/tebreak | scripts/picklereads.py | 1 | 2261 | #!/usr/bin/env python
import os
import pickle
import argparse
import logging
from uuid import uuid4
from collections import defaultdict as dd
logger = logging.getLogger(__name__)
def output_fastq(ins, pickle, uuid):
out_sr_fn = '.'.join(pickle.strip().split('.')[:-1]) + '.' + uuid + '.SR.fastq'
out_dr_fn = '.'.join(pickle.strip().split('.')[:-1]) + '.' + uuid + '.DR.fastq'
sr_count = 0
dr_count = 0
out_sr = open(out_sr_fn, 'w')
out_dr = open(out_dr_fn, 'w')
for read in ins['READSTORE']:
if read.find('.SR/') > 0:
out_sr.write(read)
sr_count += 1
if read.find('.DR/') > 0:
out_dr.write(read)
dr_count += 1
out_sr.close()
out_dr.close()
return out_sr_fn, out_dr_fn, sr_count, dr_count
def main(args):
logger.debug('loading pickle: %s' % args.pickle)
with open(args.pickle, 'rb') as pickin:
insertions = pickle.load(pickin)
logger.debug('finished loading %s' % args.pickle)
logger.debug('raw candidate count: %d' % len(insertions))
uuids = {}
with open(args.uuids) as _:
for line in _:
if not line.startswith('UUID') and not line.startswith('#'):
uuids[line.strip().split()[0]] = True
for ins in insertions:
if ins['INFO']['ins_uuid'] in uuids:
if len(ins['READSTORE']) == 0:
logger.warning('no reads for insertion: %s' % ins['INFO']['ins_uuid'])
continue
sr_fq, dr_fq, sr_count, dr_count = output_fastq(ins, args.pickle, ins['INFO']['ins_uuid'])
logger.info('wrote %d split reads to %s' % (sr_count, sr_fq))
logger.info('wrote %d discordant reads to %s' % (dr_count, dr_fq))
if __name__ == '__main__':
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(description='output reads supporting insertions')
parser.add_argument('-p', '--pickle', required=True, help='input filename (tebreak.py pickle)')
parser.add_argument('-u', '--uuids', required=True, help='list of UUIDS in a .txt file - can use a tebreak table')
args = parser.parse_args()
main(args)
| mit | 6,442,492,667,076,628,000 | 29.146667 | 118 | 0.580716 | false |
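# Minimal usage sketch for scripts/picklereads.py above; file names here are
# hypothetical. Given a tebreak pickle and a text file of insertion UUIDs, the
# script writes <pickle>.<uuid>.SR.fastq and .DR.fastq files per insertion:
#     python picklereads.py -p sample.tebreak.pickle -u uuids.txt
# or, programmatically, by handing main() an argparse-style namespace:
import argparse
from picklereads import main  # hypothetical import of the script above
main(argparse.Namespace(pickle='sample.tebreak.pickle', uuids='uuids.txt'))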
webcomics/dosage | dosagelib/plugins/rhjunior.py | 1 | 1471 | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
class RHJunior(_ParserScraper):
stripUrl = 'https://www.rhjunior.com/%s/'
imageSearch = '//div[contains(@class, "entry-content")]//img'
multipleImagesPerStrip = True
def __init__(self, name, sub, prev, first, last=None):
super().__init__('RHJunior/' + name)
self.prevSearch = ('//a[@rel="prev"]', '//a[@title="' + prev + '"]')
self.url = self.stripUrl % ('comics/' + sub)
self.firstStripUrl = self.stripUrl % (sub + '-' + first)
if last:
self.url = self.stripUrl % (sub + '-' + last)
self.endOfLife = True
@classmethod
def getmodules(cls):
return (
cls('GoblinHollow', 'goblin-hollow',
'', '0001', last='7'),
cls('NipAndTuck', 'nip-and-tuck',
'Nip and Tuck', '0000'),
cls('QuentynQuinnSpaceRanger', 'quentyn-quinn-space-ranger',
'Quentyn Quinn, Space Ranger', '0001'),
cls('TalesOfTheQuestor', 'tales-of-the-questor',
'Tales of the Questor', 'cover'),
cls('TheJournalOfEnniasLongscript', 'the-journal-of-ennias-longscript',
'', '0001', last='0111'),
cls('TheProbabilityBomb', 'the-probability-bomb',
'the Probability Bomb', 'kickstarter'),
)
| mit | -8,525,481,723,409,454,000 | 38.756757 | 83 | 0.558804 | false |
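# Sketch of how the plugin above is consumed; it assumes the dosage package
# (which provides _ParserScraper) is installed. getmodules() expands the
# RHJunior base class into one scraper object per comic.
from dosagelib.plugins.rhjunior import RHJunior
for scraper in RHJunior.getmodules():
    print(scraper.url, scraper.firstStripUrl)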
zingale/hydro_examples | advection/weno.py | 1 | 15253 | import numpy
from matplotlib import pyplot
import advection
import weno_coefficients
from scipy.integrate import ode
def weno(order, q):
"""
Do WENO reconstruction
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
def weno_M(order, q):
"""
Do WENOM reconstruction following Gerolymos equation (18)
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha_JS = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha_JS[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w_JS = alpha_JS / numpy.sum(alpha_JS)
alpha = w_JS * (C + C**2 - 3 * C * w_JS + w_JS**2) / \
(C**2 + w_JS * (1 - 2 * C))
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
class WENOSimulation(advection.Simulation):
def __init__(self, grid, u, C=0.8, weno_order=3):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.weno_order = weno_order
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "sine_sine":
self.grid.a[:] = numpy.sin(numpy.pi*self.grid.x -
numpy.sin(numpy.pi*self.grid.x) / numpy.pi)
else:
super().init_cond(type)
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve(self, num_periods=1):
""" evolve the linear advection equation using RK4 """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while self.t < tmax:
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if self.t + dt > tmax:
dt = tmax - self.t
# RK4
# Store the data at the start of the step
a_start = g.a.copy()
k1 = dt * self.rk_substep()
g.a = a_start + k1 / 2
k2 = dt * self.rk_substep()
g.a = a_start + k2 / 2
k3 = dt * self.rk_substep()
g.a = a_start + k3
k4 = dt * self.rk_substep()
g.a = a_start + (k1 + 2 * (k2 + k3) + k4) / 6
self.t += dt
def evolve_scipy(self, num_periods=1):
""" evolve the linear advection equation using RK4 """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
class WENOMSimulation(WENOSimulation):
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve_scipy(self, num_periods=1):
""" evolve the linear advection equation using scipy """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compute WENO3 case
xmin = 0.0
xmax = 1.0
nx = 64
order = 3
ng = order+1
g = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = WENOSimulation(g, u, C=0.5, weno_order=3)
s.init_cond("gaussian")
ainit = s.grid.a.copy()
s.evolve(num_periods=1)
pyplot.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", label="exact")
pyplot.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
label="WENO3")
# #-------------------------------------------------------------------------
# # convergence test
# # Note that WENO schemes with standard weights lose convergence at
# # critical points. For high degree critical points they lose more orders.
# # The suggestion in Gerolymos is that you may expect to drop down to
# # order r-1 in the limit.
# # The Gaussian has all odd derivatives vanishing at the origin, so
# # the higher order schemes will lose accuracy.
# # For the Gaussian:
# # This shows clean 5th order convergence for r=3
# # But for r=4-6 the best you get is ~6th order, and 5th order is more
# # realistic
# # For sin(x - sin(x)) type data Gerolymos expects better results
# # But the problem actually appears to be the time integrator
# # Switching to Dormand-Price 8th order from scipy (a hack) will make it
# # work for all cases. With sin(.. sin) data you get 2r - 2 thanks to
# # the one critical point.
#
# problem = "sine_sine"
#
# xmin =-1.0
# xmax = 1.0
## orders = [4]
# orders = [3, 4, 5, 6]
## N1 = [2**4*3**i//2**i for i in range(5)]
## N2 = [2**5*3**i//2**i for i in range(6)]
## N3 = [3**4*4**i//3**i for i in range(5)]
## N4 = [2**(4+i) for i in range(4)]
## N = numpy.unique(numpy.array(N1+N2+N3+N4, dtype=numpy.int))
## N.sort()
## N = [32, 64, 128, 256, 512]
## N = [32, 64, 128]
# N = [24, 32, 54, 64, 81, 108, 128]
#
# errs = []
# errsM = []
#
# u = 1.0
#
# colors="bygrc"
#
# for order in orders:
# ng = order+1
# errs.append([])
# errsM.append([])
# for nx in N:
# print(order, nx)
# gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# su = WENOSimulation(gu, u, C=0.5, weno_order=order)
## guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
## suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
#
# su.init_cond("sine_sine")
## suM.init_cond("sine_sine")
# ainit = su.grid.a.copy()
#
# su.evolve_scipy(num_periods=1)
## suM.evolve_scipy(num_periods=1)
#
# errs[-1].append(gu.norm(gu.a - ainit))
## errsM[-1].append(guM.norm(guM.a - ainit))
#
# pyplot.clf()
# N = numpy.array(N, dtype=numpy.float64)
# for n_order, order in enumerate(orders):
# pyplot.scatter(N, errs[n_order],
# color=colors[n_order],
# label=r"WENO, $r={}$".format(order))
## pyplot.scatter(N, errsM[n_order],
## color=colors[n_order],
## label=r"WENOM, $r={}$".format(order))
# pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
# linestyle="--", color=colors[n_order],
# label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
## pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
## color="k", label=r"$\mathcal{O}(\Delta x^4)$")
#
# ax = pyplot.gca()
# ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
# ax.set_xscale('log')
# ax.set_yscale('log')
#
# pyplot.xlabel("N")
# pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
# fontsize=16)
#
# pyplot.legend(frameon=False)
# pyplot.savefig("weno-converge-sine-sine.pdf")
## pyplot.show()
#-------------- RK4
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 5]
N = [54, 64, 81, 108, 128]
errs = []
u = 1.0
colors="brc"
for order in orders:
ng = order+1
errs.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve(num_periods=5)
errs[-1].append(gu.norm(gu.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
pyplot.plot(N, errs[0][-1]*(N[-1]/N)**(5),
linestyle="--", color=colors[0],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(5))
pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, RK4")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian-rk4.pdf")
# pyplot.show()
#-------------- Gaussian
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 4, 5, 6]
N = [24, 32, 54, 64, 81, 108, 128]
# N = [32, 64, 108, 128]
errs = []
errsM = []
u = 1.0
colors="bygrc"
for order in orders:
ng = order+1
errs.append([])
errsM.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
# guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
# suM.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve_scipy(num_periods=1)
# suM.evolve_scipy(num_periods=1)
errs[-1].append(gu.norm(gu.a - ainit))
# errsM[-1].append(guM.norm(guM.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
# pyplot.scatter(N, errsM[n_order],
# color=colors[n_order],
# label=r"WENOM, $r={}$".format(order))
pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
linestyle="--", color=colors[n_order],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
# pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
# color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, DOPRK8")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian.pdf")
# pyplot.show()
| bsd-3-clause | -1,032,854,415,870,718,300 | 29.386454 | 81 | 0.485872 | false |
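# Minimal sketch of calling the weno() reconstruction above on smooth periodic
# data; the import path is hypothetical and assumes weno.py plus its
# advection/weno_coefficients dependencies are importable. An order-r stencil
# needs r ghost cells on each side, and the reconstructed interface values
# land in qL[order:-order].
import numpy
from weno import weno  # hypothetical import of the module above
order = 3
x = numpy.linspace(0.0, 1.0, 64 + 2 * order)
q = numpy.sin(2.0 * numpy.pi * x)
qL = weno(order, q)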
maikelwever/autoaurbuilder | autoaurbuilder/build/management/commands/schedule.py | 1 | 1383 | from django.core.management.base import BaseCommand
from package.models import Package
class Command(BaseCommand):
args = 'None'
help = 'Checks for updates on packages and schedules rebuilds if needed.'
def handle(self, *args, **kwargs):
self.stdout.write("Checking for updates on packages...")
pkg_count = Package.objects.count()
counter = 1
for package in Package.objects.all():
self.stdout.write("\r[{0: <2}/{1: <2}] Updating package {2: <30}".format(
counter, pkg_count, package.name
), ending="")
self.stdout.flush()
package.check_for_updates()
counter += 1
self.stdout.write("Done, now reschedueling builds.")
rebuild_scheduled = []
for package in Package.objects.all():
if package.needs_rebuild():
if package not in rebuild_scheduled:
for i in package.get_dependencies():
if i not in rebuild_scheduled:
rebuild_scheduled.append(i)
if i.needs_rebuild():
i.rebuild()
rebuild_scheduled.append(package)
package.rebuild()
self.stdout.write("Rescheduled: {0}".format(rebuild_scheduled))
self.stdout.write("DONE!")
| gpl-3.0 | 53,555,113,558,810,240 | 35.394737 | 85 | 0.550253 | false |
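# Sketch: as a Django management command this is normally run from a project
# checkout as `python manage.py schedule` (the command name follows the module
# filename, per Django convention). With settings configured it can also be
# driven programmatically:
from django.core.management import call_command
call_command('schedule')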
gpoulter/pydedupe | dedupe/sim.py | 1 | 14909 | """Compare values, fields, and records for similarity"""
import collections
import logging
from dedupe.dale import similarity as dale
from dedupe.levenshtein import similarity as levenshtein
from dedupe.compat import OrderedDict as _OrderedDict
LOG = logging.getLogger('dedupe.sim')
class Convert(object):
"""Gets a single-valued field and converts it to a comparable value.
:type field: `callable` or `str` or `int`
:param field: Specifies a field from the record record.
:type converter: `callable`
:param converter: Converts field value for comparison.
:return: Converted field value.
>>> c = Convert(1, lambda x:x.lower())
>>> rec = ('A','B','C')
>>> c(rec)
'b'
"""
def __init__(self, field, converter):
from dedupe.get import getter
self.field = field
self.getter = getter(field)
self.converter = converter
def __call__(self, record):
return self.converter(self.getter(record))
class ListConvert(Convert):
"""Gets a multi-valued field converts its values to comparable values.
:type field: `callable`
:param field: Specifies a field from the record record.
:type converter: `callable`
:param converter: Converts field value for comparison.
:return: List of converted field values.
>>> c = ListConvert(lambda x:x[1].split(';'), lambda x:x.lower())
>>> rec = ('A','X;Y;Z','C')
>>> c(rec)
['x', 'y', 'z']
"""
def __call__(self, record):
return [self.converter(v) for v in self.getter(record)]
class Scale(object):
"""Map values of a similarity function onto the 0.0 to `rmax` range.
:param similarity: Callable of two records returning a similarity value.
:param low: Similarity values below `low` (default 0.0) are scaled to 0.0
:param high: Similarity values above `high` (default 1.0) are scaled to 1.0
:param rmax: Upper end of the result range (default 1.0).\
Lower `rmax` reduces contribution to vector distance, downweighting it.
:param missing: Return `missing` when `similarity` returns `None`.
:param test: Callable of record to test bad values. If `a` and `b` pass\
the test then return `similarity(a, b)`, otherwise return `missing`.
>>> from dedupe import sim
>>> simfunc = lambda a, b: 2**-abs(a-b)
>>> simfunc(1, 2)
0.5
>>> sim.Scale(simfunc)(1, 2)
0.5
>>> sim.Scale(simfunc, low=0.6)(1, 2)
0.0
>>> sim.Scale(simfunc, high=0.4)(1, 2)
1.0
>>> sim.Scale(simfunc, low=0.4, high=0.6)(1, 2)
0.5
>>> sim.Scale(simfunc, low=0.4, high=0.6, rmax=0.5)(1, 2)
0.25
>>> isnum = lambda x: isinstance(x, int) or isinstance(x, float)
>>> print sim.Scale(simfunc, test=isnum)("blah", 2)
None
"""
def __init__(self, similarity,
low=0.0, high=1.0, rmax=1.0, missing=None, test=None):
if not (0.0 <= low < high):
raise ValueError("low: {0}, high: {1}".format(low, high))
self.similarity = similarity
self.low = low
self.high = high
self.rmax = rmax
self.missing = missing
self.test = test
def scale(self, value):
"""Scale a value from (low, high) range to (0, 1) range."""
if value <= self.low:
return 0.0
if value >= self.high:
return 1.0
return self.rmax * (value - self.low) / (self.high - self.low)
def __call__(self, a, b):
"""Similarity of a and b, scaled to (0, 1) range."""
if self.test and not (self.test(a) and self.test(b)):
return self.missing
v = self.similarity(a, b)
if v is None:
return self.missing
return self.scale(v)
class Field(object):
"""Computes the similarity of a pair of records on a specific field.
:type compare: callable(`V`, `V`) :class:`float`
:param compare: Returns similarity of a pair of encoded field values.
:type field1: callable(`R`) -> `T1`
:param field1: Gets field value from first record.
:type encode1: callable(`T1`) `V`
:param encode1: Encodes field value from first record (`lambda x:x`)
:type field2: callable(`R`) -> `T1`
:param field2: Gets field value from the second record (`field1`)
:type encode2: callable(`T1`) `V`
:param encode2: Encodes field value from the second record (`encode1`)
>>> # define some 'similarity of numbers' measure
>>> similarity = lambda x, y: 2**-abs(x-y)
>>> similarity(1, 2)
0.5
>>> Field(similarity, lambda r:r[1], float)(('A', '1'), ('B', '2'))
0.5
>>> fsim = Field(similarity, field1=lambda r:r[0], encode1=lambda x:x,
... field2=lambda r:r[1], encode2=float)
>>> fsim((1, 'A'), ('B', '2'))
0.5
"""
def __init__(
self, compare, field1, encode1=None, field2=None, encode2=None):
from dedupe.get import getter
self.compare = compare
self.field1 = getter(field1)
self.encode1 = encode1 if encode1 else lambda x: x
self.field2 = getter(field2) if field2 else self.field1
self.encode2 = encode2 if encode2 else self.encode1
def __call__(self, record1, record2):
"""Returns the similarity of `record1` and `record2` on this field."""
v1 = self.field1(record1)
v2 = self.field2(record2)
if v1 is not None and v2 is not None:
return self.compare(self.encode1(v1), self.encode2(v2))
else:
return None
class Average(Field):
"""Computes the average similarity of a pair of records on
a multi-valued field.
It computes the average by considering each field value from
the record with the fewest values and accumulating the
greatest similarity against the values in the other record.
If the shorter field is a subset of the longer field,
the similarity should be 1.0.
:type compare: callable(`V`, `V`) :class:`float`
:param compare: Returns similarity of a pair of encoded field values.
:type field1: callable(`R`) [`T1`, ...]
:param field1: Returns a list of values for the field on first record.
:type field2: callable(`R`) [`T2`, ...]
:param field2: Returns a list of values for the field on second record \
(default: `field1`).
:type encode1: function(`T1`) `V`
:param encode1: Encodes each field1 value for comparison.
:type encode2: function(`T2`) `V`
:param encode2: Encodes each field2 value for comparison (`encode1`).
:rtype: callable(`R1`, `R2`) float
:return: Computer of average similarity of records `R1` and `R2`\
for values of the field.
>>> # define an exponential 'similarity of numbers' measure
>>> similarity = lambda x, y: 2.0**(-abs(x-y))
>>> field = lambda r: set([r[0], r[2]])
>>> from dedupe import sim
>>> sim.Average(similarity, field, float)((1, 'A', '1'), (-2, 'B', 2))
0.5
>>> field = lambda r: set(r[1].split(';'))
>>> sim.Average(similarity, field, float)(('A', '0;1'), ('B', '1;2'))
0.75
>>> sim.Average(similarity, field, float)(
... ('A', '0;1;2'), ('B', '0;1;2;3;4'))
1.0
"""
def __call__(self, record1, record2):
"""Return the average similarity of `record1` and `record2` on
this multi-valued field"""
f1 = set(self.encode1(v1) for v1 in self.field1(record1))
f2 = set(self.encode2(v2) for v2 in self.field2(record2))
f1, f2 = sorted([f1, f2], key=len) # short set, long set
# Missing value check
if len(f1) == 0 or len(f2) == 0:
return self.compare(None, None)
total = 0.0
for v1 in f1:
best = 0.0
for v2 in f2:
comp = self.compare(v1, v2)
best = max(best, comp)
total += best # score of most similar item in the long set
return total / len(f1)
class Maximum(Field):
"""Computes the maximum similarity of a pair of records on a
multi-valued field.
:type compare: callable(`V`, `V`) :class:`float`
:param compare: Returns similarity of a pair of encoded field values.
:type field1: callable(`R`) [`T1`, ...]
:param field1: Returns a list of values for the field on first record.
:type field2: callable(`R`) [`T2`, ...]
:param field2: Returns a list of values for the field on second record\
(default: `field1`).
:type encode1: function(`T1`) `V`
:param encode1: Encodes each field1 value for comparison.
:type encode2: function(`T2`) `V`
:param encode2: Encodes each field2 value for comparison\
(default: `encode1`).
>>> # define an exponential 'similarity of numbers' measure
>>> similarity = lambda x, y: 2.0**(-abs(x-y))
>>> field = lambda r: set([r[0], r[2]])
>>> from dedupe import sim
>>> sim.Maximum(similarity, field, float)((0, 'A', '1'), (2, 'B', 2))
0.5
>>> field = lambda r: set(r[1].split(';'))
>>> sim.Maximum(similarity, field, float)(('A', '0;1;2'), ('B', '3;4;5'))
0.5
"""
def __call__(self, record1, record2):
"""Return the maximum similarity of `record1` and `record2` on
this multi-valued field."""
f1 = set(self.encode1(v1) for v1 in self.field1(record1))
f2 = set(self.encode2(v2) for v2 in self.field2(record2))
# Missing value check
if len(f1) == 0 or len(f2) == 0:
return self.compare(None, None)
best = 0.0
for v1 in f1:
for v2 in f2:
comp = self.compare(v1, v2)
best = max(best, comp)
return best
class Record(_OrderedDict):
"""Returns a vector of field value similarities between two records.
:type \*simfuncs: [(:class:`str`, :class:`Field`), ...]
:param \*simfuncs: Pairs of (field name, similarity function) used\
to compute the tuple of similarities.
:ivar Similarity: namedtuple class for the similarity of a pair of records\
with field names corresponding to `simfuncs`.
:rtype: function(`R`, `R`) :class:`Similarity`
:return: Takes two records and returns a `Similarity` tuple.
>>> # define a 'similarity of numbers' measure
>>> similarity = lambda x, y: 2.0**(-abs(x-y))
>>> from dedupe import sim
>>> vcomp1 = sim.Field(similarity, 1, float) # field 1 from record
>>> vcomp2 = sim.Field(similarity, 2, float) # field 2 from field
>>> rcomp = sim.Record(("V1", vcomp1), ("V2", vcomp2))
>>> rcomp(('A', 1, 1), ('B', 2, 4))
Similarity(V1=0.5, V2=0.125)
"""
def __init__(self, *simfuncs):
super(Record, self).__init__(simfuncs)
self.Similarity = collections.namedtuple("Similarity", self.keys())
def __call__(self, A, B):
return self.Similarity._make(
simfunc(A, B) for simfunc in self.itervalues())
class Indices(_OrderedDict):
"""Dictionary containing indeces defined on a single set of records.
When comparing, it caches the similarity vectors so that a pair of records
compared in one index is not compared again if the pair shows up in one
of the other indeces.
:type strategy: [ (`str`, `type`, `function`), ... ]
:param strategy: List of indexing strategies, as\
(index name, index class, key function). The index class must support\
the `compare` method, and the key function takes a record and returns\
a list of keys for indexing.
:type records: [ `tuple`, ... ]
:param records: List of records to insert into the indeces.
>>> from dedupe import block, sim
>>> makekey = lambda r: [int(r[1])]
>>> makekey(('A', 3.5))
[3]
>>> strategy = [ ("MyIndex", block.Index, makekey) ]
>>> records1 = [('A', 5.5), ('B', 4.5), ('C', 5.25)]
>>> records2 = [('D', 5.5), ('E', 4.5), ('F', 5.25)]
>>> sim.Indices(strategy, records1)
Indices([('MyIndex', {4: [('B', 4.5)], 5: [('A', 5.5), ('C', 5.25)]})])
>>> sim.Indices.check_strategy((1, 2, 3))
Traceback (most recent call last):
...
TypeError: 1: not a string.
>>> sim.Indices.check_strategy([])
Traceback (most recent call last):
...
TypeError: []: not a strategy triple.
"""
def __init__(self, strategy, records=[]):
for strat in strategy:
self.check_strategy(strat)
super(Indices, self).__init__(
(name, idxtype(keyfunc, records))
for name, idxtype, keyfunc in strategy)
@staticmethod
def check_strategy(strategy):
"""Raise TypeError if strategy tuple is wrong in some way."""
if len(strategy) != 3:
raise TypeError("{0!r}: not a strategy triple.".format(strategy))
name, idxtype, keyfunc = strategy
if not isinstance(name, basestring):
raise TypeError("{0!r}: not a string.".format(name))
if not hasattr(idxtype, "compare") and hasattr(idxtype, "insert"):
raise TypeError("{0!r}: not an index type.".format(idxtype))
if not callable(keyfunc):
raise TypeError("{0!r}: not callable.".format(keyfunc))
def insert(self, record):
"""Insert a record into each :class:`Index`."""
for index in self.itervalues():
index.insert(record)
def compare(self, simfunc, other=None):
"""Compute similarities of indexed pairs of records.
:type simfunc: func(`R`, `R`) (`float`, ...)
:param simfunc: takes pair of records and returns a similarity vector.
:type other: :class:`Indices`
:param other: Another Indices to compare against.
:rtype: {(R, R):(float, ...)}
:return: mapping from pairs of records similarity vectors.
"""
comparisons = {}
if other is None or other is self:
for index in self.itervalues():
index.compare(simfunc, None, comparisons)
else:
for index1, index2 in zip(self.itervalues(), other.itervalues()):
if type(index1) is not type(index2):
raise TypeError(
"Indeces of type {0} and type {1} are incompatible"\
.format(type(index1), type(index2)))
index1.compare(simfunc, index2, comparisons)
return comparisons
def log_comparisons(self, other):
"""Log the expected between-index comparisons."""
if other is not None and other is not self:
for (n1, i1), (n2, i2) in zip(self.items(), other.items()):
LOG.info("name=TwoIndexCompare idx1=%s idx2=%s comparisons=%s",
n1, n2, i1.count(i2))
i1.log_size(n1)
i2.log_size(n2)
else:
for name, index in self.iteritems():
LOG.info("name=OneIndexCompare idx=%s comparisons=%s",
name, index.count())
index.log_size(name)
| gpl-3.0 | -7,948,967,772,968,339,000 | 36.744304 | 79 | 0.592058 | false |
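# Sketch assembled from the doctests above: a Record similarity built from two
# Field comparisons, evaluated on one pair of records (the values mirror the
# sim.Record doctest).
from dedupe import sim
numsim = lambda x, y: 2.0 ** -abs(x - y)  # toy similarity of numbers
rsim = sim.Record(("V1", sim.Field(numsim, 1, float)),
                  ("V2", sim.Field(numsim, 2, float)))
print(rsim(('A', 1, 1), ('B', 2, 4)))  # Similarity(V1=0.5, V2=0.125)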
jromang/retina-old | distinclude/spyderlib/widgets/externalshell/baseshell.py | 1 | 12772 | # -*- coding: utf-8 -*-
#
# Copyright © 2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
import sys
import os
import os.path as osp
from time import time, strftime, gmtime
from spyderlib.qt.QtGui import (QApplication, QWidget, QVBoxLayout,
QHBoxLayout, QMenu, QLabel, QInputDialog,
QLineEdit, QToolButton)
from spyderlib.qt.QtCore import (QProcess, SIGNAL, QByteArray, QTimer, Qt,
QTextCodec)
locale_codec = QTextCodec.codecForLocale()
# Local imports
from spyderlib.utils.qthelpers import (create_toolbutton, create_action,
add_actions)
from spyderlib.baseconfig import get_conf_path, _
from spyderlib.config import get_icon
def add_pathlist_to_PYTHONPATH(env, pathlist):
# PyQt API 1/2 compatibility-related tests:
assert isinstance(env, list)
assert all([isinstance(path, basestring) for path in env])
pypath = "PYTHONPATH"
pathstr = os.pathsep.join(pathlist)
if os.environ.get(pypath) is not None:
for index, var in enumerate(env[:]):
if var.startswith(pypath+'='):
env[index] = var.replace(pypath+'=',
pypath+'='+pathstr+os.pathsep)
env.append('OLD_PYTHONPATH='+os.environ[pypath])
else:
env.append(pypath+'='+pathstr)
#TODO: code refactoring/cleaning (together with systemshell.py and pythonshell.py)
class ExternalShellBase(QWidget):
"""External Shell widget: execute Python script in a separate process"""
SHELL_CLASS = None
def __init__(self, parent=None, wdir=None, history_filename=None,
show_icontext=True, light_background=True, menu_actions=None,
show_buttons_inside=True, show_elapsed_time=True):
QWidget.__init__(self, parent)
self.menu_actions = menu_actions
self.run_button = None
self.kill_button = None
self.options_button = None
self.icontext_action = None
self.show_elapsed_time = show_elapsed_time
if wdir is None:
wdir = osp.dirname(osp.abspath(self.fname))
self.wdir = wdir if osp.isdir(wdir) else None
self.arguments = ""
self.shell = self.SHELL_CLASS(parent, get_conf_path(history_filename))
self.shell.set_light_background(light_background)
self.connect(self.shell, SIGNAL("execute(QString)"),
self.send_to_process)
self.connect(self.shell, SIGNAL("keyboard_interrupt()"),
self.keyboard_interrupt)
# Redirecting some SIGNALs:
self.connect(self.shell, SIGNAL('redirect_stdio(bool)'),
lambda state: self.emit(SIGNAL('redirect_stdio(bool)'),
state))
self.state_label = None
self.time_label = None
vlayout = QVBoxLayout()
toolbar_buttons = self.get_toolbar_buttons()
if show_buttons_inside:
self.state_label = QLabel()
hlayout = QHBoxLayout()
hlayout.addWidget(self.state_label)
hlayout.addStretch(0)
hlayout.addWidget(self.create_time_label())
hlayout.addStretch(0)
for button in toolbar_buttons:
hlayout.addWidget(button)
vlayout.addLayout(hlayout)
else:
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.addWidget(self.get_shell_widget())
self.setLayout(vlayout)
self.resize(640, 480)
if parent is None:
self.setWindowIcon(self.get_icon())
self.setWindowTitle(_("Console"))
self.t0 = None
self.timer = QTimer(self)
self.process = None
self.is_closing = False
if show_buttons_inside:
self.update_time_label_visibility()
def set_elapsed_time_visible(self, state):
self.show_elapsed_time = state
if self.time_label is not None:
self.time_label.setVisible(state)
def create_time_label(self):
"""Create elapsed time label widget (if necessary) and return it"""
if self.time_label is None:
self.time_label = QLabel()
return self.time_label
def update_time_label_visibility(self):
self.time_label.setVisible(self.show_elapsed_time)
def is_running(self):
if self.process is not None:
return self.process.state() == QProcess.Running
def get_toolbar_buttons(self):
if self.run_button is None:
self.run_button = create_toolbutton(self, text=_("Run"),
icon=get_icon('run.png'),
tip=_("Run again this program"),
triggered=self.start_shell)
if self.kill_button is None:
self.kill_button = create_toolbutton(self, text=_("Kill"),
icon=get_icon('kill.png'),
tip=_("Kills the current process, "
"causing it to exit immediately"))
buttons = [self.run_button]
if self.options_button is None:
options = self.get_options_menu()
if options:
self.options_button = create_toolbutton(self, text=_("Options"),
icon=get_icon('tooloptions.png'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
add_actions(menu, options)
self.options_button.setMenu(menu)
if self.options_button is not None:
buttons.append(self.options_button)
buttons.append(self.kill_button)
return buttons
def set_icontext_visible(self, state):
"""Set icon text visibility"""
for widget in self.get_toolbar_buttons():
if state:
widget.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
else:
widget.setToolButtonStyle(Qt.ToolButtonIconOnly)
def get_options_menu(self):
self.show_time_action = create_action(self, _("Show elapsed time"),
toggled=self.set_elapsed_time_visible)
self.show_time_action.setChecked(self.show_elapsed_time)
actions = [self.show_time_action]
if self.menu_actions is not None:
actions += [None]+self.menu_actions
return actions
def get_shell_widget(self):
return self.shell
def get_icon(self):
raise NotImplementedError
def show_time(self, end=False):
if self.time_label is None:
return
elapsed_time = time()-self.t0
if elapsed_time > 24*3600: # More than a day...!
format = "%d %H:%M:%S"
else:
format = "%H:%M:%S"
if end:
color = "#AAAAAA"
else:
color = "#AA6655"
text = "<span style=\'color: %s\'><b>%s" \
"</b></span>" % (color, strftime(format, gmtime(elapsed_time)))
self.time_label.setText(text)
def closeEvent(self, event):
if self.process is not None:
self.is_closing = True
self.process.kill()
self.process.waitForFinished(100)
self.disconnect(self.timer, SIGNAL("timeout()"), self.show_time)
def set_running_state(self, state=True):
self.set_buttons_runnning_state(state)
self.shell.setReadOnly(not state)
if state:
if self.state_label is not None:
self.state_label.setText(_(
"<span style=\'color: #44AA44\'><b>Running...</b></span>"))
self.t0 = time()
self.connect(self.timer, SIGNAL("timeout()"), self.show_time)
self.timer.start(1000)
else:
if self.state_label is not None:
self.state_label.setText(_('Terminated.'))
self.disconnect(self.timer, SIGNAL("timeout()"), self.show_time)
def set_buttons_runnning_state(self, state):
self.run_button.setVisible(not state)
self.kill_button.setVisible(state)
def start_shell(self, ask_for_arguments=False):
"""Start shell"""
if ask_for_arguments and not self.get_arguments():
self.set_running_state(False)
return
self.create_process()
def get_arguments(self):
arguments, valid = QInputDialog.getText(self, _('Arguments'),
_('Command line arguments:'),
QLineEdit.Normal,
self.arguments)
if valid:
self.arguments = unicode(arguments)
return valid
def create_process(self):
raise NotImplementedError
def finished(self, exit_code, exit_status):
self.shell.flush()
self.emit(SIGNAL('finished()'))
if self.is_closing:
return
self.set_running_state(False)
self.show_time(end=True)
#===============================================================================
# Input/Output
#===============================================================================
def transcode(self, bytes):
return unicode( locale_codec.toUnicode(bytes.data()) )
def get_stdout(self):
self.process.setReadChannel(QProcess.StandardOutput)
bytes = QByteArray()
while self.process.bytesAvailable():
bytes += self.process.readAllStandardOutput()
return self.transcode(bytes)
def get_stderr(self):
self.process.setReadChannel(QProcess.StandardError)
bytes = QByteArray()
while self.process.bytesAvailable():
bytes += self.process.readAllStandardError()
return self.transcode(bytes)
def write_output(self):
self.shell.write(self.get_stdout(), flush=True)
QApplication.processEvents()
def send_to_process(self, qstr):
raise NotImplementedError
def send_ctrl_to_process(self, letter):
char = chr("abcdefghijklmnopqrstuvwxyz".index(letter) + 1)
byte_array = QByteArray()
byte_array.append(char)
self.process.write(byte_array)
self.process.waitForBytesWritten(-1)
self.shell.write(locale_codec.toUnicode(byte_array), flush=True)
def keyboard_interrupt(self):
raise NotImplementedError
def test():
from spyderlib.utils.qthelpers import qapplication
app = qapplication()
from spyderlib.widgets.externalshell.pythonshell import ExternalPythonShell
from spyderlib.widgets.externalshell.systemshell import ExternalSystemShell
import spyderlib
from spyderlib.plugins.variableexplorer import VariableExplorer
settings = VariableExplorer.get_settings()
shell = ExternalPythonShell(wdir=osp.dirname(spyderlib.__file__),
ipython_kernel=True, stand_alone=settings,
arguments="-q4thread -pylab -colors LightBG",
mpl_patch_enabled=True, light_background=False)
# shell = ExternalPythonShell(wdir=osp.dirname(spyderlib.__file__),
# interact=True, umd_enabled=True,
# stand_alone=settings,
# umd_namelist=['guidata', 'guiqwt'],
# umd_verbose=True, mpl_patch_enabled=False,
# light_background=False)
# shell = ExternalSystemShell(wdir=osp.dirname(spyderlib.__file__),
# light_background=False)
shell.shell.toggle_wrap_mode(True)
shell.start_shell(False)
from spyderlib.qt.QtGui import QFont
font = QFont("Lucida console")
font.setPointSize(10)
shell.shell.set_font(font)
shell.show()
sys.exit(app.exec_())
if __name__ == "__main__":
test() | gpl-3.0 | 7,928,487,626,756,879,000 | 37.791277 | 82 | 0.548743 | false |
rosenbrockc/fortpy | fortpy/interop/converter.py | 1 | 19280 | import os
from .. import msg
import re
import xml.etree.ElementTree as ET
from fortpy.testing.comparer import FileComparer
class TemplateLine(object):
"""Represents a single line in the template file and how to format it.
:arg element: the XML element that defines this line in the file.
:arg group: the [group] that this line belongs to.
"""
def __init__(self, element, group, commentchar):
self.identifier = element.attrib["id"]
#First look at the "mandatory" attributes and assign defaults if missing
if "type" in element.attrib:
self.dtype = re.split(",\s*", element.attrib["type"])
else:
msg.warn("no type specified for {}. Assuming string.".format(self.identifier))
self.dtype = [ "string" ]
#Values specifies how many variable values are present in the file
if "values" in element.attrib:
self.values = re.split(",\s*", element.attrib["values"])
i = 0
for i in range(len(self.values)):
if self.values[i].isdigit():
self.values[i] = int(self.values[i])
elif "from" not in element.attrib:
msg.warn("no value count specified for {}. Assuming *.".format(self.identifier))
self.values = [ "*" ]
else:
self.values = []
#Handle all the optional attributes
if "choices" in element.attrib:
self.choices = re.split(",\s*", element.attrib["choices"])
else:
self.choices = []
if "comment" in element.attrib:
self.comment = "{} {}".format(commentchar, element.attrib["comment"])
else:
self.comment = ""
if "default" in element.attrib:
self.default = eval(element.attrib["default"])
else:
self.default = None
#How from works: if an element has a from attribute, it is included in
#the plaintext file after conversion but does *not* appear in the XML
#file that is being converted. It grabs its value from another group
#or line whose id is the from attribute's value.
if "from" in element.attrib:
self.fromtag = element.attrib["from"]
else:
self.fromtag = None
#Related to from, this operator specifies how the value should be generated
#using the line/group whose id is the from attribute's value.
if "operator" in element.attrib:
self.operator = element.attrib["operator"]
else:
self.operator = "count"
self.group = group
self._nvalues = None
self._caster = {
"int": self._cast_int,
"float": self._cast_float,
#We want to use the same syntax even though we do nothing with strings
"string": lambda s: s
}
@property
def nvalues(self):
"""Returns the number of values recorded on this single line. If the
number is variable, it returns -1."""
if self._nvalues is None:
self._nvalues = 0
for val in self.values:
if isinstance(val, int):
self._nvalues += val
else:
self._nvalues = -1
break
return self._nvalues
def write(self, valuedict):
"""Returns the lines that this template line should add to the input file."""
if self.identifier in valuedict:
value = valuedict[self.identifier]
elif self.default is not None:
value = self.default
elif self.fromtag is not None and self.fromtag in valuedict:
if self.operator == "count":
value = len(valuedict[self.fromtag])
else:
msg.err("referenced 'from' attribute/operator {} not in xml dictionary.".format(self.fromtag))
exit(1)
else:
msg.err("a required line {} had no value or default specified.".format(self.identifier))
exit(1)
#Before we generate the result, validate the choices if they exist
if len(self.choices) > 0:
for single in value:
if str(single) not in self.choices:
msg.warn("failed choices validation for {} in {} (line {})".format(
single, self.choices, self.identifier))
result = []
#Get the string representation of the value
if isinstance(value, list):
sval = " ".join([ str(val) for val in value])
else:
sval = str(value)
if self.comment != "" and (self.nvalues < 0 or self.nvalues > 5):
#We will put the comments on a separate line from the actual values.
result.append(self.comment)
result.append(sval)
else:
result.append("{} {}".format(sval, self.comment))
return result
def parse(self, element):
"""Parses the contents of the specified XML element using template info.
:arg element: the XML element from the input file being converted.
"""
result = []
if element.text is not None and element.tag == self.identifier:
l, k = (0, 0)
raw = element.text.split()
while k < len(self.values):
dtype = self.dtype[k]
if isinstance(self.values[k], int):
for i in range(self.values[k]):
result.append(self._caster[dtype](raw[i + l]))
l += self.values[k]
k += 1
else:
#This is a variable argument line, just use up the rest
#of them as the type of the current line
rest = [ self._caster[dtype](val) for val in raw[l::] ]
result.extend(rest)
break
else:
msg.warn("no results for parsing {} using line {}".format(element.tag, self.identifier))
return result
def _cast_int(self, value):
"""Returns the specified value as int if possible."""
try:
return int(value)
except ValueError:
msg.err("Cannot convert {} to int for line {}.".format(value, self.identifier))
exit(1)
def _cast_float(self, value):
"""Returns the specified value as float if possible."""
try:
return float(value)
except ValueError:
msg.err("Cannot convert {} to float for line {}.".format(value, self.identifier))
exit(1)
class TemplateGroup(object):
"""Represents a logical grouping of line templates.
:arg element: the XML group element to parse.
:arg commentchar: the character(s) that specify comment lines. Used when
inserting comments beside lines in the plaintext file.
"""
def __init__(self, element, commentchar):
self.identifier = element.attrib["name"]
self.order = []
self.lines = {}
if "comment" in element.attrib:
self.comment = "{} {}".format(commentchar, element.attrib["comment"])
else:
self.comment = ""
if "repeat" in element.attrib:
self.repeat = element.attrib["repeat"]
else:
self.repeat = None
self._load(element, commentchar)
def _load(self, element, commentchar):
"""Loads all the child line elements from the XML group element."""
for child in element:
if "id" in child.attrib:
tline = TemplateLine(child, self, commentchar)
self.order.append(tline.identifier)
self.lines[tline.identifier] = tline
else:
msg.warn("no id element in {}. Ignored. (group._load)".format(child))
def parse(self, element):
"""Extracts the values from the specified XML element that is being converted."""
#All the children of this element are what we are trying to parse.
result = []
for child in element:
if child.tag in self.lines:
values = { child.tag: self.lines[child.tag].parse(child) }
result.append(values)
return result
def write(self, valuedict):
"""Generates the lines for the converted input file using the specified
value dictionary."""
result = []
if self.identifier in valuedict:
values = valuedict[self.identifier]
else:
return result
if self.comment != "":
result.append(self.comment)
if self.repeat is not None and type(values) == type([]):
if self.repeat.isdigit():
for i in range(int(self.repeat)):
result.extend(self._write_iterate(values[i]))
else:
#We are repeating for as many values as we have in the value
#entry for the group in the dictionary.
for value in values:
result.extend(self._write_iterate(value))
elif type(values) == type({}):
#This group doesn't get repeated, so the values variable must
#be a dictionary, just run it once.
result = self._write_iterate(values)
return result
def _write_iterate(self, values):
"""Generates the lines for a single pass through the group."""
result = []
for key in self.order:
result.append(self.lines[key].write(values))
if len(result) > 1:
return result
else:
return result[0]
class TemplateContents(object):
"""The contents of an XML input template.
:attr order: a list of id attributes from the lines in the template file
that preserves the order in which the lines showed up in the file.
:attr entries: a dictionary of TemplateLine and TemplateGroup instances
for the corresponding lines and groups in the template. Dict keys are
the identifiers in the order list.
:attr comment: the character(s) at the start of a line that specify it as
a comment line."""
def __init__(self):
self.order = []
self.entries = {}
self.comment = "#"
class FileTemplate(object):
"""Represents an XML template that specifies how to format an input/output
file using a dictionary of keyed values.
:arg path: the full path to the XML template file to load.
"""
def __init__(self, path, name, direction="input"):
self.name = name
self.path = os.path.expanduser(path)
self.versions = {}
self.direction = direction
self._load()
def _load(self):
"""Extracts the XML template data from the file."""
if os.path.exists(self.path):
root = ET.parse(self.path).getroot()
if (root.tag == "fortpy" and "mode" in root.attrib and
root.attrib["mode"] == "template" and "direction" in root.attrib and
root.attrib["direction"] == self.direction):
#First, we need instances of the template contents for each of the
#versions listed in the fortpy tag.
for v in _get_xml_version(root):
self.versions[v] = TemplateContents()
#Now we can update the contents objects using the XML data.
self._load_entries(root)
#See if a custom name was specified for the auto-converted
#files.
if "autoname" in root.attrib:
self.name = root.attrib["autoname"]
else:
msg.err("the specified template {} ".format(self.path) +
"is missing the mode and direction attributes.")
exit(1)
else:
msg.err("could not find the template {}.".format(self.path))
exit(1)
def parse(self, root):
"""Returns a dictionary of values extracted from the root of the
specified XML file. It is assumed that the file is an input/output
file to be converted into plaintext. As such the file should only
specify a single version number."""
#Use the first element in the versions list since there should only be one.
v = _get_xml_version(root)[0]
result = {}
for child in root:
if child.tag in self.versions[v].entries:
entry = self.versions[v].entries[child.tag]
#Entry can be either a line or a group. Both objects have a parse
#method that returns a list of values. In the line's case, the
#list is the values from that line. For the group, it is a list
#of dictionaries, a dictionary for each tag name.
result[child.tag] = entry.parse(child)
return result
def write(self, valuedict, version):
"""Generates the lines for the converted input file from the valuedict.
:arg valuedict: a dictionary of values where the keys are ids in the
template and the values obey their template rules.
:arg version: the target version of the output file.
"""
result = []
if version in self.versions:
for tag in self.versions[version].order:
entry = self.versions[version].entries[tag]
result.extend(entry.write(valuedict))
return result
def _load_entries(self, root):
"""Loads all the child entries of the input template from the
specified root element."""
mdict = {
"comments": self._comment,
"line": self._line,
"group": self._group
}
for entry in root:
mdict[entry.tag](entry)
def _comment(self, element):
"""Extracts the character to use for comments in the input file."""
for v in _get_xml_version(element):
self.versions[v].comment = element.text
def _line(self, element):
"""Parses the XML element as a single line entry in the input file."""
for v in _get_xml_version(element):
if "id" in element.attrib:
tline = TemplateLine(element, None, self.versions[v].comment)
self.versions[v].entries[tline.identifier] = tline
self.versions[v].order.append(tline.identifier)
else:
msg.warn("no id element in {}. Ignored. (_line)".format(element))
def _group(self, element):
"""Parses the XML element as a group of [unknown] number of lines."""
for v in _get_xml_version(element):
if "name" in element.attrib:
g = TemplateGroup(element, self.versions[v].comment)
self.versions[v].entries[g.identifier] = g
self.versions[v].order.append(g.identifier)
else:
msg.warn("no name element in {}. Ignored. (_group)".format(element))
def _get_xml_version(element):
"""Extracts a list of versions that an xml element references. Returns
a [ 1 ] list if there isn't a versions attribute."""
if "versions" in element.attrib:
result = [ int(v) for v in re.split(",\s*", element.attrib["versions"]) ]
else:
result = [ 1 ]
return result
class FileConverter(object):
"""Converts XML-based input/output files into non-keyword based ones.
:arg template_dir: the path to the directory containing input file templates.
"""
def __init__(self, template_dir):
self.template_dir = os.path.expanduser(template_dir)
self.templates = {}
def convert(self, path, version, target = None):
"""Converts the specified file using the relevant template.
:arg path: the full path to the file to convert.
:arg version: the new version of the file.
:arg target: the optional path to save the file under. If not
specified, the file is saved based on the template file name.
"""
#Get the template and values out of the XML input file and
#write them in the format of the keywordless file.
values, template = self.parse(path)
lines = template.write(values, version)
#Finally, write the lines to the correct path.
if target is None:
target = os.path.join(os.path.dirname(path), template.name)
with open(os.path.expanduser(target), 'w') as f:
f.write("\n".join(lines))
def parse(self, path):
"""Extracts a dictionary of values from the XML file at the specified path."""
#Load the template that will be used for parsing the values.
expath, template, root = self._load_template(path)
if expath is not None:
values = template.parse(root)
return (values, template)
class OutputConverter(object):
"""Converts plain-text output files between versions."""
def __init__(self, template_dir):
self.comparer = FileComparer(os.path.expanduser(template_dir))
def convert(self, path, version, target):
"""Converts the specified source file to a new version number."""
source = self.comparer.get_representation(path)
lines = [ '# <fortpy version="{}"></fortpy>\n'.format(version) ]
for line in self.comparer.template.contents[version].preamble:
lines.append(line.write(source.preamble, source.version, source.stored) + "\n")
for line in self.comparer.template.contents[version].body:
for valueset in source.body:
lines.append(line.write(valueset, source.version, source.stored) + "\n")
with open(os.path.expanduser(target), 'w') as f:
f.writelines(lines)
class InputConverter(FileConverter):
"""Converts XML-based input files into non-keyword based ones.
:arg template_dir: the path to the directory containing input file templates.
"""
def __init__(self, template_dir):
super(InputConverter, self).__init__(template_dir)
def _load_template(self, path):
#First we extract the file name for the template or look for it
#in the root element. The naming convention is to use .xin.xml
#as the extension. If we replace the .xin.xml by .in.xml it
#should cover most cases.
expath = os.path.expanduser(path)
root = ET.parse(expath).getroot()
if root.tag == "fortpy" and "mode" in root.attrib and \
root.attrib["mode"] == "input":
#This is a valid input file.
if "template" in root.attrib:
template = root.attrib["template"]
else:
template = os.path.split(expath)[1].replace(".xin.xml", ".in.xml")
tpath = os.path.join(self.template_dir, template)
name = template.replace(".xml","")
self.templates[template] = FileTemplate(tpath, name)
return (expath, self.templates[template], root)
else:
msg.warn("the input file {} is missing the mode attribute.".format(path))
return None
| mit | -2,943,328,751,953,272,000 | 39.504202 | 110 | 0.578631 | false |
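# Sketch of driving the converter above; the template directory and input file
# paths are hypothetical. InputConverter looks up the matching .in.xml template
# and writes the keywordless file next to the XML input.
from fortpy.interop.converter import InputConverter
conv = InputConverter("~/fortpy/templates")
conv.convert("~/runs/case01/input.xin.xml", version=1)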
room77/py77 | pylib/util/singleton.py | 1 | 2390 | """
Singleton implementation.
Usage:
class A(singleton.Singleton): pass
Please NOTE:
id(A.Instance()), id(A))
"""
__copyright__ = '2013, Room 77, Inc.'
__author__ = 'Pramod Gupta'
import threading
# with_metaclass method from Six compatibility library.
# https://github.com/benjaminp/six/blob/1.11.0/six.py#L819
def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
class SingletonException(Exception):
pass
class _SingletonMeta(type):
def __new__(cls, name, bases, dct):
if '__new__' in dct:
raise SingletonException('Can not override __new__ in a Singleton')
return super(_SingletonMeta, cls).__new__(cls, name, bases, dct)
def __call__(cls, *args, **dictArgs):
raise SingletonException('Singletons may only be instantiated through Instance()')
class Singleton(with_metaclass(_SingletonMeta, object)):
_lock = threading.RLock()
@classmethod
def Instance(cls, *args, **kw):
"""
Call this to instantiate an instance or retrieve the existing instance.
If the singleton requires args to be instantiated, include them the first
time you call Instance.
"""
if not cls.Instantiated(): Singleton._createSingletonInstance(cls, args, kw)
return cls._instance
@classmethod
def Instantiated(cls):
# Don't use hasattr(cls, '_instance'), because that screws things up if there is a singleton
# that extends another singleton.
# hasattr looks in the base class if it doesn't find in subclass.
return '_instance' in cls.__dict__
@staticmethod
def _createSingletonInstance(cls, args, kw):
with Singleton._lock:
# Check if the the class really needs to be instantiated.
if cls.Instantiated(): return
try:
# Create the new instance and init it.
instance = cls.__new__(cls)
instance.__init__(*args, **kw)
except TypeError as e:
if '__init__() takes' in str(e):  # str(e) instead of the Python 2-only e.message
raise SingletonException('If the singleton requires __init__ args, '
'supply them on first call to Instance().')
else:
raise e
cls._instance = instance
| mit | 9,167,899,862,102,201,000 | 28.875 | 98 | 0.648536 | false |
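# Sketch of the usage described in the docstring above: subclasses are only
# instantiated through Instance(), and later calls return the same object.
class Config(Singleton):
    def __init__(self, name):
        self.name = name

c = Config.Instance('prod')       # first call creates the instance
assert c is Config.Instance()     # later calls return the same object
# Config('prod') would raise SingletonException (direct instantiation blocked)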
caktus/django-treenav | treenav/tests/test_views.py | 1 | 17704 | from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.http import HttpRequest
from django.template import Template
from django.template.context import Context
from django.test import override_settings
from django.urls import reverse
from treenav.context_processors import treenav_active
from treenav.forms import MenuItemForm
from treenav.models import Item, MenuItem
from .base import TreeNavTestCase as TestCase
from .models import Team
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class TreeNavTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
**{
"label": "Primary Navigation",
"slug": "primary-nav",
"order": 0,
}
)
self.create_menu_item(
**{
"parent": self.root,
"label": "Our Blog",
"slug": "our-blog",
"order": 4,
}
)
self.create_menu_item(
**{
"parent": self.root,
"label": "Home",
"slug": "home",
"order": 0,
}
)
self.child = self.create_menu_item(
**{
"parent": self.root,
"label": "About Us",
"slug": "about-us",
"order": 9,
}
)
self.second_level = self.create_menu_item(
**{
"parent": self.child,
"label": "Second",
"slug": "second",
"order": 0,
}
)
self.third_level = self.create_menu_item(
**{
"parent": self.second_level,
"label": "Third",
"slug": "third",
"order": 0,
}
)
def test_treenav_active(self):
request = HttpRequest()
request.META["PATH_INFO"] = "/"
treenav_active(request)
def test_to_tree(self):
self.root.to_tree()
def compile_string(self, url, template_str):
return Template(template_str).render(Context())
def test_non_unique_form_save(self):
dup = MenuItemForm(
{
"label": "test nav",
"slug": "primary-nav",
"order": 0,
}
)
self.assertFalse(dup.is_valid(), "Form says a duplicate slug is valid.")
def test_single_level_menu_root(self):
template_str = """{% load treenav_tags %}
{% single_level_menu "primary-nav" 0 %}
"""
result = self.compile_string("/", template_str)
self.assertNotIn(self.second_level.label, result)
def test_single_level_menu_about_us(self):
template_str = """{% load treenav_tags %}
{% single_level_menu "about-us" 0 %}
"""
result = self.compile_string("/", template_str)
self.assertIn(self.second_level.label, result)
def test_show_treenav(self):
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" %}
"""
result = self.compile_string("/", template_str)
self.assertNotIn(self.second_level.label, result)
def test_single_level_menu_show_treenav_equality(self): # necessary?
"""Tests that the single_level_menu and show_treenav tags output the
same for the top level of the tree.
"""
template_str = """{% load treenav_tags %}
{% single_level_menu "primary-nav" 0 %}
"""
single_level_menu_result = self.compile_string("/", template_str)
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" %}
"""
show_treenav_result = self.compile_string("/", template_str)
self.assertEqual(single_level_menu_result, show_treenav_result)
def test_show_treenav_third_level(self):
template_str = """{% load treenav_tags %}
{% show_treenav "primary-nav" full_tree="True" %}
"""
result = self.compile_string("/", template_str)
self.assertIn(self.third_level.label, result)
def test_show_menu_crumbs(self):
template_str = """{% load treenav_tags %}
{% show_menu_crumbs "about-us" %}
"""
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
self.create_menu_item(
**{
"parent": self.root,
"label": "Durham Bulls",
"slug": "durham-bulls",
"order": 4,
"content_type": ct,
"object_id": team.pk,
}
)
compiled = self.compile_string(team.get_absolute_url(), template_str)
# FIXME: This fixes the pep8 warning, but need to figure out what we're asserting
self.assertTrue(compiled)
def test_getabsoluteurl(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
**{
"label": "Durham Bulls",
"slug": "durham-bulls",
"order": 4,
"content_type": ct,
"object_id": team.pk,
}
)
self.assertEqual(menu.href, team.get_absolute_url())
def test_changed_getabsoluteurl(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
parent=self.root,
label="Durham Bulls",
slug="durham-bulls",
order=9,
content_type=ct,
object_id=team.pk,
href=team.get_absolute_url(),
)
# change slug and save it to fire post_save signal
team.slug = "wildcats"
team.save()
menu = MenuItem.objects.get(slug="durham-bulls")
self.assertEqual(menu.href, team.get_absolute_url())
def test_active_url(self):
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
self.child.object_id = team.pk
self.child.content_type = ct
self.child.content_object = team
self.child.save()
item = Item(self.child)
active_item = item.set_active(team.get_absolute_url())
self.assertEqual(active_item.node, self.child)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class TreeNavViewTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
label="Primary Navigation",
slug="primary-nav",
order=0,
)
self.create_menu_item(
parent=self.root,
label="Our Blog",
slug="our-blog",
order=4,
)
self.create_menu_item(
parent=self.root,
label="Home",
slug="home",
order=0,
)
self.child = self.create_menu_item(
parent=self.root,
label="About Us",
slug="about-us",
order=9,
)
def test_tags_level(self):
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "primary-nav", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 3)
self.assertContains(response, "depth-0")
def test_tags_no_page(self):
url = reverse("test_view", args=("notthere",))
response = self.client.post(url, {"pslug": "primary-nav", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 3)
self.assertContains(response, "depth-0")
def test_tags_level2(self):
self.create_menu_item(
parent=self.child,
label="Second Level",
slug="second-level",
order=10,
)
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "about-us", "N": 0})
self.assertEqual(response.content.decode("utf-8").count("<li"), 1)
def test_tags_improper(self):
url = reverse("test_view", args=("home",))
response = self.client.post(url, {"pslug": "no-nav", "N": 10000})
self.assertNotContains(response, "<ul")
def test_hierarchy(self):
root = self.root.to_tree()
self.assertEqual(len(root.children), 3)
children = ("Home", "Our Blog", "About Us")
for item, expected_label in zip(root.children, children):
self.assertEqual(item.node.label, expected_label)
def test_undefined_url(self):
"""
Testing the undefined_url view.
"""
slug = self.child.slug
url = reverse(
"treenav_undefined_url",
args=[
slug,
],
)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class RefreshViewTestCase(TestCase):
"Admin view to trigger refresh of hrefs."
def setUp(self):
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.refresh_url = reverse("admin:treenav_refresh_hrefs")
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changelist_url = reverse("admin:%s_%s_changelist" % self.info)
self.client.login(username="test", password="test")
def test_trigger_refresh(self):
"Trigger update of menu item HREFs."
team = Team.objects.create(slug="durham-bulls")
ct = ContentType.objects.get(app_label="treenav_tests", model="team")
menu = self.create_menu_item(
label="Durham Bulls",
slug="durham-bulls",
order=9,
content_type=ct,
object_id=team.pk,
href=team.get_absolute_url(),
)
# change slug and save it to fire post_save signal
team.slug = "wildcats"
team.save()
self.assertNotEqual(menu.href, team.get_absolute_url())
response = self.client.get(self.refresh_url, follow=True)
self.assertRedirects(response, self.changelist_url)
menu = MenuItem.objects.get(pk=menu.pk)
self.assertEqual(menu.href, team.get_absolute_url())
self.assertEqual(len(response.context["messages"]), 1)
def test_trigger_refresh_redirects_to_custom_admin(self):
"Trigger update of menu item HREFs for a second custom admin."
refresh_url = reverse("admin:treenav_refresh_hrefs", current_app="admin2")
response = self.client.get(refresh_url, follow=True)
changelist_url = reverse(
"admin:%s_%s_changelist" % self.info, current_app="admin2"
)
self.assertRedirects(response, changelist_url)
def test_no_permission(self):
"Non-staff cannot trigger the refresh."
self.superuser.is_staff = False
self.superuser.save()
response = self.client.get(self.refresh_url, follow=True)
# Admin displays a login page with 200 status code
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["messages"]), 0)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class ClearCacheViewTestCase(TestCase):
"Admin view to clear menu cache."
def setUp(self):
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.cache_url = reverse("admin:treenav_clean_cache")
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changelist_url = reverse("admin:%s_%s_changelist" % self.info)
self.client.login(username="test", password="test")
def test_reset_cache(self):
"Clear MenuItems from cache."
menu = self.create_menu_item(
label="Our Blog",
slug="our-blog",
order=4,
)
menu.to_tree()
valid = cache.get("menu-tree-%s" % menu.slug)
self.assertTrue(valid, "Menu should be cached")
cache.set("menu-tree-%s" % menu.slug, "INVALID!!!")
response = self.client.get(self.cache_url, follow=True)
self.assertRedirects(response, self.changelist_url)
self.assertEqual(len(response.context["messages"]), 1)
# Cache should be recycled
current = cache.get("menu-tree-%s" % menu.slug)
self.assertNotEqual(current, "INVALID!!!")
def test_reset_cache_redirects_to_custom_admin(self):
"After cleaning cache, redirects to custom admin."
cache_url = reverse("admin:treenav_clean_cache", current_app="admin2")
response = self.client.get(cache_url, follow=True)
changelist_url = reverse(
"admin:%s_%s_changelist" % self.info, current_app="admin2"
)
self.assertRedirects(response, changelist_url)
def test_no_permission(self):
"Non-staff cannot clear the cache."
self.superuser.is_staff = False
self.superuser.save()
response = self.client.get(self.cache_url, follow=True)
# Admin displays a login page with 200 status code
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context["messages"]), 0)
@override_settings(ROOT_URLCONF="treenav.tests.urls")
class SimultaneousReorderTestCase(TestCase):
def setUp(self):
self.root = self.create_menu_item(
label="Primary Navigation",
slug="primary-nav",
order=0,
)
self.blog = self.create_menu_item(
parent=self.root,
label="Our Blog",
slug="our-blog",
order=4,
)
self.home = self.create_menu_item(
parent=self.root,
label="Home",
slug="home",
order=0,
)
self.superuser = User.objects.create_user("test", "", "test")
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
self.info = MenuItem._meta.app_label, MenuItem._meta.model_name
self.changeform_url = reverse("admin:%s_%s_change" % self.info, args=(1,))
self.client.login(username="test", password="test")
def test_reorder(self):
# Build up the post dict, starting with the top form
data = {
"parent": "",
"label": "Primary Navigation",
"slug": "primary-nav",
"order": 0,
"is_enabled": "on",
"link": "",
"content_type": "",
"object_id": "",
}
# Now update the post dict with inline form info
data.update(
{
"children-TOTAL_FORMS": 3,
"children-INITIAL_FORMS": 2,
"children-MAX_NUM_FORMS": 1000,
}
)
# Update the post dict with the children, swapping their order values
data.update(
{
"children-0-id": 3,
"children-0-parent": 1,
"children-0-label": "Home",
"children-0-slug": "home",
"children-0-order": 4,
"children-0-is_enabled": "on",
"children-0-link": "",
"children-0-content_type": "",
"children-0-object_id": "",
"children-1-id": 2,
"children-1-parent": 1,
"children-1-label": "Our Blog",
"children-1-slug": "our-blog",
"children-1-order": 0,
"children-1-is_enabled": "on",
"children-1-link": "",
"children-1-content_type": "",
"children-1-object_id": "",
}
)
# Update the post dict with the empty inline form entry
data.update(
{
"children-2-id": "",
"children-2-parent": 1,
"children-2-label": "",
"children-2-slug": "",
"children-2-order": "",
"children-2-is_enabled": "on",
"children-2-link": "",
"children-2-content_type": "",
"children-2-object_id": "",
}
)
# Update the post dict with the end of the form
data.update(
{
"children-__prefix__-id": "",
"children-__prefix__-parent": 1,
"children-__prefix__-label": "",
"children-__prefix__-slug": "",
"children-__prefix__-order": "",
"children-__prefix__-is_enabled": "on",
"children-__prefix__-link": "",
"children-__prefix__-content_type": "",
"children-__prefix__-object_id": "",
"_save": "Save",
}
)
self.client.post(self.changeform_url, data)
order = self.root.get_children()
# Check if children are in the correct order
self.assertEqual(order[0], self.blog)
self.assertEqual(order[1], self.home)
# Check if the lft and rght attributes assigned by mptt are correct
self.assertNotEqual(order[0].lft, order[1].lft)
self.assertNotEqual(order[0].rght, order[1].rght)
| bsd-3-clause | 514,541,756,168,908,400 | 35.278689 | 89 | 0.546091 | false |
NarlikarLab/DIVERSITY | weblogoMod/weblogolib/__init__.py | 1 | 41331 | #!/usr/bin/env python
# -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 Gavin E. Crooks
# Copyright (c) 2006-2011, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Replicates README.txt
"""
WebLogo (http://code.google.com/p/weblogo/) is a tool for creating sequence
logos from biological sequence alignments. It can be run on the command line,
as a standalone webserver, as a CGI webapp, or as a python library.
The main WebLogo webserver is located at http://weblogo.threeplusone.com
Please consult the manual for installation instructions and more information:
(Also located in the weblogolib/htdocs subdirectory.)
http://weblogo.threeplusone.com/manual.html
For help on the command line interface run
./weblogo --help
To build a simple logo run
./weblogo < cap.fa > logo0.eps
To run as a standalone webserver at localhost:8080
./weblogo --serve
To create a logo in python code:
>>> from weblogolib import *
>>> fin = open('cap.fa')
>>> seqs = read_seq_data(fin)
>>> data = LogoData.from_seqs(seqs)
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
>>> fout = open('cap.eps', 'w')
>>> eps_formatter( data, format, fout)
-- Distribution and Modification --
This package is distributed under the new BSD Open Source License.
Please see the LICENSE.txt file for details on copyright and licensing.
The WebLogo source code can be downloaded from
http://code.google.com/p/weblogo/
WebLogo requires Python 2.5, 2.6 or 2.7, and the python
array package 'numpy' (http://www.scipy.org/Download)
Generating logos in PDF or bitmap graphics formats require that the ghostscript
program 'gs' be installed. Scalable Vector Graphics (SVG) format also requires
the program 'pdf2svg'.
"""
import sys
import copy
import os
from datetime import datetime
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from math import sqrt
from weblogoMod.corebio.data import rna_letters, dna_letters, amino_acid_letters
from string import Template
from subprocess import *
from weblogoMod.corebio.utils import resource_string, resource_filename
from math import log, sqrt, exp
# Avoid 'from numpy import *' since numpy has lots of names defined
from numpy import array, asarray, float64, ones, zeros, int32,all,any, shape
import numpy as na
from color import *
from colorscheme import *
from weblogoMod.corebio.seq import Alphabet, Seq, SeqList
from weblogoMod.corebio import seq_io
from weblogoMod.corebio.utils import isfloat, find_command, ArgumentError, stdrepr
from weblogoMod.corebio.moremath import *
from weblogoMod.corebio.data import amino_acid_composition
from weblogoMod.corebio.seq import unambiguous_rna_alphabet, unambiguous_dna_alphabet, unambiguous_protein_alphabet
import weblogoMod.corebio
from logomath import Dirichlet
# ------ META DATA ------
__all__ = [ 'LogoOptions',
'description',
'__version__',
'LogoFormat',
'LogoData',
'GhostscriptAPI',
'std_color_schemes',
'default_color_schemes',
'classic',
'std_units',
'std_sizes',
'std_alphabets',
'std_percentCG',
'pdf_formatter',
'jpeg_formatter',
'png_formatter',
'png_print_formatter',
'txt_formatter',
'eps_formatter',
'formatters',
'default_formatter',
'base_distribution',
'equiprobable_distribution',
'read_seq_data',
'color',
'colorscheme',
'logomath',
]
description = "Create sequence logos from biological sequence alignments."
__version__ = weblogoMod.corebio.__version__
# These keywords are substituted by subversion.
# The date and revision will only tell the truth after a branch or tag,
# since different files in trunk will have been changed at different times
release_date ="$Date: 2012-07-02 19:28:12 -0700 (Mon, 02 Jul 2012) $".split()[1]
release_build = "$Revision: 145 $".split()[1]
release_description = "WebLogo %s (%s)" % (__version__, release_date)
def cgi(htdocs_directory) :
import weblogolib._cgi
weblogolib._cgi.main(htdocs_directory)
class GhostscriptAPI(object) :
"""Interface to the command line program Ghostscript ('gs')"""
formats = ('png', 'pdf', 'jpeg')
def __init__(self, path=None) :
try:
command = find_command('gs', path=path)
except EnvironmentError:
try:
command = find_command('gswin32c.exe', path=path)
except EnvironmentError:
raise EnvironmentError("Could not find Ghostscript on path."
" There should be either a gs executable or a gswin32c.exe on your system's path")
self.command = command
def version(self) :
args = [self.command, '--version']
try :
p = Popen(args, stdout=PIPE)
(out,err) = p.communicate()
except OSError :
raise RuntimeError("Cannot communicate with ghostscript.")
return out.strip()
def convert(self, format, fin, fout, width, height, resolution=300) :
device_map = { 'png':'png16m', 'pdf':'pdfwrite', 'jpeg':'jpeg'}
try :
device = device_map[format]
except KeyError:
raise ValueError("Unsupported format.")
args = [self.command,
"-sDEVICE=%s" % device,
"-dPDFSETTINGS=/screen", #Modification printer changed to screen
#"-q", # Quite: Do not dump messages to stdout.
"-sstdout=%stderr", # Redirect messages and errors to stderr
"-sOutputFile=-", # Stdout
"-dUseCIEColor", #Modification
"-dDEVICEWIDTHPOINTS=%s" % str(width),
"-dDEVICEHEIGHTPOINTS=%s" % str(height),
"-dSAFER", # For added security
"-dNOPAUSE",]
if device != 'pdf' :
args.append("-r%s" % str(resolution) )
if resolution < 300 : # Antialias if resolution is Less than 300 DPI
args.append("-dGraphicsAlphaBits=4")
args.append("-dTextAlphaBits=4")
args.append("-dAlignToPixels=0")
args.append("-") # Read from stdin. Must be last argument.
error_msg = "Unrecoverable error : Ghostscript conversion failed " \
"(Invalid postscript?). %s" % " ".join(args)
source = fin.read()
try :
p = Popen(args, stdin=PIPE, stdout = PIPE, stderr= PIPE)
(out,err) = p.communicate(source)
except OSError :
raise RuntimeError(error_msg)
if p.returncode != 0 :
print("COMMAND " + str(self.command))
print("ARGS" + str(args))
error_msg += '\nReturn code: %i\n' % p.returncode
if err is not None : error_msg += err
raise RuntimeError(error_msg)
print >>fout, out
# end class Ghostscript
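
# Illustrative usage sketch of GhostscriptAPI; it mirrors how the formatters
# defined below call it, and assumes Ghostscript is installed. The EPS source
# comes from eps_formatter and the sizes from a LogoFormat instance.
#
#   feps = StringIO()
#   eps_formatter(data, format, feps)
#   feps.seek(0)
#   gs = GhostscriptAPI()
#   gs.convert('png', feps, fout, format.logo_width, format.logo_height,
#              format.resolution)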
aa_composition = [ amino_acid_composition[_k] for _k in
unambiguous_protein_alphabet]
# ------ DATA ------
classic = ColorScheme([
ColorGroup("G", "orange" ),
ColorGroup("TU", "red"),
ColorGroup("C", "blue"),
ColorGroup("A", "green")
] )
std_color_schemes = {"auto": None, # Depends on sequence type
"monochrome": monochrome,
"base pairing": base_pairing,
"classic": classic,
"hydrophobicity" : hydrophobicity,
"chemistry" : chemistry,
"charge" : charge,
}#
default_color_schemes = {
unambiguous_protein_alphabet: hydrophobicity,
unambiguous_rna_alphabet: base_pairing,
unambiguous_dna_alphabet: base_pairing
}
std_units = {
"bits" : 1./log(2),
"nats" : 1.,
"digits" : 1./log(10),
"kT" : 1.,
"kJ/mol" : 8.314472 *298.15 /1000.,
"kcal/mol": 1.987 *298.15 /1000.,
"probability" : None,
}
# The base stack width is set equal to 9pt Courier.
# (Courier has a width equal to 3/5 of the point size.)
# Check that can get 80 characters in journal page @small
# 40 characters in a journal column
std_sizes = {
"small" : 5.4 ,
"medium" : 5.4*2,
"large" : 5.4*3
}
std_alphabets = {
'protein': unambiguous_protein_alphabet,
'rna': unambiguous_rna_alphabet,
'dna': unambiguous_dna_alphabet}
std_percentCG = {
'H. sapiens' : 40.,
'E. coli' : 50.5,
'S. cerevisiae' : 38.,
'C. elegans' : 36.,
'D. melanogaster': 43.,
'M. musculus' : 42.,
'T. thermophilus' : 69.4,
}
# Thermus thermophilus: Henne A, Bruggemann H, Raasch C, Wiezer A, Hartsch T,
# Liesegang H, Johann A, Lienard T, Gohl O, Martinez-Arias R, Jacobi C,
# Starkuviene V, Schlenczeck S, Dencker S, Huber R, Klenk HP, Kramer W,
# Merkl R, Gottschalk G, Fritz HJ: The genome sequence of the extreme
# thermophile Thermus thermophilus.
# Nat Biotechnol 2004, 22:547-53
class LogoOptions(object) :
""" A container for all logo formatting options. Not all of these
are directly accessible through the CLI or web interfaces.
To display LogoOption defaults:
>>> from weblogolib import *
>>> LogoOptions()
All physical lengths are measured in points. (72 points per inch, 28.3 points per cm)
String attributes:
o creator_text -- Embedded as comment in figures.
o logo_title
o logo_label
o unit_name -- See std_units for options. (Default 'bits')
o yaxis_label -- Defaults to unit_name
o xaxis_label
o fineprint -- Defaults to WebLogo name and version
Boolean attributes:
o show_yaxis
o show_xaxis
o show_ends
o show_fineprint
        o show_errorbars      -- Draw errorbars (default: True)
        o show_boxes          -- Draw boxes around stack characters (default: False)
o debug -- Draw extra graphics debugging information.
o rotate_numbers -- Draw xaxis numbers with vertical orientation?
        o scale_width      -- boolean, scale width of characters proportional to the fraction of non-gap symbols
o pad_right -- Make a single line logo the same width as multiline logos (default: False)
Other attributes:
o stacks_per_line
o yaxis_tic_interval
o yaxis_minor_tic_ratio
o yaxis_scale
o xaxis_tic_interval
o number_interval
o shrink_fraction -- Proportional shrinkage of characters if show_boxes is true.
o errorbar_fraction
o errorbar_width_fraction
o errorbar_gray
o resolution -- Dots per inch (default: 96). Used for bitmapped output formats
o default_color
o color_scheme
o stack_width --
o stack_aspect_ratio -- Ratio of stack height to width (default: 5)
o logo_margin -- Default: 2 pts
o stroke_width -- Default: 0.5 pts
o tic_length -- Default: 5 pts
o stack_margin -- Default: 0.5 pts
o small_fontsize -- Small text font size in points
o fontsize -- Regular text font size in points
o title_fontsize -- Title text font size in points
o number_fontsize -- Font size for axis-numbers, in points.
o text_font
o logo_font
o title_font
o first_index
o logo_start
o logo_end
"""
def __init__(self, **kwargs) :
""" Create a new LogoOptions instance.
>>> L = LogoOptions(logo_title = "Some Title String")
>>> L.show_yaxis = False
>>> repr(L)
"""
self.alphabet = None
self.creator_text = release_description
self.logo_title = ""
self.logo_label = ""
self.stacks_per_line = 40
self.unit_name = "bits"
self.show_yaxis = True
        # yaxis_label default depends on other settings. See LogoFormat
self.yaxis_label = None
self.yaxis_tic_interval = 1.
self.yaxis_minor_tic_ratio = 5
self.yaxis_scale = None
self.show_xaxis = True
self.xaxis_label = ""
self.xaxis_tic_interval =1
self.rotate_numbers = False
self.number_interval = 5
self.show_ends = False
self.annotate = None
self.show_fineprint = True
self.fineprint = "Based on WebLogo "+__version__
self.show_boxes = False
self.shrink_fraction = 0.5
self.show_errorbars = True
self.errorbar_fraction = 0.90
self.errorbar_width_fraction = 0.25
self.errorbar_gray = 0.75
self.resolution = 96. # Dots per inch
self.default_color = Color.by_name("black")
self.color_scheme = None
#self.show_color_key = False # NOT yet implemented
self.debug = False
self.logo_margin = 2
self.stroke_width = 0.5
self.tic_length = 5
self.stack_width = std_sizes["large"]
self.stack_aspect_ratio = 5
self.stack_margin = 0.5
self.pad_right = False
self.small_fontsize = 6
self.fontsize = 10
self.title_fontsize = 12
self.number_fontsize = 8
self.text_font = "ArialMT"
self.logo_font = "Arial-BoldMT"
self.title_font = "ArialMT"
self.first_index = 1
self.logo_start = None
self.logo_end=None
self.scale_width = True
self.reverse_stacks = True # If true, draw stacks with largest letters on top.
from weblogoMod.corebio.utils import update
update(self, **kwargs)
    def __repr__(self) :
        attributes = vars(self).keys()
        attributes.sort()
        return stdrepr(self, attributes )
# End class LogoOptions
class LogoFormat(LogoOptions) :
""" Specifies the format of the logo. Requires LogoData and LogoOptions
objects.
>>> data = LogoData.from_seqs(seqs )
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
Raises an ArgumentError if arguments are invalid.
"""
def __init__(self, data, options= None) :
""" Create a new LogoFormat instance.
"""
LogoOptions.__init__(self)
if options is not None :
self.__dict__.update(options.__dict__)
self.alphabet = data.alphabet
self.seqlen = data.length
# Derived parameters.
self.show_title = False
self.show_xaxis_label = False
self.yaxis_minor_tic_interval = None
self.lines_per_logo = None
self.char_width = None # Maximum character width. Stack width minus margins.
self.line_margin_left = None
self.line_margin_right = None
self.line_margin_bottom = None
self.line_margin_top = None
self.title_height = None
self.xaxis_label_height = None
self.line_height = None
self.line_width = None
self.logo_height = None
self.logo_width = None
self.creation_date = None
self.end_type = None
self.stack_height = self.stack_width * self.stack_aspect_ratio
# Attribute to test, test, error message
arg_conditions = (
("stacks_per_line", lambda x: x>0 , "Stacks per line must be positive."),
("stack_width", lambda x: x>0.0, "Stack width must be greater than zero."),
("stack_aspect_ratio" , lambda x: x>0, "Stack aspect ratio must be greater than zero."),
("fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("small_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("title_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("errorbar_fraction" , lambda x: x>=0.0 and x<=1.0,
"The visible fraction of the error bar must be between zero and one."),
("yaxis_tic_interval" , lambda x: x>=0.0 , "The yaxis tic interval cannot be negative."),
("yaxis_minor_tic_interval" , lambda x: not (x and x<0.0) , "Distances cannot be negative."),
("xaxis_tic_interval" , lambda x: x>0.0 , "Tic interval must be greater than zero."),
("number_interval" , lambda x: x>0.0 , "Invalid interval between numbers."),
("shrink_fraction" , lambda x: x>=0.0 and x<=1.0 , "Invalid shrink fraction."),
("stack_margin" , lambda x: x>0.0 , "Invalid stack margin."),
("logo_margin" , lambda x: x>0.0 , "Invalid logo margin."),
("stroke_width", lambda x: x>0.0 , "Invalid stroke width."),
("tic_length" , lambda x: x>0.0 , "Invalid tic length."),
)
# Run arguments tests. The second, attribute argument to the ArgumentError is
# used by the UI to provide user feedback.
# FIXME: More validation
for test in arg_conditions :
if not test[1]( getattr(self,test[0]) ) : raise ArgumentError(test[2], test[0])
# Inclusive upper and lower bounds
# FIXME: Validate here. Move from eps_formatter
if self.logo_start is None: self.logo_start = self.first_index
if self.logo_end is None :
self.logo_end = self.seqlen + self.first_index -1
self.total_stacks = self.logo_end - self.logo_start +1
if self.logo_start - self.first_index <0 :
raise ArgumentError(
"Logo range extends before start of available sequence.",
'logo_range')
if self.logo_end - self.first_index >= self.seqlen :
raise ArgumentError(
"Logo range extends beyond end of available sequence.",
'logo_range')
if self.logo_title : self.show_title = True
if not self.fineprint : self.show_fineprint = False
if self.xaxis_label : self.show_xaxis_label = True
if self.yaxis_label is None :
self.yaxis_label = self.unit_name
if self.yaxis_label :
self.show_yaxis_label = True
else :
self.show_yaxis_label = False
self.show_ends = False
if not self.yaxis_scale :
conversion_factor = std_units[self.unit_name]
if conversion_factor :
self.yaxis_scale=log(len(self.alphabet))*conversion_factor
else :
self.yaxis_scale=1.0 # probability units
if self.yaxis_scale<=0.0 :
raise ArgumentError("Invalid yaxis scale", 'yaxis_scale',)
if self.yaxis_tic_interval >= self.yaxis_scale:
self.yaxis_tic_interval /= 2.
self.yaxis_minor_tic_interval \
= float(self.yaxis_tic_interval)/self.yaxis_minor_tic_ratio
if self.color_scheme is None :
if self.alphabet in default_color_schemes :
self.color_scheme = default_color_schemes[self.alphabet]
else :
self.color_scheme = monochrome
self.lines_per_logo = 1+ ( (self.total_stacks-1) / self.stacks_per_line)
if self.lines_per_logo==1 and not self.pad_right:
self.stacks_per_line = min(self.stacks_per_line, self.total_stacks)
self.char_width = self.stack_width - 2* self.stack_margin
if self.show_yaxis :
self.line_margin_left = self.fontsize * 3.0
else :
self.line_margin_left = 0
if self.show_ends :
self.line_margin_right = self.fontsize *1.5
else :
self.line_margin_right = self.fontsize
if self.show_xaxis :
if self.rotate_numbers :
self.line_margin_bottom = self.number_fontsize *2.5
else:
self.line_margin_bottom = self.number_fontsize *1.5
else :
self.line_margin_bottom = 4
self.line_margin_top = 4
if self.show_title :
self.title_height = self.title_fontsize
else :
self.title_height = 0
self.xaxis_label_height =0.
if self.show_xaxis_label :
self.xaxis_label_height += self.fontsize
if self.show_fineprint :
self.xaxis_label_height += self.small_fontsize
self.line_height = (self.stack_height + self.line_margin_top +
self.line_margin_bottom )
self.line_width = (self.stack_width*self.stacks_per_line +
self.line_margin_left + self.line_margin_right )
self.logo_height = int(2*self.logo_margin + self.title_height \
+ self.xaxis_label_height + self.line_height*self.lines_per_logo)
self.logo_width = int(2*self.logo_margin + self.line_width )
self.creation_date = datetime.now().isoformat(' ')
end_type = '-'
end_types = {
unambiguous_protein_alphabet: 'p',
unambiguous_rna_alphabet: '-',
unambiguous_dna_alphabet: 'd'
}
if self.show_ends and self.alphabet in end_types:
end_type = end_types[self.alphabet]
self.end_type = end_type
if self.annotate is None :
self.annotate = []
for i in range(self.seqlen):
index = i + self.first_index
if index % self.number_interval == 0 :
self.annotate.append( "%d"%index)
else :
self.annotate.append("")
if len(self.annotate)!=self.seqlen :
raise ArgumentError(
"Annotations must be same length as sequences.",
'annotate')
# End __init__
# End class LogoFormat
# ------ Logo Formaters ------
# Each formatter is a function f(LogoData, LogoFormat, output file).
# that draws a representation of the logo into the given file.
# The main graphical formatter is eps_formatter. A mapping 'formatters'
# containing all available formatters is located after the formatter
# definitions.
def pdf_formatter(data, format, fout) :
""" Generate a logo in PDF format."""
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert('pdf', feps, fout, format.logo_width, format.logo_height)
def _bitmap_formatter(data, format, fout, device) :
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert(device, feps, fout,
format.logo_width, format.logo_height, format.resolution)
def jpeg_formatter(data, format, fout) :
""" Generate a logo in JPEG format."""
_bitmap_formatter(data, format, fout, device="jpeg")
def svg_formatter(data, format, fout) :
""" Generate a logo in Scalable Vector Graphics (SVG) format.
Requires the program 'pdf2svg' be installed.
"""
fpdf = StringIO()
pdf_formatter(data, format, fpdf)
fpdf.seek(0)
try:
command = find_command('pdf2svg')
except EnvironmentError:
raise EnvironmentError("Scalable Vector Graphics (SVG) format requires the program 'pdf2svg'. "
"Cannot find 'pdf2svg' on search path.")
import tempfile, os
fpdfi, fname_pdf = tempfile.mkstemp(suffix=".pdf")
fsvgi, fname_svg = tempfile.mkstemp(suffix=".svg")
try:
fpdf2 = open(fname_pdf, 'w')
fpdf2.write(fpdf.getvalue() )
fpdf2.seek(0)
args = [command, fname_pdf, fname_svg]
p = Popen(args)
(out,err) = p.communicate()
fsvg = open(fname_svg)
fout.write(fsvg.read())
finally:
os.remove(fname_svg)
os.remove(fname_pdf)
def png_formatter(data, format, fout) :
""" Generate a logo in PNG format."""
_bitmap_formatter(data, format, fout, device="png")
def png_print_formatter(data, format, fout) :
""" Generate a logo in PNG format with print quality (600 DPI) resolution."""
format.resolution = 600
_bitmap_formatter(data, format, fout, device="png")
def txt_formatter( logodata, format, fout) :
""" Create a text representation of the logo data.
"""
print >>fout, str(logodata)
def eps_formatter( logodata, format, fout) :
""" Generate a logo in Encapsulated Postscript (EPS)"""
substitutions = {}
from_format =[
"creation_date", "logo_width", "logo_height",
"lines_per_logo", "line_width", "line_height",
"line_margin_right","line_margin_left", "line_margin_bottom",
"line_margin_top", "title_height", "xaxis_label_height",
"creator_text", "logo_title", "logo_margin",
"stroke_width", "tic_length",
"stacks_per_line", "stack_margin",
"yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
"xaxis_label", "xaxis_tic_interval", "number_interval",
"fineprint", "shrink_fraction", "errorbar_fraction",
"errorbar_width_fraction",
"errorbar_gray", "small_fontsize", "fontsize",
"title_fontsize", "number_fontsize", "text_font",
"logo_font", "title_font",
"logo_label", "yaxis_scale", "end_type",
"debug", "show_title", "show_xaxis",
"show_xaxis_label", "show_yaxis", "show_yaxis_label",
"show_boxes", "show_errorbars", "show_fineprint",
"rotate_numbers", "show_ends", "stack_height",
"stack_width"
]
for s in from_format :
substitutions[s] = getattr(format,s)
substitutions["shrink"] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return " ".join( ("[",str(color.red) , str(color.green),
str(color.blue), "]"))
substitutions["default_color"] = format_color(format.default_color)
colors = []
for group in format.color_scheme.groups :
cf = format_color(group.color)
for s in group.symbols :
colors.append( " ("+s+") " + cf )
substitutions["color_dict"] = "\n".join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = std_units[format.unit_name]
data.append("StartLine")
seq_from = format.logo_start- format.first_index
seq_to = format.logo_end - format.first_index +1
# seq_index : zero based index into sequence data
# logo_index : User visible coordinate, first_index based
# stack_index : zero based index of visible stacks
for seq_index in range(seq_from, seq_to) :
logo_index = seq_index + format.first_index
stack_index = seq_index - seq_from
if stack_index!=0 and (stack_index % format.stacks_per_line) ==0 :
data.append("")
data.append("EndLine")
data.append("StartLine")
data.append("")
data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# if format.annotate[seq_index][-1] == "*":
# data.append("0 0 1 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# else:
# data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
if conv_factor:
stack_height = logodata.entropy[seq_index] * std_units[format.unit_name]
else :
stack_height = 1.0 # Probability
s = zip(logodata.counts[seq_index], logodata.alphabet)
def mycmp( c1, c2 ) :
# Sort by frequency. If equal frequency then reverse alphabetic
if c1[0] == c2[0] : return cmp(c2[1], c1[1])
return cmp(c1[0], c2[0])
s.sort(mycmp)
if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0 :
fraction_width = 1.0
if format.scale_width :
fraction_width = logodata.weight[seq_index]
# print >>sys.stderr, fraction_width
for c in s:
data.append(" %f %f (%s) ShowSymbol" % (fraction_width, c[0]*stack_height/C, c[1]) )
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and C>0.0:
low, high = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *=conv_factor
if high> format.yaxis_scale : high = format.yaxis_scale
down = (center - low)
up = (high - center)
data.append(" %f %f DrawErrorbar" % (down, up) )
data.append("EndStack")
data.append("")
data.append("EndLine")
substitutions["logo_data"] = "\n".join(data)
# Create and output logo
template = resource_string( __name__, 'template.eps', __file__)
logo = Template(template).substitute(substitutions)
print >>fout, logo
# map between output format names and logo formatters
formatters = {
'eps': eps_formatter,
'pdf': pdf_formatter,
'png': png_formatter,
'png_print' : png_print_formatter,
'jpeg' : jpeg_formatter,
'svg' : svg_formatter,
'logodata' : txt_formatter,
}
default_formatter = eps_formatter
def parse_prior(composition, alphabet, weight=None) :
""" Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
        - None or 'none' : No composition specified
- 'auto' or 'automatic': Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
- 'equiprobable' : All monomers have the same probability.
- a percentage, e.g. '45%' or a fraction '0.45':
The fraction of CG bases for nucleotide alphabets
- a species name, e.g. 'E. coli', 'H. sapiens' :
            Use the average CG percentage for the species'
genome.
- An explicit distribution, e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None: return None
comp = composition.strip()
if comp.lower() == 'none': return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight<0 : raise ValueError("Weight cannot be negative.")
if comp.lower() == 'equiprobable' :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == 'auto' or comp.lower() == 'automatic':
if alphabet == unambiguous_protein_alphabet :
prior = weight * asarray(aa_composition, float64)
else :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG :
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == '%' :
prior = weight * base_distribution( float(comp[:-1]))
elif isfloat(comp) :
prior = weight * base_distribution( float(comp)*100. )
elif composition[0] == '{' and composition[-1] == '}' :
explicit = composition[1: -1]
explicit = explicit.replace(',',' ').replace("'", ' ').replace('"',' ').replace(':', ' ').split()
if len(explicit) != len(alphabet)*2 :
#print explicit
raise ValueError("Explicit prior does not match length of alphabet")
prior = - ones(len(alphabet), float64)
try :
for r in range(len(explicit)/2) :
letter = explicit[r*2]
index = alphabet.ord(letter)
value = float(explicit[r*2 +1])
prior[index] = value
except ValueError :
raise ValueError("Cannot parse explicit composition")
if any(prior==-1.) :
raise ValueError("Explicit prior does not match alphabet")
prior/= sum(prior)
prior *= weight
else :
raise ValueError("Unknown or malformed composition: %s"%composition)
if len(prior) != len(alphabet) :
raise ValueError(
"The sequence alphabet and composition are incompatible.")
return prior
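
# Illustrative calls (a sketch; for DNA the default weight computed above is
# sqrt(4) = 2.0):
#
#   parse_prior('equiprobable', unambiguous_dna_alphabet)   # uniform prior
#   parse_prior('40%', unambiguous_dna_alphabet)            # prior from 40% CG
#   parse_prior("{'A':1,'C':1,'G':1,'T':1}", unambiguous_dna_alphabet)  # explicit counts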
def base_distribution(percentCG) :
A = (1. - (percentCG/100.))/2.
C = (percentCG/100.)/2.
G = (percentCG/100.)/2.
T = (1. - (percentCG/100))/2.
return asarray((A,C,G,T), float64)
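
# For example (a sketch): base_distribution(40.) returns approximately
# [0.30, 0.20, 0.20, 0.30] for (A, C, G, T).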
def equiprobable_distribution( length) :
return ones( (length), float64) /length
def read_seq_data(lines,
input_parser=seq_io.read,
alphabet=None,
ignore_lower_case=False,
max_file_size=0):
""" Read sequence data from the input stream and return a seqs object.
    The environment variable WEBLOGO_MAX_FILE_SIZE overrides the max_file_size argument.
Used to limit the load on the WebLogo webserver.
"""
seqs = input_parser(lines)
if seqs is None or len(seqs) ==0 :
raise ValueError("Please provide a multiple sequence alignment")
if ignore_lower_case :
# Case is significant. Do not count lower case letters.
for i,s in enumerate(seqs) :
seqs[i] = s.mask()
# Add alphabet to seqs.
if alphabet :
seqs.alphabet = alphabet
else :
seqs.alphabet = Alphabet.which(seqs)
return seqs
class LogoData(object) :
"""The data needed to generate a sequence logo.
- alphabet
- length
- counts -- An array of character counts
- entropy -- The relative entropy of each column
- entropy_interval -- entropy confidence interval
"""
def __init__(self, length=None, alphabet = None, counts =None,
entropy =None, entropy_interval = None, weight=None) :
"""Creates a new LogoData object"""
self.length = length
self.alphabet = alphabet
self.counts = counts
self.entropy = entropy
self.entropy_interval = entropy_interval
self.weight = weight
@classmethod
def from_counts(cls, alphabet, counts, prior= None):
"""Build a LogoData object from counts."""
# Counts is a Motif object?
#counts = counts.array
seq_length, A = counts.shape
if prior is not None: prior = array(prior, float64)
if prior is None or sum(prior)==0.0:
R = log(A)
ent = zeros( seq_length, float64)
entropy_interval = None
for i in range (0, seq_length) :
C = sum(counts[i])
#FIXME: fixup corebio.moremath.entropy()?
if C == 0 :
ent[i] = 0.0
else :
ent[i] = R - entropy(counts[i])
else :
ent = zeros( seq_length, float64)
entropy_interval = zeros( (seq_length,2) , float64)
R = log(A)
for i in range (0, seq_length) :
alpha = array(counts[i] , float64)
alpha += prior
posterior = Dirichlet(alpha)
ent[i] = posterior.mean_relative_entropy(prior/sum(prior))
entropy_interval[i][0], entropy_interval[i][1] = \
posterior.interval_relative_entropy(prior/sum(prior), 0.95)
weight = array( na.sum(counts,axis=1) , float)
weight /= max(weight)
return cls(seq_length, alphabet, counts, ent, entropy_interval, weight)
@classmethod
def from_seqs(cls, seqs, prior= None):
"""Build a LogoData object from a SeqList, a list of sequences."""
# --- VALIDATE DATA ---
# check that at least one sequence of length at least 1 long
if len(seqs)==0 or len(seqs[0]) ==0:
raise ValueError("No sequence data found.")
# Check sequence lengths
seq_length = len(seqs[0])
for i,s in enumerate(seqs) :
#print i,s, len(s)
#TODO: Redundant? Should be checked in SeqList?
if seq_length != len(s) :
raise ArgumentError(
"Sequence number %d differs in length from the previous sequences" % (i+1) ,
'sequences')
# FIXME: Check seqs.alphabet?
counts = seqs.profile()
return cls.from_counts(seqs.alphabet, counts, prior)
def __str__(self) :
out = StringIO()
print >>out, '## LogoData'
print >>out, '# First column is position number, counting from zero'
print >>out, '# Subsequent columns are raw symbol counts'
print >>out, '# Entropy is mean entropy measured in nats.'
print >>out, '# Low and High are the 95% confidence limits.'
print >>out, '# Weight is the fraction of non-gap symbols in the column.'
print >>out, '#\t'
print >>out, '#\t',
for a in self.alphabet :
print >>out, a, '\t',
print >>out, 'Entropy\tLow\tHigh\tWeight'
for i in range(self.length) :
print >>out, i+1, '\t',
for c in self.counts[i] : print >>out, c, '\t',
print >>out, "%6.4f" % self.entropy[i], '\t',
if self.entropy_interval is not None:
print >>out, "%6.4f" % self.entropy_interval[i][0], '\t',
print >>out, "%6.4f" % self.entropy_interval[i][1], '\t',
else :
print >>out, '\t','\t',
if self.weight is not None :
print >>out, "%6.4f" % self.weight[i],
print >>out, ''
print >>out, '# End LogoData'
return out.getvalue()
| gpl-3.0 | 6,209,968,925,322,337,000 | 34.265358 | 137 | 0.570637 | false |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/djangosaml2/unit_tests/settings.py | 1 | 5559 | # Django settings for tests2 project.
import django
import sys
sys.path.append("../..")
sys.path.append("../../../../..")
from siteconf import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB_W,
'USER': MYSQL_USER_W,
'PASSWORD': MYSQL_PASSWORD_W,
'HOST': MYSQL_HOST_W,
'PORT': MYSQL_PORT_W,
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xvds$ppv5ha75qg1yx3aax7ugr_2*fmdrc(lrc%x7kdez-63xn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = ''
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tests2.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# django.contrib.admin is needed because we call django_logout()
    # and it expects some templates to be registered
'django.contrib.admin',
'djangosaml2',
'testprofiles',
)
AUTH_PROFILE_MODULE = 'testprofiles.TestProfile'
if django.VERSION >= (1, 7):
AUTH_USER_MODEL = 'testprofiles.TestUser'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'djangosaml2': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
}
import django
if django.VERSION < (1, 4):
del LOGGING['filters']['require_debug_false']
del LOGGING['handlers']['mail_admins']['filters']
AUTHENTICATION_BACKENDS = (
'djangosaml2.backends.Saml2Backend',
)
| agpl-3.0 | 6,175,133,029,150,552,000 | 29.377049 | 88 | 0.681597 | false |
Yelp/paasta | tests/api/test_autoscaler.py | 1 | 3640 | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from pyramid import testing
from paasta_tools.api.views import autoscaler
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.marathon_tools import MarathonServiceConfig
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_get_autoscaler_count(mock_get_instance_config):
request = testing.DummyRequest()
request.swagger_data = {"service": "fake_service", "instance": "fake_instance"}
mock_get_instance_config.return_value = mock.MagicMock(
get_instances=mock.MagicMock(return_value=123), spec=KubernetesDeploymentConfig,
)
response = autoscaler.get_autoscaler_count(request)
assert response.json_body["desired_instances"] == 123
assert response.json_body["calculated_instances"] == 123
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_marathon(mock_get_instance_config):
request = testing.DummyRequest()
request.swagger_data = {
"service": "fake_marathon_service",
"instance": "fake_marathon_instance",
"json_body": {"desired_instances": 123},
}
mock_get_instance_config.return_value = mock.MagicMock(
get_min_instances=mock.MagicMock(return_value=100),
get_max_instances=mock.MagicMock(return_value=200),
spec=MarathonServiceConfig,
)
response = autoscaler.update_autoscaler_count(request)
assert response.json_body["desired_instances"] == 123
assert response.status_code == 202
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_kubernetes(mock_get_instance_config):
request = testing.DummyRequest()
request.swagger_data = {
"service": "fake_kubernetes_service",
"instance": "fake__kubernetes_instance",
"json_body": {"desired_instances": 155},
}
mock_get_instance_config.return_value = mock.MagicMock(
get_min_instances=mock.MagicMock(return_value=100),
get_max_instances=mock.MagicMock(return_value=200),
spec=KubernetesDeploymentConfig,
)
response = autoscaler.update_autoscaler_count(request)
assert response.json_body["desired_instances"] == 155
assert response.status_code == 202
@mock.patch("paasta_tools.api.views.autoscaler.get_instance_config", autospec=True)
def test_update_autoscaler_count_warning(mock_get_instance_config):
request = testing.DummyRequest()
request.swagger_data = {
"service": "fake_service",
"instance": "fake_instance",
"json_body": {"desired_instances": 123},
}
mock_get_instance_config.return_value = mock.MagicMock(
get_min_instances=mock.MagicMock(return_value=10),
get_max_instances=mock.MagicMock(return_value=100),
spec=KubernetesDeploymentConfig,
)
response = autoscaler.update_autoscaler_count(request)
assert response.json_body["desired_instances"] == 100
assert "WARNING" in response.json_body["status"]
| apache-2.0 | 6,430,667,032,950,880,000 | 38.565217 | 88 | 0.721429 | false |
lizardsystem/lizard-reportgenerator | lizard_reportgenerator/testsettings.py | 1 | 3058 | import os
from lizard_ui.settingshelper import setup_logging
from lizard_ui.settingshelper import STATICFILES_FINDERS
DEBUG = True
TEMPLATE_DEBUG = True
# SETTINGS_DIR allows media paths and so to be relative to this settings file
# instead of hardcoded to c:\only\on\my\computer.
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# BUILDOUT_DIR is for access to the "surrounding" buildout, for instance for
# BUILDOUT_DIR/var/static files to give django-staticfiles a proper place
# to place all collected static files.
BUILDOUT_DIR = os.path.abspath(os.path.join(SETTINGS_DIR, '..'))
LOGGING = setup_logging(BUILDOUT_DIR)
# ENGINE: 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# In case of geodatabase, prepend with:
# django.contrib.gis.db.backends.(postgis)
DATABASES = {
# If you want to use another database, consider putting the database
# settings in localsettings.py. Otherwise, if you change the settings in
# the current file and commit them to the repository, other developers will
# also use these settings whether they have that database or not.
# One of those other developers is Jenkins, our continuous integration
# solution. Jenkins can only run the tests of the current application when
# the specified database exists. When the tests cannot run, Jenkins sees
# that as an error.
'default': {
'NAME': os.path.join(BUILDOUT_DIR, 'var', 'sqlite', 'test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '', # empty string for localhost.
'PORT': '', # empty string for default.
}
}
SITE_ID = 1
INSTALLED_APPS = [
'lizard_reportgenerator',
'lizard_ui',
'staticfiles',
'compressor',
'south',
'lizard_area',
'django_nose',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.gis',
'django.contrib.sites',
]
ROOT_URLCONF = 'lizard_reportgenerator.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
# Uncomment this one if you use lizard-map.
# 'lizard_map.context_processors.processor.processor',
# Default django 1.3 processors.
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Used for django-staticfiles (and for media files)
STATIC_URL = '/static_media/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'static')
MEDIA_ROOT = os.path.join(BUILDOUT_DIR, 'var', 'media')
STATICFILES_FINDERS = STATICFILES_FINDERS
try:
# Import local settings that aren't stored in svn/git.
from lizard_reportgenerator.local_testsettings import *
except ImportError:
pass
| gpl-3.0 | -2,051,839,275,009,533,400 | 34.55814 | 79 | 0.701766 | false |
mredar/ucldc_oai_harvest | oai_harvester/oai_harvester.py | 1 | 6784 | #! /usr/bin/env python
'''UCLDC OAI Harvester: Collects records from OAI interfaces and inputs to
basic solr schema. Driven off the collection registry'''
'''
imagining right now that this will be woken by a crontab. It then looks at the
incoming queue and processes any "READY" msgs (maybe only ready ones there)
should you have a number of queues : ready, processing, errors?
Actually, SQS uses a visibility timeout to make msgs "invisible" while being processed. Client process can up the timeout if necessary. May need similar behavior here.
while a msg in queue:
get msg and set timeout?
harvest from msg
delete message from queue
'''
import sys
import csv
import os
import codecs
import datetime
import time
import logging
logging.basicConfig(level=logging.INFO)
import json
import traceback
import hashlib
from sickle import Sickle
from sickle.models import Record
import solr
from lxml import etree
import boto.sqs as sqs
import dateutil.parser
QUEUE_OAI_HARVEST = os.environ.get('QUEUE_OAI_HARVEST', 'OAI_harvest')
QUEUE_OAI_HARVEST_ERR = os.environ.get('QUEUE_OAI_HARVEST_ERR', 'OAI_harvest_error')
QUEUE_OAI_HARVEST_HARVESTING = os.environ.get('QUEUE_OAI_HARVEST_HARVESTING', 'OAI_harvest_harvesting')
#INITIAL dev machine (nutch-dev) URL_SOLR = os.environ.get('URL_SOLR', 'http://54.243.192.165:8080/solr/dc-collection/')
URL_SOLR = os.environ.get('URL_SOLR', 'http://107.21.228.130:8080/solr/dc-collection/')
SQS_CONNECTION = sqs.connect_to_region('us-east-1')
def harvest_to_solr_oai_set(oai_set):
'''Harvest the oai set and return a list of records?
The oai_set is the message dict from SQS'''
client=Sickle(oai_set['url'])
records = client.ListRecords(set=oai_set['set_spec'], metadataPrefix='oai_dc')
n = 0
dt_start = datetime.datetime.now()
for rec in records:
n += 1
dt_iter = datetime.datetime.now()
elapsed_time = (dt_iter -dt_start).seconds
if (n % 100) == 0:
logging.info("Set has taken :" + str(elapsed_time) + " seconds.")
logging.info("OAI REC NUM: " + str(n) + " SET:" + str(oai_set))
solr_index_record(rec, extra_metadata=oai_set)
def datetime_to_solr_date(dt):
'''Return the UTC solr style date string for the given datetime object
'''
#need to get zulu time for obj?
return dt.isoformat() + 'Z'
# doesn't work for dates < 1900 return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_md5_id_from_oai_identifiers(ids):
'''From a list of oai identifier fields, pick a URL and convert to md5
to use as solr id
'''
for i in ids:
if i[:5] == 'http:':
md5= hashlib.md5()
md5.update(i)
return md5.hexdigest()
raise Exception("NO URL found in identifiers")
def solr_index_record(sickle_rec, extra_metadata=None):
'''Index the sickle record object in solr'''
#TODO: make this global for efficiency?
s = solr.Solr(URL_SOLR)
sdoc = sickle_rec.metadata
#use URL identifier md5 hash as id
#should probably move to solr, to help with other inputs
sdoc['id'] = get_md5_id_from_oai_identifiers(sdoc['identifier'])
oai_dt = datetime_to_solr_date(dateutil.parser.parse(sickle_rec.header.datestamp))
#collisions here?
#sdoc['title_exact'] = sdoc['title'][0]
# how to make created write once, then read only - update processor in
# solr
sdoc['created'] = sdoc['last_modified'] = oai_dt
if 'campus' in extra_metadata:
sdoc['campus'] = []
for campus in extra_metadata['campus']:
if 'publisher' in sdoc:
sdoc['publisher'].append(campus['name'])
else:
sdoc['publisher'] = [campus['name'],]
sdoc['campus'].append(campus['name'])
if 'repository' in extra_metadata:
sdoc['repository'] = []
for repository in extra_metadata['repository']:
if 'publisher' in sdoc:
sdoc['publisher'].append(repository['name'])
else:
sdoc['publisher'] = [repository['name'],]
sdoc['repository'].append(repository['name'])
sdoc['collection_name'] = extra_metadata['collection_name']
#convert various dc dates into solr date fields
#need date_facet, date_facet_start, date_facet_end?
#for each dc date value parse into one or more values.
#if exists, update, so later values not deleted
if s.select('id:'+sdoc['id']):
logging.info( 'Updating:'+sdoc['id'])
s.update(sdoc, commit=True)
else:
logging.info( 'Adding:'+sdoc['id'])
s.add(sdoc, commit=True)
def delete_msg_by_content_from_queue(q, msg):
'''Can't just hold an added message object, must retrieve from
queue and then delete. Just delete the first matching body
'''
m = q.read()
while m:
if m.get_body() == msg.get_body():
m.delete()
return
m = q.read()
def process_oai_queue():
'''Run on any messages in the OAI_harvest queue'''
q_oai = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST)
q_harvesting = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST_HARVESTING)
n = 0
m = q_oai.read()
while m:
m_harvesting = q_harvesting.write(m)
q_oai.delete_message(m) #delete, will pass result to another queue
n += 1
dt_start = datetime.datetime.now()
logging.info("\n" + str(dt_start) + " START MESSAGE " + str(n) + "\n\n")
msg_dict = json.loads(m.get_body())
#msg_dict is {url:XX, set_spec:YY, campus:[{resource_uri:ZZ, slug:TT, name: QQ},]}
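        # Illustrative message body (a sketch -- the values here are invented,
        # but the keys match what harvest_to_solr_oai_set and solr_index_record
        # read):
        # {"url": "http://example.org/oai", "set_spec": "set-01",
        #  "collection_name": "Example Collection",
        #  "campus": [{"resource_uri": "/api/v1/campus/1/", "slug": "ucx",
        #              "name": "UC Example"}],
        #  "repository": [{"name": "Example Repository"}]}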
logging.info(msg_dict)
try:
harvest_to_solr_oai_set(msg_dict)
dt_end = datetime.datetime.now()
logging.info("\n\n\n============== " + str((dt_end-dt_start).seconds) + " seconds Done with Message:" + str(n) + " : " + m.get_body() + "\n\n\n\n")
except Exception, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
# add message to error q
q_err = SQS_CONNECTION.get_queue(QUEUE_OAI_HARVEST_ERR)
msg_dict['exceptinfo'] = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
logging.error(str(msg_dict))
msg = json.dumps(msg_dict)
q_msg = sqs.message.Message()
q_msg.set_body(msg)
status = q_err.write(q_msg)
time.sleep(10) #make sure harvesting message back on queue
# this doesn't work, need to "read" the message from queue to
# get a receipt handle that can be used to delete
delete_msg_by_content_from_queue(q_harvesting, m_harvesting)
m = q_oai.read()
def main(args):
process_oai_queue()
if __name__=='__main__':
#TODO: test here?
main(sys.argv)
| bsd-3-clause | -3,734,049,030,187,922,000 | 38.672515 | 167 | 0.630159 | false |
egitto/parchment-and-copper | scratch/cryptopals/ctr.py | 1 | 1530 | from cbc import CBC_encrypt
from ecb import ECB_encrypt
from bytestring_tools import xor, data
from math import ceil
def counter_function(n):
return int.to_bytes(n,9,'big')+b'\x00'*7
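# Worked example (for reference): counter_function(1) returns the 16-byte block
# b'\x00'*8 + b'\x01' + b'\x00'*7 -- a 9-byte big-endian block counter followed
# by seven zero bytes -- which CTR_keystream below encrypts with ECB to produce
# each 16-byte slice of keystream.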
def CTR_keystream(key,counter_function,length,start):
# start = nonce = first byte we haven't generated keystream for yet
# I have a feeling this isn't how I should use nonces, but
# this matches the problem specification. Should have more flexible way, though
block_n = start//16
n = start%16
accum = ECB_encrypt(counter_function(block_n),key)[n:]
for _ in range(ceil((length)/16)):
block_n += 1
accum += ECB_encrypt(counter_function(block_n),key)
return accum[:length]
def CTR_encrypt(_bytes,key,counter_function=counter_function,nonce=0):
return xor(_bytes,CTR_keystream(key,counter_function,len(_bytes),nonce))
class CTR_cypher():
def __init__(self,key,counter_function=counter_function,nonce=0):
self.key = key
self.counter_function = counter_function
self.zero = nonce
self.n = nonce
def set_offset(self,offset):
self.n = self.zero + offset
def encrypt(self,_bytes):
x = CTR_encrypt(_bytes,self.key,self.counter_function,self.n)
self.n += len(_bytes)
return x
# a = CTR_cypher(b'yellow submarine',counter_function,0)
# b = a.encrypt(b'some stuff')
# a.set_offset(0)
# print(a.encrypt(b))
# cyphertext = data('L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvoOLSFQ==','b64').bytes
# print(CTR_encrypt(cyphertext,"YELLOW SUBMARINE",counter_function,0))
| gpl-3.0 | 8,308,754,510,609,592,000 | 33 | 107 | 0.713725 | false |
siconos/siconos-deb | examples/Control/Zhuravlev/ZhuravlevIvanovMCP_C.py | 1 | 4846 | import siconos.numerics as SN
import numpy as np
import matplotlib.pyplot as plt
try:
from cffi import FFI
except:
import sys
print('no cffi module installed, exiting')
sys.exit(0)
withPlot = False
if __name__ == '__main__':
xk = np.array((1., 10.))
T = 10.0
t = 0.0
h = 1e-3
z = np.zeros((4,))
w = np.empty((4,))
kappa = 0.9
g = 9.81
theta = 1.0
gamma = 1.0
mcp = SN.MixedComplementarityProblem2(0, 4)
ffi = FFI()
ffi.cdef('void set_cstruct(uintptr_t p_env, void* p_struct);')
ffi.cdef('''typedef struct
{
int id;
double* xk;
double h;
double theta;
double gamma;
double g;
double kappa;
unsigned int f_eval;
unsigned int nabla_eval;
} data;
''')
data_struct = ffi.new('data*')
data_struct.id = -1 # to avoid freeing the data in the destructor
data_struct.xk = ffi.cast('double *', xk.ctypes.data)
data_struct.h = h
data_struct.theta = theta
data_struct.gamma = gamma
data_struct.g = g
data_struct.kappa = kappa
D = ffi.dlopen(SN._numerics.__file__)
D.set_cstruct(mcp.get_env_as_long(), ffi.cast('void*', data_struct))
mcp.set_compute_F_and_nabla_F_as_C_functions('ZhuravlevIvanov.so', 'compute_Fmcp', 'compute_nabla_Fmcp')
SO=SN.SolverOptions(mcp, SN.SICONOS_MCP_NEWTON_FBLSA)
SO.dparam[0] = 1.0e-24
SO.iparam[0] = 150
SO.iparam[3] = 2
SO.iparam[4] = 10
N = int(T/h + 10)
print(N)
lambdaPM = np.empty((N, 4))
signs = np.empty((N, 2))
sol = np.empty((N, 2))
sol[0, :] = xk
k = 0
while t <= T:
k += 1
info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
#info = SN.mcp_newton_FBLSA(mcp, z, w, SO)
#print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
if info > 0:
#zi_syst.compute_Fmcp(0, 4, z, w)
sol[k, 0] = w[0] - z[1]
sol[k, 1] = w[2] - z[3]
if sol[k, 0] < -1e-7 and np.abs(z[1]) < 1e-10:
z[1] = -sol[k, 0]
z[0] = 1.0
if xk[1] < -1e-7 and np.abs(z[3]) < 1e-10:
z[3] = -sol[k, 1]
z[2] = 1.0
if z[1] < -1e-7:
z[1] = 0.0
z[0] = 0.0
if z[3] < -1e-7:
z[3] = 0.0
z[2] = 0.0
if z[1] > 1e-7 and z[0] < 1.0 - 1e-7:
z[0] = 1.0
if z[3] > 1e-7 and z[2] < 1.0 - 1e-7:
z[2] = 1.0
info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
if info >0:
print('MCP solver failed ! info = {:}'.format(info))
print(xk)
print(z)
print(w)
# else:
# print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
#zi_syst.compute_Fmcp(0 ,4, z, w)
sol[k, 0] = w[0] - z[1]
sol[k, 1] = w[2] - z[3]
xk[:] = sol[k, :]
signs[k, 0] = z[0] - w[1]
signs[k, 1] = z[2] - w[3]
t = k*h
#z[:] = 0.0
print('f_eval', data_struct.f_eval, 'nabla_eval', data_struct.nabla_eval)
# np.savetxt("dataZIsol.txt", sol)
# np.savetxt("dataZIlambdaPM.txt", lambdaPM)
# np.savetxt("dataZIsign.txt", signs)
if withPlot:
plt.figure()
plt.plot(sol[:, 0], sol[:, 1], 'b-*')
plt.xlabel('s')
plt.ylabel('v')
plt.figure()
plt.plot(sol[:, 0], label=r's')
plt.plot(sol[:, 1], label=r'v')
plt.legend(loc='best')
plt.figure()
plt.plot(signs[:, 0], label=r'$\lambda_1$')
plt.plot(signs[:, 1], label=r'$\lambda_2$')
plt.legend(loc='best')
plt.show()
pos = np.abs(sol[:, 0])
velocity = (1 - kappa*np.sign(sol[:, 0]*sol[:, 1]))*sol[:, 1]*np.sign(sol[:, 0])
plt.subplot(211)
plt.title('position')
plt.plot(pos)
plt.grid()
plt.subplot(212)
plt.title('velocity')
plt.plot(velocity)
plt.grid()
# plt.subplot(313)
# plt.title('control input')
# plt.plot(dataPlot[:,0], control)
# plt.grid()
plt.show()
# indx = np.nonzero(dataPlot[:, 0]>30)
# ttt = dataPlot[indx, 0].flatten()
#
# plt.subplot(311)
# plt.title('position')
# plt.plot(ttt, pos[indx])
# plt.grid()
# plt.subplot(312)
# plt.title('velocity')
# plt.plot(ttt, velocity[indx])
# plt.grid()
## plt.subplot(313)
## plt.title('control input')
## plt.plot(ttt, control[indx])
# plt.grid()
# plt.show()
| apache-2.0 | 1,840,833,609,540,054,800 | 26.072626 | 108 | 0.470491 | false |
tinyms/ArchiveX | tinyms/bpm/entity.py | 1 | 2197 | __author__ = 'tinyms'
#coding=UTF8
from sqlalchemy import Column, Integer, String, Text, LargeBinary, DateTime
from tinyms.core.orm import Simplify, Entity, many_to_one, many_to_many
# Tells the engine to process a workflow node
@many_to_one("BPMProcessInstance")
class BPMWorkflow(Entity, Simplify):
node_id = Column(Integer(), nullable=False)
# Behavior: 'execute', 'leave', 'execute-leave'
behavior = Column(String(20))
params = Column(Text())
# Process definition
class BPMProcessDef(Entity, Simplify):
name = Column(String(100), nullable=False, unique=True)
# JSON definition or fully qualified class name
define = Column(Text(), nullable=False)
# Whether released as an available process: 1 = yes, 0 = no
release = Column(Integer(), default=0)
# Only permitted users may use this process
security_point = Column(String(60))
# Process instance
@many_to_one("BPMProcessDef")
@many_to_one("Archives")
class BPMProcessInstance(Entity, Simplify):
# Serialized instance data
bin = Column(LargeBinary(), nullable=False)
# Whether the instance is finished: 1 = finished, 0 = not finished
finish = Column(Integer(), default=0)
start_time = Column(DateTime(), nullable=False)
end_time = Column(DateTime())
# Process instance variables
@many_to_one("BPMProcessInstance")
class BPMProcessVars(Entity, Simplify):
name = Column(String(255), nullable=False)
val = Column(Text())
@many_to_one("BPMProcessInstance")
class BPMProcessInstanceNotify(Entity, Simplify):
node_id = Column(Integer(), nullable=False)
tip_content = Column(Text(), nullable=False)
#wait,finish
result = Column(String(20))
@many_to_one("BPMProcessInstance")
class BPMWorklist(Entity, Simplify):
task_name = Column(String(255), nullable=False)
forms = Column(Text(), nullable=False)
# Number of hours until expiry; the process ends automatically once expired
valid_time_space = Column(Integer(), default=0)
expired = Column(Integer(), default=0)
create_time = Column(DateTime(), nullable=False)
finish_time = Column(DateTime())
# Completed by (references Archives)
worker = Column(Integer())
#@many_to_one("BPMWorklist")
#@many_to_one("Archives")
#class BPMWorklistAuth(Entity, Simplify):
# # Whether editing is allowed
# editable = Column(Integer(), nullable=False)
| bsd-3-clause | 3,168,598,971,852,385,300 | 28.028986 | 75 | 0.695457 | false |
simonsdave/yar | yar/key_service/async_creds_retriever.py | 1 | 2387 | """This module contains functionality to async'ly retrieve
credentials from the key store."""
import httplib
import logging
from ks_util import filter_out_non_model_creds_properties
from ks_util import AsyncAction
_logger = logging.getLogger("KEYSERVICE.%s" % __name__)
class AsyncCredsRetriever(AsyncAction):
def fetch(self,
callback,
key=None,
principal=None,
is_filter_out_non_model_properties=False):
self._key = key
self._principal = principal
self._callback = callback
self._is_filter_out_non_model_properties = \
is_filter_out_non_model_properties
if key:
fmt = '_design/by_identifier/_view/by_identifier?key="%s"'
path = fmt % key
else:
fmt = '_design/by_principal/_view/by_principal?key="%s"'
path = fmt % principal
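            # Illustrative result (the key/principal values are invented): a
            # key of "abc123" yields the CouchDB-style view path
            # _design/by_identifier/_view/by_identifier?key="abc123"
            # before it is handed to async_req_to_key_store below.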
self.async_req_to_key_store(
path,
"GET",
None,
self._on_async_req_to_key_store_done)
def _on_async_req_to_key_store_done(self, is_ok, code=None, body=None):
"""Called when async_req_to_key_store() is done."""
if not is_ok or httplib.OK != code or body is None:
self._callback(None, None)
return
creds = []
for row in body.get("rows", []):
doc = row.get("value", {})
if self._is_filter_out_non_model_properties:
doc = filter_out_non_model_creds_properties(doc)
creds.append(doc)
if self._key:
# asked to retrieve a single set of creds so
# expecting 1 or 0 values in "creds"
num_creds = len(creds)
if 0 == num_creds:
self._callback(None, False)
else:
if 1 == num_creds:
self._callback(creds[0], False)
else:
# this is an error case with either the view or the
# data in the key store - we should never get here.
fmt = (
"Got %d docs from Key Store for key '%s'. "
"Expected 1 or 0 docs."
)
_logger.error(fmt, num_creds, self._key)
self._callback(None, None)
else:
self._callback(creds, True)
| mit | 6,111,016,473,754,680,000 | 30.826667 | 75 | 0.516967 | false |
j-dasilva/COMP4350 | apartment/rest/serializers.py | 1 | 1201 | from rest_framework import serializers
class MessageSerializer(serializers.Serializer):
sender = serializers.CharField(max_length=32)
recipient = serializers.CharField(max_length=32)
urgency = serializers.IntegerField()
content = serializers.CharField(max_length=256)
timestamp = serializers.IntegerField()
read = serializers.BooleanField()
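# Illustrative usage (a sketch, not part of the original module): Django REST
# Framework validates an incoming dict against the fields declared above, e.g.
#   serializer = MessageSerializer(data={"sender": "alice", "recipient": "bob",
#                                        "urgency": 1, "content": "hi",
#                                        "timestamp": 0, "read": False})
#   serializer.is_valid()  # True when every declared field validates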
class BulletinSerializer(serializers.Serializer):
sender = serializers.CharField(max_length=32)
timestamp = serializers.IntegerField()
subject = serializers.CharField(max_length=256)
content = serializers.CharField(max_length=256)
class CommentSerializer(serializers.Serializer):
bulletin_reference = serializers.CharField(max_length=128)
timestamp = serializers.IntegerField()
sender = serializers.CharField(max_length=32)
content = serializers.CharField(max_length=256)
class EventSerializer(serializers.Serializer):
timestamp = serializers.IntegerField()
sender = serializers.CharField(max_length=32)
content = serializers.CharField(max_length=256)
starttime = serializers.IntegerField()
endtime = serializers.IntegerField()
title = serializers.CharField(max_length=256) | gpl-2.0 | 6,784,690,892,494,369,000 | 36.5625 | 62 | 0.766861 | false |
bowen0701/algorithms_data_structures | lc0419_battleships_in_a_board.py | 1 | 3508 | """Leetcode 419. Battleships in a Board
Medium
Given a 2D board, count how many battleships are in it.
The battleships are represented with 'X's, empty slots are represented with '.'s.
You may assume the following rules:
- You receive a valid board, made of only battleships or empty slots.
- Battleships can only be placed horizontally or vertically. In other words,
they can only be made of the shape 1xN (1 row, N columns) or
Nx1 (N rows, 1 column), where N can be of any size.
- At least one horizontal or vertical cell separates between two battleships -
there are no adjacent battleships.
Example:
X..X
...X
...X
In the above board there are 2 battleships.
Invalid Example:
...X
XXXX
...X
This is an invalid board that you will not receive -
as battleships will always have a cell separating between them.
Follow up:
Could you do it in one-pass, using only O(1) extra memory and
without modifying the value of the board?
"""
class SolutionDFSRecur(object):
def _dfsRecur(self, board, r, c):
# Check exit condition: out of boundary or empty.
if (r < 0 or r >= len(board) or c < 0 or c >= len(board[0]) or
board[r][c] == '.'):
return None
# Update board as visited.
board[r][c] = '.'
# Recursively DFS 4 dirs: up, down, left, and right.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
self._dfsRecur(board, r_next, c_next)
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
Time complexity: O(m*n), where
- m: number of rows.
- n: number of columns.
Space complexity: O(m*n).
"""
if not board or not board[0]:
return 0
count = 0
# For each slot, start DFS if satisfies entry condition.
for r in range(len(board)):
for c in range(len(board[0])):
if board[r][c] == 'X':
count += 1
self._dfsRecur(board, r, c)
return count
class SolutionCheckFirst(object):
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(1).
"""
if not board or not board[0]:
return 0
count = 0
# Start from top-left to check the 1st only.
for r in range(len(board)):
for c in range(len(board[0])):
# Skip if empty.
if board[r][c] == '.':
continue
# Skip if its up is 'X'.
if r > 0 and board[r - 1][c] == 'X':
continue
# Skip if its left is 'X'.
if c > 0 and board[r][c - 1] == 'X':
continue
count += 1
return count
def main():
import time
print 'By DFS recur:'
start_time = time.time()
board = [['X','.','.','X'],
['.','.','.','X'],
['.','.','.','X']]
print SolutionDFSRecur().countBattleships(board)
print 'Time:', time.time() - start_time
print 'By checking the 1st:'
start_time = time.time()
board = [['X','.','.','X'],
['.','.','.','X'],
['.','.','.','X']]
print SolutionCheckFirst().countBattleships(board)
print 'Time:', time.time() - start_time
if __name__ == '__main__':
main()
| bsd-2-clause | -7,352,669,705,755,854,000 | 26.622047 | 81 | 0.531357 | false |
firulais/snap-RPi | RPiGPIO.py | 1 | 3629 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Snap! extension to support Raspberry Pi -- server component.
Copyright (C) 2014 Paul C. Brown <[email protected]>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import http.server
import os
import re
import socketserver
import urllib.request
import logging
if __debug__:
import RPi.GPIO as GPIO
else:
import MockupRPi.GPIO as GPIO
class CORSHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
regex = re.compile('.*pin=([0-9]*).*state=(LOW|HIGH)')
ospath = os.path.abspath('')
def send_head(self):
path = self.path
logging.info(path)
# path looks like this:
# /pinwrite?pin=1&state=LOW
# or
# /pinread?pin=1&state=LOW
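        # Illustrative requests against the server started in __main__ below
        # (the host name and pin number are placeholders):
        #   curl "http://raspberrypi.local:8280/pinwrite?pin=17&state=HIGH"
        #   curl "http://raspberrypi.local:8280/pinread?pin=17&state=HIGH"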
self.pin = 0
self.state = False
GPIO.setmode(GPIO.BCM)
m = self.regex.match(path)
if 'pinwrite' in path: # write HIGH or LOW to pin
self.pin = int(m.group(1))
self.state = True
if m.group(2) == 'LOW':
self.state = False
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, self.state)
#The Snap! block reports the body of the Web server’s response
#(minus HTTP header), without interpretation.
#At a minimum, we must provide a header with a status line and a date.
self.send_response(200)
self.send_header('Date', self.date_time_string())
self.end_headers()
elif 'pinread' in path:
# Read state of pin.
self.pin = int(m.group(1))
self.state = True
if m.group(2) == 'LOW':
self.state = False
f = open(self.ospath + '/return', 'w+')
GPIO.setup(self.pin, GPIO.IN)
if GPIO.input(self.pin) == self.state:
f.write(str(True))
else:
f.write(str(False))
f.close()
f = open(self.ospath + '/return', 'rb')
ctype = self.guess_type(self.ospath + '/return')
#create minimal response
self.send_response(200)
self.send_header('Date', self.date_time_string())
self.send_header('Content-type', ctype)
fs = os.fstat(f.fileno())
self.send_header('Content-Length', str(fs[6]))
self.send_header('Last-Modified',
self.date_time_string(fs.st_mtime))
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
return f
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, handlers=[logging.FileHandler("access.log"), logging.StreamHandler()])
PORT = 8280 # R+P in ASCII Decimal
Handler = CORSHTTPRequestHandler
httpd = socketserver.TCPServer(('', PORT), Handler)
logging.info('serving at port ' + str(PORT))
print('Go ahead and launch Snap!')
httpd.serve_forever()
| gpl-3.0 | -8,815,553,636,177,159,000 | 29.225 | 114 | 0.587538 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/network_interface_ip_configuration.py | 1 | 5689 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class NetworkInterfaceIPConfiguration(SubResource):
"""IPConfiguration in a network interface.
:param id: Resource ID.
:type id: str
:param application_gateway_backend_address_pools: The reference of
ApplicationGatewayBackendAddressPool resource.
:type application_gateway_backend_address_pools:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationGatewayBackendAddressPool]
:param load_balancer_backend_address_pools: The reference of
LoadBalancerBackendAddressPool resource.
:type load_balancer_backend_address_pools:
list[~azure.mgmt.network.v2017_10_01.models.BackendAddressPool]
:param load_balancer_inbound_nat_rules: A list of references of
LoadBalancerInboundNatRules.
:type load_balancer_inbound_nat_rules:
list[~azure.mgmt.network.v2017_10_01.models.InboundNatRule]
:param private_ip_address: Private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: Defines how a private IP address is
assigned. Possible values are: 'Static' and 'Dynamic'. Possible values
include: 'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2017_10_01.models.IPAllocationMethod
:param private_ip_address_version: Available from Api-Version 2016-03-30
onwards, it represents whether the specific ipconfiguration is IPv4 or
IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
Possible values include: 'IPv4', 'IPv6'
:type private_ip_address_version: str or
~azure.mgmt.network.v2017_10_01.models.IPVersion
:param subnet: Subnet bound to the IP configuration.
:type subnet: ~azure.mgmt.network.v2017_10_01.models.Subnet
:param primary: Gets whether this is a primary customer address on the
network interface.
:type primary: bool
:param public_ip_address: Public IP address bound to the IP configuration.
:type public_ip_address:
~azure.mgmt.network.v2017_10_01.models.PublicIPAddress
:param application_security_groups: Application security groups in which
the IP configuration is included.
:type application_security_groups:
list[~azure.mgmt.network.v2017_10_01.models.ApplicationSecurityGroup]
:param provisioning_state: The provisioning state of the network interface
IP configuration. Possible values are: 'Updating', 'Deleting', and
'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkInterfaceIPConfiguration, self).__init__(**kwargs)
self.application_gateway_backend_address_pools = kwargs.get('application_gateway_backend_address_pools', None)
self.load_balancer_backend_address_pools = kwargs.get('load_balancer_backend_address_pools', None)
self.load_balancer_inbound_nat_rules = kwargs.get('load_balancer_inbound_nat_rules', None)
self.private_ip_address = kwargs.get('private_ip_address', None)
self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
self.private_ip_address_version = kwargs.get('private_ip_address_version', None)
self.subnet = kwargs.get('subnet', None)
self.primary = kwargs.get('primary', None)
self.public_ip_address = kwargs.get('public_ip_address', None)
self.application_security_groups = kwargs.get('application_security_groups', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
| mit | -2,864,916,992,377,040,400 | 55.89 | 163 | 0.687291 | false |
gaccardo/buxfer_api | api/buxfer.py | 1 | 3631 | import requests
import settings
from pybles import pybles
from account import Account
from transaction import Transaction
from budget import Budget
from reminder import Reminder
requests.packages.urllib3.disable_warnings()
class ErrorWithBuxferAPI( Exception ): pass
class BuxferAPIUnauthorized( Exception ): pass
class BuxferAPI( object ):
def __init__(self):
self.base_url = settings.BASE_URL
self.token = None
def __get_request(self, resource):
url = "%s/%s?token=%s" % (self.base_url,
resource, self.token)
response = requests.get(url)
if response.status_code == 400:
error = response.json()
error = error['error']
print "ERROR"
print "* Resource: %s" % resource
print "* Type: %s" % error['type']
print "* Request id: %d" % error['request_id']
print "* Message: %s" % error['message']
raise BuxferAPIUnauthorized
if response.status_code != 200:
raise ErrorWithBuxferAPI
return response.json()
def login(self, user, password):
response = requests.get("%s/login?userid=%s" \
"&password=%s" % (self.base_url, user, password))
if response.status_code != 200:
raise ErrorWithBuxferAPI
token = response.json()
self.token = token['response']['token']
def logout(self):
pass
def __from_json_accounts_to_objects(self, accounts):
accounts_list = list()
for acc in accounts['response']['accounts']:
accounts_list.append(Account(currency=acc['currency'],
balance=acc['balance'],
id=acc['id'],
bank=acc['bank'],
name=acc['name']))
return accounts_list
def get_accounts(self):
response = self.__get_request('accounts')
return self.__from_json_accounts_to_objects(response)
def __from_json_transactions_to_objects(self, transactions):
transactions_list = list()
for tra in transactions['response']['transactions']:
transactions_list.append(Transaction(description=tra['description'],
account=tra['accountName'],
expense=tra['expenseAmount'],
amount=tra['amount'],
t_type=tra['transactionType'],
date=tra['normalizedDate'],
tags=tra['tagNames']))
return transactions_list
def get_transactions(self):
response = self.__get_request('transactions')
return self.__from_json_transactions_to_objects(response)
def __from_json_reminder_to_objects(self, reminders):
reminders_list = list()
for rem in reminders['response']['reminders']:
reminders_list.append(Reminder(start_date=rem['startDate'],
description=rem['description'], amount=rem['amount'],
id=rem['id'], account_id=rem['account_id']))
return reminders_list
def get_reminders(self):
response = self.__get_request('reminders')
return self.__from_json_reminder_to_objects(response)
def __from_json_budgets_to_objects(self, budgets):
budgets_list = list()
for bud in budgets['response']['budgets']:
budgets_list.append(Budget(name=bud['name'],
spent=bud['spent'], limit=bud['limit'],
balance=bud['balance']))
return budgets_list
def get_budgets(self):
response = self.__get_request('budgets')
return self.__from_json_budgets_to_objects(response)
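# Illustrative usage of the wrapper above (a sketch; the credentials are
# placeholders and the attribute names simply mirror the keyword arguments
# passed to the Account constructor, which is defined elsewhere):
#   api = BuxferAPI()
#   api.login('user@example.com', 'secret')
#   for account in api.get_accounts():
#       print("%s: %s" % (account.name, account.balance))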
| gpl-2.0 | 1,461,001,327,499,848,700 | 30.850877 | 80 | 0.590746 | false |
willkg/socorro-collector | collector/app/for_application_defaults.py | 1 | 3980 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This is an extension to configman for Socorro. It creates a ValueSource
object that is also a 'from_string_converter'. It is tailored to work with
the Socorro 'application' configuration parameter. Once configman has made
a final determination as to which application to actually run, this class
allows Configman to go to that application and fetch its preferred defaults
for the rest of options required by that application."""
from configman.converters import str_to_python_object
from configman.dotdict import DotDict
#==============================================================================
class ApplicationDefaultsProxy(object):
"""a placeholder class that will induce configman to query the application
object for the application's preferred defaults. """
def __init__(self):
self.application_defaults = DotDict()
self.apps = self.find_all_the_apps()
#--------------------------------------------------------------------------
def str_to_application_class(self, an_app_key):
"""a configman compatible str_to_* converter"""
try:
app_class = str_to_python_object(self.apps[an_app_key])
except KeyError:
app_class = str_to_python_object(an_app_key)
try:
self.application_defaults = DotDict(
app_class.get_application_defaults()
)
except AttributeError:
# no get_application_defaults, skip this step
pass
return app_class
#--------------------------------------------------------------------------
@staticmethod
def find_all_the_apps():
"""in the future, re-implement this as an automatic discovery service
"""
return {
'collector': 'collector.collector_app.CollectorApp',
'collector2015': 'collector.collector_app.Collector2015App',
'crashmover': 'collector.crashmover_app.CrashMoverApp',
'fetch': 'collector.external.fetch_app.FetchApp',
'copy_processed': 'collector.collector.crashmover_app.ProcessedCrashCopierApp',
'copy_raw_and_processed': 'collector.collector.crashmover_app.RawAndProcessedCopierApp',
'reprocess_crashlist': 'collector.external.rabbitmq.reprocess_crashlist.ReprocessCrashlistApp',
'purge_rmq': 'collector.external.rabbitmq.purge_queue_app.PurgeRabbitMQQueueApp',
}
can_handle = (
ApplicationDefaultsProxy
)
#==============================================================================
class ValueSource(object):
"""This is meant to be used as both a value source and a from string
converter. An instance, as a value source, always returns an empty
dictionary from its 'get_values' method. However, if it gets used as
a 'from string' converter, the 'get_values' behavior changes. Just before
the 'from string' converter returns the conversion result, this class calls
the method 'get_application_defaults' on it and saves the result. That
saved result becomes the new value for 'get_values' to return.
The end result is that an app that has a preferred set of defaults can still
get them loaded and used even if the app was itself loaded through
Configman.
"""
#--------------------------------------------------------------------------
def __init__(self, source, the_config_manager=None):
self.source = source
#--------------------------------------------------------------------------
def get_values(self, config_manager, ignore_mismatches, obj_hook=DotDict):
if isinstance(self.source.application_defaults, obj_hook):
return self.source.application_defaults
return obj_hook(self.source.application_defaults)
| mpl-2.0 | 5,234,856,872,957,514,000 | 45.27907 | 107 | 0.609548 | false |
jhford/picsort | picsort/sort.py | 1 | 8858 | import os
import optparse
import hashlib
import json
import shutil
from xml.dom import minidom
import multiprocessing # Only for CPU Count
import Queue
import threading
import time
import re
try:
import exifread
except ImportError:
print 'You are missing the exifread module. Try installing it'
print 'with "sudo pip install exifread" or "sudo easy_install exifread"'
exit(1)
digest_type = 'sha1'
picture_extensions = ['.jpg', '.jpeg', '.psd', '.nef', '.cr2', '.png']
stdout_lock = threading.Lock()
def split_filename(filename):
    basename = os.path.basename(filename)
    for e in picture_extensions:
        if basename.lower().endswith(e):
            return os.path.dirname(filename), basename[:-len(e)], basename[-len(e):]
    # Fall back to a plain splitext for files that are not known picture
    # types, so callers can still unpack the result and filter on the
    # returned extension.
    root, ext = os.path.splitext(basename)
    return os.path.dirname(filename), root, ext
def find_pictures(root):
img_files = []
for root, dirs, files in os.walk(root):
for f in sorted(files):
dirname, basename, ext = split_filename(f)
if ext.lower() in picture_extensions:
img_files.append(os.path.abspath(os.path.join(root, f)))
return img_files
def build_hashes(file_lists, num_threads, bufsize=1024*1024):
directory = {}
def update_directory(digest, new_file):
if directory.has_key(digest):
directory[digest].append(new_file)
else:
directory[digest] = [new_file]
def hash_file(filename):
with open(filename) as f:
h = hashlib.new(digest_type)
while True:
d = f.read(bufsize)
if not d:
break
h.update(d)
return h.hexdigest()
def worker():
while True:
item = q.get()
if item is DONE:
q.task_done()
break
digest = hash_file(item)
with directory_lock:
update_directory(digest, item)
q.task_done()
if num_threads == 0:
for l in file_lists:
for f in l:
digest = hash_file(f)
update_directory(digest, f)
else:
directory_lock = threading.Lock()
threads = []
DONE = 'DONE'
q = Queue.Queue()
for i in range(num_threads):
t = threading.Thread(target=worker)
threads.append(t)
t.daemon = True
t.start()
for l in file_lists:
for f in l:
q.put(f)
q.join()
while len([x for x in threads if x.isAlive()]) != 0:
q.put(DONE)
for thread in threads:
thread.join(0.001)
return directory
def verify_files(file_lists, num_threads):
hash_len = len(hashlib.new(digest_type).hexdigest())
pattern = re.compile('.*_%s_(?P<digest>[a-fA-F0-9]{%d}).*' % (digest_type, hash_len))
directory = build_hashes(file_lists, num_threads)
failed_files = []
for digest in directory.keys():
filename = directory[digest][0]
match = pattern.match(filename)
if match:
found_digest = match.group('digest')
if found_digest == digest:
print 'verified %s' % filename
else:
failed_files.append(filename)
print '%s failed to verify: %s vs %s' % (filename, digest, found_digest)
else:
print '%s does not have a hash, skipping' % filename
return failed_files
def dirs_from_image_data(source):
# http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/EXIF.html
try:
with open(source) as f:
exifdata = exifread.process_file(f, details=False)
except:
return os.path.join('bad exif')
dirs = []
if exifdata.has_key('Image Model'):
dirs.append(exifdata['Image Model'].printable)
else:
dirs.append('unknown camera')
if exifdata.has_key('EXIF DateTimeOriginal'):
date, time = exifdata['EXIF DateTimeOriginal'].printable.split(' ')
year, month, day = date.split(':')
dirs.extend([year, month, day])
else:
dirs.append('unknown date')
return os.path.join(*dirs)
def find_sidecars(img_files):
sidecars = []
for img_file in img_files:
dirname, basename, ext = split_filename(img_file)
sidecar = os.path.join(dirname, basename + '.xmp')
if os.path.exists(sidecar):
sidecars.append(sidecar)
return sidecars
mkdir_lock = threading.Lock()
def make_dirs_p(name):
with mkdir_lock:
if not os.path.exists(name):
os.makedirs(name)
def copy_file(source, dest):
with stdout_lock:
print 'Copying %s ==> %s' % (source, dest)
make_dirs_p(os.path.dirname(dest))
shutil.copy2(source, dest)
def alter_sidecar(source, dest, image_dest):
with stdout_lock:
print 'New sidecar for %s ==> %s' % (source, dest)
make_dirs_p(os.path.dirname(dest))
dom = minidom.parse(source)
dom.getElementsByTagName('rdf:Description')[0].attributes.get('crs:RawFileName').value = image_dest
with open(dest, 'w+') as f:
f.write(dom.toxml())
def handle_file(new_root, digest, filenames):
source = filenames[0]
dirname, filename, ext = split_filename(source)
data_based_directories = dirs_from_image_data(source)
output_directory = os.path.join(new_root, data_based_directories)
base_dest = '%s_%s_%s' % (filename, digest_type, digest)
image_dest = base_dest + ext
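    # Illustrative result (camera model, date and digest are invented):
    #   <new_root>/NIKON D90/2014/07/04/DSC_0042_sha1_<40-hex-digest>.NEF
    # dirs_from_image_data() supplies the camera/date folders and the digest
    # is appended to the original base name ahead of the extension.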
copy_file(source, os.path.join(output_directory, image_dest))
sidecars = find_sidecars(filenames)
if len(sidecars) == 0:
return
default_sidecar_dest = os.path.join(output_directory, base_dest + '.xmp')
newest_sidecar = max(sidecars, key=os.path.getctime)
alter_sidecar(newest_sidecar, default_sidecar_dest, image_dest)
i = 1
for sidecar in sidecars:
if sidecar is newest_sidecar:
continue
sidecar_dest = os.path.join(output_directory, '%s_sidecar%d.xmp' %(base_dest, i))
i += 1
alter_sidecar(sidecar, sidecar_dest, image_dest)
def handle_files(new_root, file_lists, num_threads):
directory = build_hashes(file_lists, num_threads)
if num_threads == 0:
for digest in directory.keys():
handle_file(new_root, digest, directory[digest])
return
threads = []
q = Queue.Queue()
bad_files = Queue.Queue()
DONE = 'DONE'
def worker():
while True:
item = q.get()
if item is DONE:
q.task_done()
break
try:
handle_file(new_root, item, directory[item])
except:
bad_files.put({'hash': item, 'files': directory[item]})
q.task_done()
for i in range(num_threads):
t = threading.Thread(target=worker)
threads.append(t)
t.daemon = True
t.start()
for digest in directory.keys():
q.put(digest)
q.join()
while len([x for x in threads if x.isAlive()]) != 0:
q.put(DONE)
for thread in threads:
thread.join(0.001)
failing_files = []
while not bad_files.empty():
bad_file = bad_files.get()
failing_files.append(bad_file)
bad_files.task_done()
return failing_files
def main():
print 'Find and sort pictures'
parser = optparse.OptionParser('%prog <dir1> <dirN>')
parser.add_option('-o', '--output', help='Root directory for output',
action='store', dest='output', default=None)
parser.add_option('-t', '--threads', help='Number of work threads to use. ' +
'0 means ignore threading',
action='store', dest='threads', default=multiprocessing.cpu_count())
parser.add_option('--verify', help='Verify files instead of sorting them',
action='store_true', default=False, dest='only_verify')
opts, args = parser.parse_args()
try:
threads = int(opts.threads)
except ValueError:
parser.error("Thread count must be an integer")
if not opts.output and not opts.only_verify:
parser.error("You must specify an output directory")
elif opts.only_verify:
outputdir = None
else:
outputdir = os.path.abspath(opts.output)
print "Output directory: %s" % outputdir
if len(args) < 1:
parser.error("You haven't specified any input directories")
file_lists = []
for arg in args:
file_lists.append(find_pictures(arg))
if opts.only_verify:
failures = verify_files(file_lists, threads)
else:
failures = handle_files(outputdir, file_lists, threads)
with open('failed_files.json', 'w+') as f:
json.dump(failures, f, indent=2)
print 'Done!'
if __name__ == '__main__':
main()
| gpl-2.0 | -670,674,206,479,652,600 | 29.335616 | 103 | 0.581847 | false |
BrandonLMorris/ml-examples | linear_regression/linear_regression.py | 1 | 5643 | #!/usr/local/env python3
"""
Simple linear regression with gradient descent. No optimization attempts have
been made; this is purely for example and educational purposes.
Data is randomly generated and saved to a file in the current directory for
reproducibility. To regenerate the data, simply delete the
'linear_regression_data.txt' file.
"""
from random import randint
from numpy import linspace, random, array
from matplotlib import pyplot
from sklearn.linear_model import LinearRegression
# Defined parameters for linear function (y = ax + b)
A = 4
B = 15
SIGMA = 5
LEARNING_RATE = .0003
DATA_FILE = 'linear_regression_data.txt'
def generate_noisy_data(slope, intercept, start=0, stop=100):
"""Create some linear data with some noise added to it"""
num_points = abs(start) + abs(stop)
line = linspace(start, stop, num_points)
noise = random.normal(0, SIGMA, num_points)
noisy = [(slope * d + intercept) + n for (d, n) in zip(line, noise)]
return noisy
def save_data(x, y):
"""Save the x and y coordinates to a file"""
with open(DATA_FILE, 'w+') as f:
for (xx, yy) in zip(x, y):
f.write('{} {}\n'.format(xx, yy))
def get_data():
"""Retrieve the data from the cached file
If the data can't be found in a saved file, generate some new data
and save that to a file
"""
x, y = list(), list()
try:
with open(DATA_FILE, 'r') as f:
lines = f.readlines()
for line in lines:
xx, yy = line.split()
x.append(float(xx))
y.append(float(yy))
return x, y
except FileNotFoundError:
# No data file, generate the data
y = generate_noisy_data(A, B)
x = list(range(len(y)))
save_data(x, y)
return get_data()
def hypothesis(theta0, theta1, x):
"""Return our hypothesis, or guess, given the parameters and input value"""
return theta0 + (theta1 * x)
def cost(X, Y, theta0, theta1):
"""Find the total cost of our model from the training examples
:param X: a list of input values
:param Y: a list of output values
:param theta0: the value of the first parameter of the model
:param theta1: the value of the second parameter of the model
"""
errors = [(hypothesis(theta0, theta1, x) - y) for (x, y) in zip(X, Y)]
return (1 / (2 * len(Y))) * sum([e * e for e in errors])
def descend(alpha, theta0, theta1, X, Y):
"""One iteration of gradient descent
Adjusts the model to reflect a single step of the linear descent algorithm
:param alpha: the learning rate
:param theta0: the value of the first parameter of the model
:param theta1: the value of the second parameter of the model
:param X: a list of input values
:param Y: a list of output values
"""
results = list()
n = len(Y)
partial0 = (1 / n) * sum([(hypothesis(theta0, theta1, x) - y)
for (x, y) in zip(X, Y)])
partial1 = (1 / n) * sum([(hypothesis(theta0, theta1, x) - y) * x
for (x, y) in zip(X, Y)])
new_theta0 = theta0 - alpha * partial0
new_theta1 = theta1 - alpha * partial1
return (new_theta0, new_theta1)
def r_squared(Y_predict, Y_true):
"""Calculate the R-squared value of the model
:param Y_predict: a list of predicted output values from the model
:param Y_true: a list of observed (true) output values
"""
u = sum([(yt - yp) ** 2 for (yt, yp) in zip(Y_true, Y_predict)])
mean = sum(Y_true) / len(Y_true)
v = sum([(y - mean) ** 2 for y in Y_true])
return (1 - (u / v))
def extract_test_set(X, Y):
"""Segregate the data set into test and training sets"""
num_test = round(len(X) * .3)
X_test, Y_test = list(), list()
for i in range(num_test):
index = randint(0, len(X) - 1)
X_test.append(X.pop(index))
Y_test.append(Y.pop(index))
return (X, X_test, Y, Y_test)
def main():
# Plot the original data set
X, Y = get_data()
pyplot.title("Original Data Set")
pyplot.plot(Y, 'k.')
pyplot.show()
# Create our initial values
X_train, X_test, Y_train, Y_test = extract_test_set(X, Y)
theta0, theta1 = 0, 0
# Train out model
prev_cost = 2 << 32 # Arbitrarily large number
iterations = 0
while True:
theta0, theta1 = descend(LEARNING_RATE, theta0, theta1, X_train,
Y_train)
current_cost = cost(X_train, Y_train, theta0, theta1)
# Stop if we've converged
if abs(current_cost - prev_cost) < 0.0001:
print('{} iterations'.format(iterations))
break
else:
iterations += 1
prev_cost = current_cost
# Plot our results
result = [hypothesis(theta0, theta1, yy) for yy in range(0, 100)]
pyplot.title("By-Hand Results")
pyplot.plot(X, Y, 'k.')
pyplot.plot(result)
pyplot.show()
Y_predict = [hypothesis(theta0, theta1, x) for x in X_test]
print('R^2 from by-hand: {}'.format(r_squared(Y_predict, Y_test)))
# Same algorithm, but utilize sklearn
lr = LinearRegression()
X_vector, Y_vector = (array(X_train).reshape(-1, 1),
array(Y_train).reshape(-1, 1))
lr.fit(X_vector, Y_vector)
sk_predictions = [lr.predict(x) for x in X]
print('R^2 from sklearn: {}'.format(lr.score(X_vector, Y_vector)))
# Plot the results
sk_y_predict = [lr.predict(x)[0, 0] for x in X]
pyplot.title('sklearn Results')
pyplot.plot(X, Y, 'k.')
pyplot.plot(X, sk_y_predict)
pyplot.show()
if __name__ == '__main__':
main()
| apache-2.0 | -3,545,554,035,035,114,000 | 30.176796 | 79 | 0.600744 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/recipes/transparent_legends.py | 1 | 1700 | """
Transparent, fancy legends
==========================
Sometimes you know what your data looks like before you plot it, and
may know for instance that there won't be much data in the upper right
hand corner. Then you can safely create a legend that doesn't overlay
your data:
ax.legend(loc='upper right')
Other times you don't know where your data is, and loc='best' will try
and place the legend::
ax.legend(loc='best')
but still, your legend may overlap your data, and in these cases it's
nice to make the legend frame transparent.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(1234)
fig, ax = plt.subplots(1)
ax.plot(np.random.randn(300), 'o-', label='normal distribution')
ax.plot(np.random.rand(300), 's-', label='uniform distribution')
ax.set_ylim(-3, 3)
ax.legend(loc='best', fancybox=True, framealpha=0.5)
ax.set_title('fancy, transparent legends')
pltshow(plt) | mit | 154,122,098,309,639,550 | 25.169231 | 82 | 0.641765 | false |
lcy-seso/models | fluid/image_classification/caffe2fluid/kaffe/shapes.py | 1 | 5047 | import math
from collections import namedtuple
from .errors import KaffeError
Tensor4DShape = namedtuple('Tensor4DShape',
['batch_size', 'channels', 'height', 'width'])
Tensor3DShape = namedtuple('Tensor3DShape', ['batch_size', 'data1', 'data2'])
Tensor2DShape = namedtuple('Tensor2DShape', ['batch_size', 'data'])
ScalarShape = namedtuple('ScalarShape', ['batch_size'])
def make_tensor(batch_size, d1=None, d2=None, d3=None):
if d3 is not None:
return Tensor4DShape(batch_size, d1, d2, d3)
elif d1 is not None and d2 is not None:
return Tensor3DShape(batch_size, d1, d2)
elif d1 is not None and d2 is None:
return Tensor2DShape(batch_size, d1)
elif d1 is None and d2 is None and d3 is None:
return ScalarShape(batch_size)
else:
raise NotImplementedError('invalid params for make_tensor %s' \
% (str((batch_size, d1, d2, d3))))
def get_filter_output_shape(i_h, i_w, params, round_func):
dila_h = getattr(params, 'dila_h', 1)
dila_w = getattr(params, 'dila_w', 1)
o_h = (i_h + 2 * params.pad_h -
(dila_h * (params.kernel_h - 1) + 1)) / float(params.stride_h) + 1
o_w = (i_w + 2 * params.pad_w -
(dila_w * (params.kernel_w - 1) + 1)) / float(params.stride_w) + 1
return (int(round_func(o_h)), int(round_func(o_w)))
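# Worked example (illustrative numbers): a 224x224 input with a 7x7 kernel,
# stride 2, padding 3 and no dilation gives
#   o_h = (224 + 2*3 - 7) / 2 + 1 = 112.5,
# which becomes 112 with math.floor (convolutions) or 113 with math.ceil
# (pooling layers that request ceil_mode).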
def get_strided_kernel_output_shape(node, round_func):
assert node.layer is not None
input_shape = node.get_only_parent().output_shape
o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width,
node.layer.kernel_parameters, round_func)
params = node.layer.parameters
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape.channels
return make_tensor(input_shape.batch_size, c, o_h, o_w)
def shape_not_implemented(node):
raise NotImplementedError
def shape_identity(node):
assert len(node.parents) > 0
return node.parents[0].output_shape
def shape_scalar(node):
return make_tensor(1, 1, 1, 1)
def shape_crop(node):
raise KaffeError('crop function had been defined in customer_layers')
def shape_data(node):
if node.output_shape:
# Old-style input specification
shape = node.output_shape
else:
try:
# New-style input specification
shape = map(int, node.parameters.shape[0].dim)
except:
# We most likely have a data layer on our hands. The problem is,
# Caffe infers the dimensions of the data from the source (eg: LMDB).
# We want to avoid reading datasets here. Fail for now.
# This can be temporarily fixed by transforming the data layer to
# Caffe's "input" layer (as is usually used in the "deploy" version).
# TODO: Find a better solution for this.
raise KaffeError(
'Cannot determine dimensions of data layer.\n'
'See comments in function shape_data for more info.')
return shape
def shape_mem_data(node):
params = node.parameters
return make_tensor(params.batch_size, params.channels, params.height,
params.width)
def shape_concat(node):
axis = node.layer.parameters.axis
output_shape = None
for parent in node.parents:
if output_shape is None:
output_shape = list(parent.output_shape)
else:
output_shape[axis] += parent.output_shape[axis]
return tuple(output_shape)
def shape_convolution(node):
return get_strided_kernel_output_shape(node, math.floor)
def shape_deconvolution(node):
assert node.layer is not None
input_shape = node.get_only_parent().output_shape
h_i = input_shape.height
w_i = input_shape.width
params = node.layer.kernel_parameters
p_h = params.pad_h
p_w = params.pad_w
dila_h = params.dila_h
dila_w = params.dila_w
k_h = params.kernel_h
k_w = params.kernel_w
s_h = params.stride_h
s_w = params.stride_w
h_o = (h_i - 1) * s_h - 2 * p_h + dila_h * (k_h - 1) + 1
w_o = (w_i - 1) * s_w - 2 * p_w + dila_w * (k_w - 1) + 1
params = node.layer.parameters
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape.channels
return make_tensor(input_shape.batch_size, c, h_o, w_o)
def shape_pool(node):
global_pool = getattr(node.layer.parameters, 'global_pooling', False)
if global_pool:
input_shape = node.get_only_parent().output_shape
return make_tensor(input_shape.batch_size, input_shape.channels, 1, 1)
ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True)
if ceil_mode is True:
method = math.ceil
else:
method = math.floor
return get_strided_kernel_output_shape(node, method)
def shape_inner_product(node):
input_shape = node.get_only_parent().output_shape
return make_tensor(input_shape.batch_size, node.layer.parameters.num_output)
| apache-2.0 | -4,642,686,649,229,181,000 | 31.772727 | 81 | 0.632059 | false |
adamfast/django-slowpoke | slowpoke/tests.py | 1 | 1096 | import time
from django.conf import settings
from django.test import TestCase
from slowpoke.decorator import time_my_test
from slowpoke.models import *
class SlowPokeDecoratorTests(TestCase):
@time_my_test('setup')
def setUp(self):
self._old_time_standards = getattr(settings, 'TIME_STANDARDS', False)
settings.TIME_STANDARDS = False # we want the defaults to take effect for these or else tests will have inconsistent results
time.sleep(0.2)
@time_my_test('teardown')
def tearDown(self):
settings.TIME_STANDARDS = self._old_time_standards
time.sleep(0.1)
@time_my_test('task')
def test_task_ok(self):
time.sleep(0.9)
self.assertEqual(True, True)
@time_my_test('task')
def test_task_slow(self):
time.sleep(1.1)
self.assertEqual(True, True)
@time_my_test('web_view')
def test_view_ok(self):
time.sleep(0.2)
self.assertEqual(True, True)
@time_my_test('web_view')
def test_view_slow(self):
time.sleep(0.35)
self.assertEqual(True, True)
| bsd-3-clause | 6,152,065,789,078,622,000 | 25.731707 | 133 | 0.64781 | false |
vponomaryov/manila | manila/tests/share/drivers/ibm/test_gpfs.py | 1 | 76891 | # Copyright (c) 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the IBM GPFS driver module."""
import re
import socket
import ddt
import mock
from oslo_config import cfg
from manila import context
from manila import exception
import manila.share.configuration as config
import manila.share.drivers.ibm.gpfs as gpfs
from manila.share import share_types
from manila import test
from manila.tests import fake_share
from manila import utils
CONF = cfg.CONF
@ddt.ddt
class GPFSShareDriverTestCase(test.TestCase):
"""Tests GPFSShareDriver."""
def setUp(self):
super(GPFSShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self._gpfs_execute = mock.Mock(return_value=('', ''))
self._helper_fake = mock.Mock()
CONF.set_default('driver_handles_share_servers', False)
CONF.set_default('share_backend_name', 'GPFS')
self.fake_conf = config.Configuration(None)
self._driver = gpfs.GPFSShareDriver(execute=self._gpfs_execute,
configuration=self.fake_conf)
self._knfs_helper = gpfs.KNFSHelper(self._gpfs_execute,
self.fake_conf)
self._ces_helper = gpfs.CESHelper(self._gpfs_execute,
self.fake_conf)
self.fakedev = "/dev/gpfs0"
self.fakefspath = "/gpfs0"
self.fakesharepath = "/gpfs0/share-fakeid"
self.fakeexistingshare = "existingshare"
self.fakesnapshotpath = "/gpfs0/.snapshots/snapshot-fakesnapshotid"
self.fake_ces_exports = """
mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:44.3.2.11:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:1:2:3:4:5:6:7:8:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:10.0.0.1:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE:
"""
self.fake_ces_exports_not_found = """
mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit:
"""
self.mock_object(gpfs.os.path, 'exists', mock.Mock(return_value=True))
self._driver._helpers = {
'KNFS': self._helper_fake
}
self.share = fake_share.fake_share(share_proto='NFS',
host='fakehost@fakehost#GPFS')
self.server = {
'backend_details': {
'ip': '1.2.3.4',
'instance_id': 'fake'
}
}
self.access = fake_share.fake_access()
self.snapshot = fake_share.fake_snapshot()
self.local_ip = "192.11.22.1"
self.remote_ip = "192.11.22.2"
self.remote_ip2 = "2.2.2.2"
gpfs_nfs_server_list = [self.remote_ip, self.local_ip, self.remote_ip2,
"fake_location"]
self._knfs_helper.configuration.gpfs_nfs_server_list = \
gpfs_nfs_server_list
self._ces_helper.configuration.gpfs_nfs_server_list = \
gpfs_nfs_server_list
self._ces_helper.configuration.ganesha_config_path = \
"fake_ganesha_config_path"
self.sshlogin = "fake_login"
self.sshkey = "fake_sshkey"
self.gservice = "fake_ganesha_service"
self._ces_helper.configuration.gpfs_ssh_login = self.sshlogin
self._ces_helper.configuration.gpfs_ssh_private_key = self.sshkey
self._ces_helper.configuration.ganesha_service_name = self.gservice
self.mock_object(socket, 'gethostname',
mock.Mock(return_value="testserver"))
self.mock_object(socket, 'gethostbyname_ex', mock.Mock(
return_value=('localhost',
['localhost.localdomain', 'testserver'],
['127.0.0.1', self.local_ip])
))
def test__run_ssh(self):
cmd_list = ['fake', 'cmd']
expected_cmd = 'fake cmd'
ssh_pool = mock.Mock()
ssh = mock.Mock()
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
ssh_pool.item = mock.Mock(return_value=ssh)
setattr(ssh, '__enter__', mock.Mock())
setattr(ssh, '__exit__', mock.Mock())
self.mock_object(self._driver, '_gpfs_ssh_execute')
self._driver._run_ssh(self.local_ip, cmd_list)
self._driver._gpfs_ssh_execute.assert_called_once_with(
mock.ANY, expected_cmd, check_exit_code=True,
ignore_exit_code=None)
def test__run_ssh_exception(self):
cmd_list = ['fake', 'cmd']
ssh_pool = mock.Mock()
ssh = mock.Mock()
self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool))
ssh_pool.item = mock.Mock(return_value=ssh)
self.mock_object(self._driver, '_gpfs_ssh_execute')
self.assertRaises(exception.GPFSException,
self._driver._run_ssh,
self.local_ip, cmd_list)
def test__gpfs_ssh_execute(self):
cmd = 'fake cmd'
expected_out = 'cmd successful'
expected_err = 'cmd error'
ssh = mock.Mock()
stdin_stream = mock.Mock()
stdout_stream = mock.Mock()
stderr_stream = mock.Mock()
ssh.exec_command = mock.Mock(return_value=(stdin_stream,
stdout_stream,
stderr_stream))
stdout_stream.channel.recv_exit_status = mock.Mock(return_value=-1)
stdout_stream.read = mock.Mock(return_value=expected_out)
stderr_stream.read = mock.Mock(return_value=expected_err)
stdin_stream.close = mock.Mock()
actual_out, actual_err = self._driver._gpfs_ssh_execute(ssh, cmd)
self.assertEqual(actual_out, expected_out)
self.assertEqual(actual_err, expected_err)
def test__gpfs_ssh_execute_exception(self):
cmd = 'fake cmd'
ssh = mock.Mock()
stdin_stream = mock.Mock()
stdout_stream = mock.Mock()
stderr_stream = mock.Mock()
ssh.exec_command = mock.Mock(return_value=(stdin_stream,
stdout_stream,
stderr_stream))
stdout_stream.channel.recv_exit_status = mock.Mock(return_value=1)
stdout_stream.read = mock.Mock()
stderr_stream.read = mock.Mock()
stdin_stream.close = mock.Mock()
self.assertRaises(exception.ProcessExecutionError,
self._driver._gpfs_ssh_execute,
ssh, cmd)
def test_get_share_stats_refresh_false(self):
self._driver._stats = {'fake_key': 'fake_value'}
result = self._driver.get_share_stats(False)
self.assertEqual(self._driver._stats, result)
def test_get_share_stats_refresh_true(self):
self.mock_object(
self._driver, '_get_available_capacity',
mock.Mock(return_value=(11111.0, 12345.0)))
result = self._driver.get_share_stats(True)
expected_keys = [
'qos', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
]
for key in expected_keys:
self.assertIn(key, result)
self.assertFalse(result['driver_handles_share_servers'])
self.assertEqual('IBM', result['vendor_name'])
self._driver._get_available_capacity.assert_called_once_with(
self._driver.configuration.gpfs_mount_point_base)
def test_do_setup(self):
self.mock_object(self._driver, '_setup_helpers')
self._driver.do_setup(self._context)
self.assertEqual(self._driver._gpfs_execute,
self._driver._gpfs_remote_execute)
self._driver._setup_helpers.assert_called_once_with()
def test_do_setup_gpfs_local_execute(self):
self.mock_object(self._driver, '_setup_helpers')
self._driver.configuration.is_gpfs_node = True
self._driver.do_setup(self._context)
self.assertEqual(self._driver._gpfs_execute,
self._driver._gpfs_local_execute)
self._driver._setup_helpers.assert_called_once_with()
def test_setup_helpers(self):
self._driver._helpers = {}
CONF.set_default('gpfs_share_helpers', ['KNFS=fakenfs'])
self.mock_object(gpfs.importutils, 'import_class',
mock.Mock(return_value=self._helper_fake))
self._driver._setup_helpers()
gpfs.importutils.import_class.assert_has_calls(
[mock.call('fakenfs')]
)
self.assertEqual(len(self._driver._helpers), 1)
@ddt.data(fake_share.fake_share(),
fake_share.fake_share(share_proto='NFSBOGUS'))
def test__get_helper_with_wrong_proto(self, share):
self.assertRaises(exception.InvalidShare,
self._driver._get_helper, share)
def test__local_path(self):
sharename = 'fakesharename'
self._driver.configuration.gpfs_mount_point_base =\
self.fakefspath
local_path = self._driver._local_path(sharename)
self.assertEqual(self.fakefspath + '/' + sharename,
local_path)
def test__get_share_path(self):
self._driver.configuration.gpfs_mount_point_base =\
self.fakefspath
share_path = self._driver._get_share_path(self.share)
self.assertEqual(self.fakefspath + '/' + self.share['name'],
share_path)
def test__get_snapshot_path(self):
self._driver.configuration.gpfs_mount_point_base =\
self.fakefspath
snapshot_path = self._driver._get_snapshot_path(self.snapshot)
self.assertEqual(self.fakefspath + '/' + self.snapshot['share_name'] +
'/.snapshots/' + self.snapshot['name'],
snapshot_path)
def test_check_for_setup_error_for_gpfs_state(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_export_ip(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = None
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_gpfs_mount_point_base(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = 'test'
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_directory_check(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_gpfs_path_check(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=False))
self.assertRaises(exception.GPFSException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_nfs_server_type(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_nfs_server_type = 'test'
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test_check_for_setup_error_for_nfs_server_list(self):
self.mock_object(self._driver, '_check_gpfs_state',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self.mock_object(self._driver, '_is_dir',
mock.Mock(return_value=True))
self.mock_object(self._driver, '_is_gpfs_path',
mock.Mock(return_value=True))
self._driver.configuration.gpfs_nfs_server_type = 'KNFS'
self._driver.configuration.gpfs_nfs_server_list = None
self.assertRaises(exception.InvalidParameterValue,
self._driver.check_for_setup_error)
def test__get_available_capacity(self):
path = self.fakefspath
mock_out = "Filesystem 1-blocks Used Available Capacity Mounted on\n\
/dev/gpfs0 100 30 70 30% /gpfs0"
self.mock_object(self._driver, '_gpfs_execute',
mock.Mock(return_value=(mock_out, '')))
available, size = self._driver._get_available_capacity(path)
self.assertEqual(70, available)
self.assertEqual(100, size)
def test_create_share(self):
self._helper_fake.create_export.return_value = 'fakelocation'
methods = ('_create_share', '_get_share_path')
for method in methods:
self.mock_object(self._driver, method)
result = self._driver.create_share(self._context, self.share,
share_server=self.server)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_share_path.assert_called_once_with(self.share)
self.assertEqual(result, 'fakelocation')
def test_create_share_from_snapshot(self):
self._helper_fake.create_export.return_value = 'fakelocation'
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._driver._create_share_from_snapshot = mock.Mock()
result = self._driver.create_share_from_snapshot(self._context,
self.share,
self.snapshot,
share_server=None)
self._driver._get_share_path.assert_called_once_with(self.share)
self._driver._create_share_from_snapshot.assert_called_once_with(
self.share, self.snapshot,
self.fakesharepath
)
self.assertEqual(result, 'fakelocation')
def test_create_snapshot(self):
self._driver._create_share_snapshot = mock.Mock()
self._driver.create_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._create_share_snapshot.assert_called_once_with(
self.snapshot
)
def test_delete_share(self):
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath
)
self._driver._delete_share = mock.Mock()
self._driver.delete_share(self._context, self.share,
share_server=None)
self._driver._get_share_path.assert_called_once_with(self.share)
self._driver._delete_share.assert_called_once_with(self.share)
self._helper_fake.remove_export.assert_called_once_with(
self.fakesharepath, self.share
)
def test_delete_snapshot(self):
self._driver._delete_share_snapshot = mock.Mock()
self._driver.delete_snapshot(self._context, self.snapshot,
share_server=None)
self._driver._delete_share_snapshot.assert_called_once_with(
self.snapshot
)
def test__delete_share_snapshot(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(return_value=0)
self._driver._delete_share_snapshot(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
'mmdelsnapshot', self.fakedev, self.snapshot['name'],
'-j', self.snapshot['share_name']
)
self._driver._get_gpfs_device.assert_called_once_with()
def test__delete_share_snapshot_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._delete_share_snapshot, self.snapshot)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
'mmdelsnapshot', self.fakedev, self.snapshot['name'],
'-j', self.snapshot['share_name']
)
def test_extend_share(self):
self._driver._extend_share = mock.Mock()
self._driver.extend_share(self.share, 10)
self._driver._extend_share.assert_called_once_with(self.share, 10)
def test__extend_share(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._extend_share(self.share, 10)
self._driver._gpfs_execute.assert_called_once_with(
'mmsetquota',
self.fakedev + ':' + self.share['name'],
'--block',
'0:10G')
self._driver._get_gpfs_device.assert_called_once_with()
def test__extend_share_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._extend_share, self.share, 10)
self._driver._gpfs_execute.assert_called_once_with('mmsetquota',
self.fakedev +
':' +
self.share['name'],
'--block',
'0:10G')
self._driver._get_gpfs_device.assert_called_once_with()
def test_update_access_allow(self):
"""Test allow_access functionality via update_access."""
self._driver._get_share_path = mock.Mock(
return_value=self.fakesharepath
)
self._helper_fake.allow_access = mock.Mock()
self._driver.update_access(self._context,
self.share,
["ignored"],
[self.access],
[],
share_server=None)
self._helper_fake.allow_access.assert_called_once_with(
self.fakesharepath, self.share, self.access)
self.assertFalse(self._helper_fake.resync_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_deny(self):
"""Test deny_access functionality via update_access."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._driver.update_access(self._context,
self.share,
["ignored"],
[],
[self.access],
share_server=None)
self._helper_fake.deny_access.assert_called_once_with(
self.fakesharepath, self.share, self.access)
self.assertFalse(self._helper_fake.resync_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_both(self):
"""Test update_access with allow and deny lists."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._helper_fake.allow_access = mock.Mock()
self._helper_fake.resync_access = mock.Mock()
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._driver.update_access(self._context,
self.share,
["ignore"],
[access_1],
[access_2],
share_server=None)
self.assertFalse(self._helper_fake.resync_access.called)
self._helper_fake.allow_access.assert_called_once_with(
self.fakesharepath, self.share, access_1)
self._helper_fake.deny_access.assert_called_once_with(
self.fakesharepath, self.share, access_2)
self._driver._get_share_path.assert_called_once_with(self.share)
def test_update_access_resync(self):
"""Test recovery mode update_access."""
self._driver._get_share_path = mock.Mock(return_value=self.
fakesharepath)
self._helper_fake.deny_access = mock.Mock()
self._helper_fake.allow_access = mock.Mock()
self._helper_fake.resync_access = mock.Mock()
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._driver.update_access(self._context,
self.share,
[access_1, access_2],
[],
[],
share_server=None)
self._helper_fake.resync_access.assert_called_once_with(
self.fakesharepath, self.share, [access_1, access_2])
self.assertFalse(self._helper_fake.allow_access.called)
        self.assertFalse(self._helper_fake.deny_access.called)
self._driver._get_share_path.assert_called_once_with(self.share)
def test__check_gpfs_state_active(self):
fakeout = "mmgetstate::state:\nmmgetstate::active:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._check_gpfs_state()
self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y')
self.assertEqual(result, True)
def test__check_gpfs_state_down(self):
fakeout = "mmgetstate::state:\nmmgetstate::down:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._check_gpfs_state()
self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y')
self.assertEqual(result, False)
def test__check_gpfs_state_wrong_output_exception(self):
fakeout = "mmgetstate fake out"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._check_gpfs_state)
self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y')
def test__check_gpfs_state_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._check_gpfs_state)
self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y')
def test__is_dir_success(self):
fakeoutput = "directory"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
result = self._driver._is_dir(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
self.assertEqual(result, True)
def test__is_dir_failure(self):
fakeoutput = "regular file"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, ''))
result = self._driver._is_dir(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
self.assertEqual(result, False)
def test__is_dir_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._is_dir, self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with(
'stat', '--format=%F', self.fakefspath, run_as_root=False
)
def test__is_gpfs_path_ok(self):
self._driver._gpfs_execute = mock.Mock(return_value=0)
result = self._driver._is_gpfs_path(self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with('mmlsattr',
self.fakefspath)
self.assertEqual(result, True)
def test__is_gpfs_path_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._is_gpfs_path,
self.fakefspath)
self._driver._gpfs_execute.assert_called_once_with('mmlsattr',
self.fakefspath)
def test__get_gpfs_device(self):
fakeout = "Filesystem\n" + self.fakedev
orig_val = self._driver.configuration.gpfs_mount_point_base
self._driver.configuration.gpfs_mount_point_base = self.fakefspath
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._get_gpfs_device()
self._driver._gpfs_execute.assert_called_once_with('df',
self.fakefspath)
self.assertEqual(result, self.fakedev)
self._driver.configuration.gpfs_mount_point_base = orig_val
def test__get_gpfs_device_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._driver._get_gpfs_device)
def test__create_share(self):
sizestr = '%sG' % self.share['size']
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._create_share(self.share)
self._driver._gpfs_execute.assert_any_call('mmcrfileset',
self.fakedev,
self.share['name'],
'--inode-space', 'new')
self._driver._gpfs_execute.assert_any_call('mmlinkfileset',
self.fakedev,
self.share['name'],
'-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call('mmsetquota', self.fakedev +
':' + self.share['name'],
'--block', '0:' + sizestr)
self._driver._gpfs_execute.assert_any_call('chmod',
'777',
self.fakesharepath)
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._get_gpfs_device.assert_called_once_with()
def test__create_share_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share, self.share)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._gpfs_execute.assert_called_once_with('mmcrfileset',
self.fakedev,
self.share['name'],
'--inode-space',
'new')
def test__delete_share(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._delete_share(self.share)
self._driver._gpfs_execute.assert_any_call(
'mmunlinkfileset', self.fakedev, self.share['name'],
'-f', ignore_exit_code=[2])
self._driver._gpfs_execute.assert_any_call(
'mmdelfileset', self.fakedev, self.share['name'],
'-f', ignore_exit_code=[2])
self._driver._get_gpfs_device.assert_called_once_with()
def test__delete_share_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._delete_share, self.share)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
'mmunlinkfileset', self.fakedev, self.share['name'],
'-f', ignore_exit_code=[2])
def test__create_share_snapshot(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._create_share_snapshot(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
'mmcrsnapshot', self.fakedev, self.snapshot['name'],
'-j', self.snapshot['share_name']
)
self._driver._get_gpfs_device.assert_called_once_with()
def test__create_share_snapshot_exception(self):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share_snapshot, self.snapshot)
self._driver._get_gpfs_device.assert_called_once_with()
self._driver._gpfs_execute.assert_called_once_with(
'mmcrsnapshot', self.fakedev, self.snapshot['name'],
'-j', self.snapshot['share_name']
)
def test__create_share_from_snapshot(self):
self._driver._gpfs_execute = mock.Mock(return_value=True)
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._create_share_from_snapshot(self.share, self.snapshot,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
def test__create_share_from_snapshot_exception(self):
self._driver._create_share = mock.Mock(return_value=True)
self._driver._get_snapshot_path = mock.Mock(return_value=self.
fakesnapshotpath)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
self.assertRaises(exception.GPFSException,
self._driver._create_share_from_snapshot,
self.share, self.snapshot, self.fakesharepath)
self._driver._create_share.assert_called_once_with(self.share)
self._driver._get_snapshot_path.assert_called_once_with(self.snapshot)
self._driver._gpfs_execute.assert_called_once_with(
'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath
)
@ddt.data("mmlsfileset::allocInodes:\nmmlsfileset::100096:",
"mmlsfileset::allocInodes:\nmmlsfileset::0:")
def test__is_share_valid_with_quota(self, fakeout):
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._is_share_valid(self.fakedev, self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y')
if fakeout == "mmlsfileset::allocInodes:\nmmlsfileset::100096:":
self.assertTrue(result)
else:
self.assertFalse(result)
def test__is_share_valid_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.ManageInvalidShare,
self._driver._is_share_valid, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y')
def test__is_share_valid_no_share_exist_exception(self):
fakeout = "mmlsfileset::allocInodes:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._is_share_valid, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y')
def test__get_share_name(self):
fakeout = "mmlsfileset::filesetName:\nmmlsfileset::existingshare:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
result = self._driver._get_share_name(self.fakedev, self.fakesharepath)
self.assertEqual('existingshare', result)
def test__get_share_name_exception(self):
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.ManageInvalidShare,
self._driver._get_share_name, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y')
def test__get_share_name_no_share_exist_exception(self):
fakeout = "mmlsfileset::filesetName:"
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._get_share_name, self.fakedev,
self.fakesharepath)
self._driver._gpfs_execute.assert_called_once_with(
'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y')
@ddt.data("mmlsquota::blockLimit:\nmmlsquota::1048577",
"mmlsquota::blockLimit:\nmmlsquota::1048576",
"mmlsquota::blockLimit:\nmmlsquota::0")
def test__manage_existing(self, fakeout):
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self._helper_fake.create_export.return_value = 'fakelocation'
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
actual_size, actual_path = self._driver._manage_existing(
self.fakedev, self.share, self.fakeexistingshare)
self._driver._gpfs_execute.assert_any_call('mmunlinkfileset',
self.fakedev,
self.fakeexistingshare,
'-f')
self._driver._gpfs_execute.assert_any_call('mmchfileset',
self.fakedev,
self.fakeexistingshare,
'-j', self.share['name'])
self._driver._gpfs_execute.assert_any_call('mmlinkfileset',
self.fakedev,
self.share['name'],
'-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call('chmod',
'777',
self.fakesharepath)
if fakeout == "mmlsquota::blockLimit:\nmmlsquota::1048577":
self._driver._gpfs_execute.assert_called_with('mmsetquota',
self.fakedev + ':' +
self.share['name'],
'--block',
'0:2G')
self.assertEqual(2, actual_size)
self.assertEqual('fakelocation', actual_path)
elif fakeout == "mmlsquota::blockLimit:\nmmlsquota::0":
self._driver._gpfs_execute.assert_called_with('mmsetquota',
self.fakedev + ':' +
self.share['name'],
'--block',
'0:1G')
self.assertEqual(1, actual_size)
self.assertEqual('fakelocation', actual_path)
else:
self.assertEqual(1, actual_size)
self.assertEqual('fakelocation', actual_path)
def test__manage_existing_fileset_unlink_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._gpfs_execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_called_once_with(self.share['name'])
self._driver._gpfs_execute.assert_called_once_with(
'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f')
def test__manage_existing_fileset_creation_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name'])])
def test__manage_existing_fileset_relink_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name']),
mock.call('mmlinkfileset', self.fakedev, self.share['name'], '-J',
self.fakesharepath)])
def test__manage_existing_permission_change_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name']),
mock.call('mmlinkfileset', self.fakedev, self.share['name'], '-J',
self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath)])
def test__manage_existing_checking_quota_of_fileset_exception(self):
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name']),
mock.call('mmlinkfileset', self.fakedev, self.share['name'], '-J',
self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call('mmlsquota', '-j', self.share['name'], '-Y',
self.fakedev)])
def test__manage_existing_unable_to_get_quota_of_fileset_exception(self):
fakeout = "mmlsquota::blockLimit:"
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, ''))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_any_call('mmunlinkfileset',
self.fakedev,
self.fakeexistingshare,
'-f')
self._driver._gpfs_execute.assert_any_call('mmchfileset',
self.fakedev,
self.fakeexistingshare,
'-j', self.share['name'])
self._driver._gpfs_execute.assert_any_call('mmlinkfileset',
self.fakedev,
self.share['name'],
'-J', self.fakesharepath)
self._driver._gpfs_execute.assert_any_call('chmod',
'777',
self.fakesharepath)
self._driver._gpfs_execute.assert_called_with(
'mmlsquota', '-j', self.share['name'], '-Y', self.fakedev)
def test__manage_existing_set_quota_of_fileset_less_than_1G_exception(
self):
sizestr = '1G'
mock_out = "mmlsquota::blockLimit:\nmmlsquota::0:", None
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', mock_out,
exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name']),
mock.call('mmlinkfileset', self.fakedev, self.share['name'], '-J',
self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call('mmlsquota', '-j', self.share['name'], '-Y',
self.fakedev),
mock.call('mmsetquota', self.fakedev + ':' + self.share['name'],
'--block', '0:' + sizestr)])
def test__manage_existing_set_quota_of_fileset_grater_than_1G_exception(
self):
sizestr = '2G'
mock_out = "mmlsquota::blockLimit:\nmmlsquota::1048577:", None
self._driver._local_path = mock.Mock(return_value=self.fakesharepath)
self.mock_object(self._driver, '_gpfs_execute', mock.Mock(
side_effect=['', '', '', '', mock_out,
exception.ProcessExecutionError]))
self.assertRaises(exception.GPFSException,
self._driver._manage_existing, self.fakedev,
self.share, self.fakeexistingshare)
self._driver._local_path.assert_any_call(self.share['name'])
self._driver._gpfs_execute.assert_has_calls([
mock.call('mmunlinkfileset', self.fakedev, self.fakeexistingshare,
'-f'),
mock.call('mmchfileset', self.fakedev, self.fakeexistingshare,
'-j', self.share['name']),
mock.call('mmlinkfileset', self.fakedev, self.share['name'], '-J',
self.fakesharepath),
mock.call('chmod', '777', self.fakesharepath),
mock.call('mmlsquota', '-j', self.share['name'], '-Y',
self.fakedev),
mock.call('mmsetquota', self.fakedev + ':' + self.share['name'],
'--block', '0:' + sizestr)])
def test_manage_existing(self):
self._driver._manage_existing = mock.Mock(return_value=('1',
'fakelocation'))
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._is_share_valid = mock.Mock(return_value=True)
self._driver._get_share_name = mock.Mock(return_value=self.
fakeexistingshare)
self._helper_fake._has_client_access = mock.Mock(return_value=[])
result = self._driver.manage_existing(self.share, {})
self.assertEqual('1', result['size'])
self.assertEqual('fakelocation', result['export_locations'])
def test_manage_existing_incorrect_path_exception(self):
share = fake_share.fake_share(export_location="wrong_ip::wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
def test_manage_existing_incorrect_ip_exception(self):
share = fake_share.fake_share(export_location="wrong_ip:wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
def test__manage_existing_invalid_export_exception(self):
share = fake_share.fake_share(export_location="wrong_ip/wrong_path")
self.assertRaises(exception.ShareBackendException,
self._driver.manage_existing, share, {})
@ddt.data(True, False)
def test_manage_existing_invalid_share_exception(self, valid_share):
self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev)
self._driver._is_share_valid = mock.Mock(return_value=valid_share)
if valid_share:
self._driver._get_share_name = mock.Mock(return_value=self.
fakeexistingshare)
self._helper_fake._has_client_access = mock.Mock()
else:
self.assertFalse(self._helper_fake._has_client_access.called)
self.assertRaises(exception.ManageInvalidShare,
self._driver.manage_existing, self.share, {})
def test__gpfs_local_execute(self):
self.mock_object(utils, 'execute', mock.Mock(return_value=True))
cmd = "testcmd"
self._driver._gpfs_local_execute(cmd, ignore_exit_code=[2])
utils.execute.assert_called_once_with(cmd, run_as_root=True,
check_exit_code=[2, 0])
def test__gpfs_remote_execute(self):
self._driver._run_ssh = mock.Mock(return_value=True)
cmd = "testcmd"
orig_value = self._driver.configuration.gpfs_share_export_ip
self._driver.configuration.gpfs_share_export_ip = self.local_ip
self._driver._gpfs_remote_execute(cmd, check_exit_code=True)
self._driver._run_ssh.assert_called_once_with(
self.local_ip, tuple([cmd]), None, True
)
self._driver.configuration.gpfs_share_export_ip = orig_value
def test_knfs_resync_access(self):
self._knfs_helper.allow_access = mock.Mock()
path = self.fakesharepath
to_remove = '3.3.3.3'
fake_exportfs_before = ('%(path)s\n\t\t%(ip)s\n'
'/other/path\n\t\t4.4.4.4\n' %
{'path': path, 'ip': to_remove})
fake_exportfs_after = '/other/path\n\t\t4.4.4.4\n'
self._knfs_helper._execute = mock.Mock(
return_value=(fake_exportfs_before, ''))
self._knfs_helper._publish_access = mock.Mock(
side_effect=[[(fake_exportfs_before, '')],
[(fake_exportfs_after, '')]])
access_1 = fake_share.fake_access(access_to="1.1.1.1")
access_2 = fake_share.fake_access(access_to="2.2.2.2")
self._knfs_helper.resync_access(path, self.share, [access_1, access_2])
self._knfs_helper.allow_access.assert_has_calls([
mock.call(path, self.share, access_1, error_on_exists=False),
mock.call(path, self.share, access_2, error_on_exists=False)])
self._knfs_helper._execute.assert_called_once_with(
'exportfs', run_as_root=True)
self._knfs_helper._publish_access.assert_has_calls([
mock.call('exportfs', '-u',
'%(ip)s:%(path)s' % {'ip': to_remove, 'path': path},
check_exit_code=[0, 1]),
mock.call('exportfs')])
@ddt.data('rw', 'ro')
def test_knfs_get_export_options(self, access_level):
mock_out = {"knfs:export_options": "no_root_squash"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = fake_share.fake_access(access_level=access_level)
out = self._knfs_helper.get_export_options(self.share, access, 'KNFS')
self.assertEqual("no_root_squash,%s" % access_level, out)
def test_knfs_get_export_options_default(self):
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access = self.access
out = self._knfs_helper.get_export_options(self.share, access, 'KNFS')
self.assertEqual("rw", out)
def test_knfs_get_export_options_invalid_option_ro(self):
mock_out = {"knfs:export_options": "ro"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._knfs_helper.get_export_options,
share, access, 'KNFS')
def test_knfs_get_export_options_invalid_option_rw(self):
mock_out = {"knfs:export_options": "rw"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._knfs_helper.get_export_options,
share, access, 'KNFS')
@ddt.data(("/gpfs0/share-fakeid\t10.0.0.1", None),
("", None),
("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.1"),
("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.2"))
@ddt.unpack
def test_knfs__has_client_access(self, mock_out, access_to):
self._knfs_helper._execute = mock.Mock(return_value=[mock_out, 0])
result = self._knfs_helper._has_client_access(self.fakesharepath,
access_to)
        self._knfs_helper._execute.assert_called_once_with(
            'exportfs', check_exit_code=True, run_as_root=True)
if mock_out == "/gpfs0/share-fakeid\t10.0.0.1":
if access_to in (None, "10.0.0.1"):
self.assertTrue(result)
else:
self.assertFalse(result)
else:
self.assertFalse(result)
def test_knfs_allow_access(self):
self._knfs_helper._execute = mock.Mock(
return_value=['/fs0 <world>', 0]
)
self.mock_object(re, 'search', mock.Mock(return_value=None))
export_opts = None
self._knfs_helper.get_export_options = mock.Mock(
return_value=export_opts
)
self._knfs_helper._publish_access = mock.Mock()
access = self.access
local_path = self.fakesharepath
self._knfs_helper.allow_access(local_path, self.share, access)
self._knfs_helper._execute.assert_called_once_with('exportfs',
run_as_root=True)
self.assertTrue(re.search.called)
self._knfs_helper.get_export_options.assert_any_call(
self.share, access, 'KNFS')
cmd = ['exportfs', '-o', export_opts, ':'.join([access['access_to'],
local_path])]
self._knfs_helper._publish_access.assert_called_once_with(*cmd)
def test_knfs_allow_access_access_exists(self):
out = ['/fs0 <world>', 0]
self._knfs_helper._execute = mock.Mock(return_value=out)
self.mock_object(re, 'search', mock.Mock(return_value="fake"))
self._knfs_helper.get_export_options = mock.Mock()
access = self.access
local_path = self.fakesharepath
self.assertRaises(exception.ShareAccessExists,
self._knfs_helper.allow_access,
local_path, self.share, access)
self._knfs_helper._execute.assert_any_call('exportfs',
run_as_root=True)
self.assertTrue(re.search.called)
self.assertFalse(self._knfs_helper.get_export_options.called)
def test_knfs_allow_access_publish_exception(self):
self._knfs_helper.get_export_options = mock.Mock()
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError('boom'))
self.assertRaises(exception.GPFSException,
self._knfs_helper.allow_access,
self.fakesharepath,
self.share,
self.access,
error_on_exists=False)
self.assertTrue(self._knfs_helper.get_export_options.called)
self.assertTrue(self._knfs_helper._publish_access.called)
def test_knfs_allow_access_invalid_access(self):
access = fake_share.fake_access(access_type='test')
self.assertRaises(exception.InvalidShareAccess,
self._knfs_helper.allow_access,
self.fakesharepath, self.share,
access)
def test_knfs_allow_access_exception(self):
self._knfs_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError
)
access = self.access
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._knfs_helper.allow_access,
local_path, self.share,
access)
self._knfs_helper._execute.assert_called_once_with('exportfs',
run_as_root=True)
def test_knfs__verify_denied_access_pass(self):
local_path = self.fakesharepath
ip = self.access['access_to']
fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n'
'/shares/share-2\n\t\t2.2.2.2\n')
self._knfs_helper._publish_access = mock.Mock(
return_value=[(fake_exportfs, '')])
self._knfs_helper._verify_denied_access(local_path, self.share, ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
def test_knfs__verify_denied_access_fail(self):
local_path = self.fakesharepath
ip = self.access['access_to']
data = {'path': local_path, 'ip': ip}
fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n'
'%(path)s\n\t\t%(ip)s\n'
'/shares/share-2\n\t\t2.2.2.2\n') % data
self._knfs_helper._publish_access = mock.Mock(
return_value=[(fake_exportfs, '')])
self.assertRaises(exception.GPFSException,
self._knfs_helper._verify_denied_access,
local_path,
self.share,
ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
def test_knfs__verify_denied_access_exception(self):
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError
)
ip = self.access['access_to']
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._knfs_helper._verify_denied_access,
local_path,
self.share,
ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
@ddt.data((None, False),
('', False),
(' ', False),
('Some error to log', True))
@ddt.unpack
def test_knfs__verify_denied_access_stderr(self, stderr, is_logged):
"""Stderr debug logging should only happen when not empty."""
outputs = [('', stderr)]
self._knfs_helper._publish_access = mock.Mock(return_value=outputs)
gpfs.LOG.debug = mock.Mock()
self._knfs_helper._verify_denied_access(
self.fakesharepath, self.share, self.remote_ip)
self._knfs_helper._publish_access.assert_called_once_with('exportfs')
self.assertEqual(is_logged, gpfs.LOG.debug.called)
def test_knfs_deny_access(self):
self._knfs_helper._publish_access = mock.Mock(return_value=[('', '')])
access = self.access
local_path = self.fakesharepath
self._knfs_helper.deny_access(local_path, self.share, access)
deny = ['exportfs', '-u', ':'.join([access['access_to'], local_path])]
self._knfs_helper._publish_access.assert_has_calls([
mock.call(*deny, check_exit_code=[0, 1]),
mock.call('exportfs')])
def test_knfs_deny_access_exception(self):
self._knfs_helper._publish_access = mock.Mock(
side_effect=exception.ProcessExecutionError
)
access = self.access
local_path = self.fakesharepath
cmd = ['exportfs', '-u', ':'.join([access['access_to'], local_path])]
self.assertRaises(exception.GPFSException,
self._knfs_helper.deny_access, local_path,
self.share, access)
self._knfs_helper._publish_access.assert_called_once_with(
*cmd, check_exit_code=[0, 1])
def test_knfs__publish_access(self):
self.mock_object(utils, 'execute')
fake_command = 'fakecmd'
cmd = [fake_command]
self._knfs_helper._publish_access(*cmd)
utils.execute.assert_any_call(*cmd, run_as_root=True,
check_exit_code=True)
remote_login = self.sshlogin + '@' + self.remote_ip
remote_login2 = self.sshlogin + '@' + self.remote_ip2
utils.execute.assert_has_calls([
mock.call('ssh', remote_login, fake_command,
check_exit_code=True, run_as_root=False),
mock.call(fake_command, check_exit_code=True, run_as_root=True),
mock.call('ssh', remote_login2, fake_command,
check_exit_code=True, run_as_root=False)])
self.assertTrue(socket.gethostbyname_ex.called)
self.assertTrue(socket.gethostname.called)
def test_knfs__publish_access_exception(self):
self.mock_object(
utils, 'execute',
mock.Mock(side_effect=(0, exception.ProcessExecutionError)))
fake_command = 'fakecmd'
cmd = [fake_command]
self.assertRaises(exception.ProcessExecutionError,
self._knfs_helper._publish_access, *cmd)
self.assertTrue(socket.gethostbyname_ex.called)
self.assertTrue(socket.gethostname.called)
remote_login = self.sshlogin + '@' + self.remote_ip
utils.execute.assert_has_calls([
mock.call('ssh', remote_login, fake_command,
check_exit_code=True, run_as_root=False),
mock.call(fake_command, check_exit_code=True, run_as_root=True)])
@ddt.data('rw', 'ro')
def test_ces_get_export_options(self, access_level):
mock_out = {"ces:export_options": "squash=no_root_squash"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = fake_share.fake_access(access_level=access_level)
out = self._ces_helper.get_export_options(self.share, access, 'CES')
self.assertEqual("squash=no_root_squash,access_type=%s" % access_level,
out)
def test_ces_get_export_options_default(self):
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access = self.access
out = self._ces_helper.get_export_options(self.share, access,
'CES')
self.assertEqual("access_type=rw", out)
def test_ces_get_export_options_invalid_option_ro(self):
mock_out = {"ces:export_options": "access_type=ro"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._ces_helper.get_export_options,
share, access, 'CES')
def test_ces_get_export_options_invalid_option_rw(self):
mock_out = {"ces:export_options": "access_type=rw"}
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value=mock_out))
access = self.access
share = fake_share.fake_share(share_type="fake_share_type")
self.assertRaises(exception.InvalidInput,
self._ces_helper.get_export_options,
share, access, 'CES')
def test__get_nfs_client_exports_exception(self):
self._ces_helper._execute = mock.Mock(return_value=('junk', ''))
local_path = self.fakesharepath
self.assertRaises(exception.GPFSException,
self._ces_helper._get_nfs_client_exports,
local_path)
self._ces_helper._execute.assert_called_once_with(
'mmnfs', 'export', 'list', '-n', local_path, '-Y')
@ddt.data('44.3.2.11', '1:2:3:4:5:6:7:8')
def test__fix_export_data(self, ip):
data = None
for line in self.fake_ces_exports.splitlines():
if "HEADER" in line:
headers = line.split(':')
if ip in line:
data = line.split(':')
break
self.assertIsNotNone(
data, "Test data did not contain a line with the test IP.")
result_data = self._ces_helper._fix_export_data(data, headers)
self.assertEqual(ip, result_data[headers.index('Clients')])
@ddt.data((None, True),
('44.3.2.11', True),
('44.3.2.1', False),
('4.3.2.1', False),
('4.3.2.11', False),
('1.2.3.4', False),
('', False),
('*', False),
('.', False),
('1:2:3:4:5:6:7:8', True))
@ddt.unpack
def test_ces__has_client_access(self, ip, has_access):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self.assertEqual(has_access,
self._ces_helper._has_client_access(local_path, ip))
self._ces_helper._execute.assert_called_once_with(
'mmnfs', 'export', 'list', '-n', local_path, '-Y')
def test_ces_remove_export_no_exports(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.remove_export(local_path, self.share)
self._ces_helper._execute.assert_called_once_with(
'mmnfs', 'export', 'list', '-n', local_path, '-Y')
def test_ces_remove_export_existing_exports(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.remove_export(local_path, self.share)
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'remove', local_path),
])
def test_ces_remove_export_exception(self):
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.remove_export,
local_path, self.share)
def test_ces_allow_access(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
export_opts = "access_type=rw"
self._ces_helper.get_export_options = mock.Mock(
return_value=export_opts)
access = self.access
local_path = self.fakesharepath
self._ces_helper.allow_access(local_path, self.share, access)
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'add', local_path, '-c',
access['access_to'] + '(' + export_opts + ')')])
def test_ces_allow_access_existing_exports(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
export_opts = "access_type=rw"
self._ces_helper.get_export_options = mock.Mock(
return_value=export_opts)
access = self.access
local_path = self.fakesharepath
self._ces_helper.allow_access(self.fakesharepath, self.share,
self.access)
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'change', local_path, '--nfsadd',
access['access_to'] + '(' + export_opts + ')')])
def test_ces_allow_access_invalid_access_type(self):
access = fake_share.fake_access(access_type='test')
self.assertRaises(exception.InvalidShareAccess,
self._ces_helper.allow_access,
self.fakesharepath, self.share,
access)
def test_ces_allow_access_exception(self):
access = self.access
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.allow_access, local_path,
self.share, access)
def test_ces_deny_access(self):
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
access = self.access
local_path = self.fakesharepath
self._ces_helper.deny_access(local_path, self.share, access)
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'change', local_path, '--nfsremove',
access['access_to'])])
def test_ces_deny_access_exception(self):
access = self.access
local_path = self.fakesharepath
self._ces_helper._execute = mock.Mock(
side_effect=exception.ProcessExecutionError)
self.assertRaises(exception.GPFSException,
self._ces_helper.deny_access, local_path,
self.share, access)
def test_ces_resync_access_add(self):
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(return_value=(mock_out, ''))
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access_rules = [self.access]
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, self.share, access_rules)
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'add', local_path, '-c',
self.access['access_to'] + '(' + "access_type=rw" + ')')
])
share_types.get_extra_specs_from_share.assert_called_once_with(
self.share)
def test_ces_resync_access_change(self):
class SortedMatch(object):
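            """Match a comma-separated option string ignoring element order.
            (Editorial note: the client list passed to '--nfsremove' appears
            to be built from an unordered collection, so only the set of
            entries is deterministic, not their order.)
            """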
def __init__(self, f, expected):
self.assertEqual = f
self.expected = expected
def __eq__(self, actual):
expected_list = self.expected.split(',')
actual_list = actual.split(',')
self.assertEqual(sorted(expected_list), sorted(actual_list))
return True
mock_out = self.fake_ces_exports
self._ces_helper._execute = mock.Mock(
return_value=(mock_out, ''))
self.mock_object(share_types, 'get_extra_specs_from_share',
mock.Mock(return_value={}))
access_rules = [fake_share.fake_access(access_to='1.1.1.1'),
fake_share.fake_access(
access_to='10.0.0.1', access_level='ro')]
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, self.share, access_rules)
share_types.get_extra_specs_from_share.assert_called_once_with(
self.share)
to_remove = '1:2:3:4:5:6:7:8,44.3.2.11'
to_add = access_rules[0]['access_to'] + '(' + "access_type=rw" + ')'
to_change = access_rules[1]['access_to'] + '(' + "access_type=ro" + ')'
self._ces_helper._execute.assert_has_calls([
mock.call('mmnfs', 'export', 'list', '-n', local_path, '-Y'),
mock.call('mmnfs', 'export', 'change', local_path,
'--nfsremove', SortedMatch(self.assertEqual, to_remove),
'--nfsadd', to_add,
'--nfschange', to_change)
])
def test_ces_resync_nothing(self):
"""Test that hits the add-no-rules case."""
mock_out = self.fake_ces_exports_not_found
self._ces_helper._execute = mock.Mock(return_value=(mock_out, ''))
local_path = self.fakesharepath
self._ces_helper.resync_access(local_path, None, [])
self._ces_helper._execute.assert_called_once_with(
'mmnfs', 'export', 'list', '-n', local_path, '-Y')
| apache-2.0 | 8,466,903,491,964,898,000 | 45.970678 | 219 | 0.5632 | false |
underloki/Cyprium | app/cli/root/crypto/text/atomicdigits.py | 1 | 12349 | #! /usr/bin/python3
########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# cyprium.hackademics.fr # #
# Authors: SAKAROV, mont29, afranck64 #
# Contact: [email protected] #
# Forum: hackademics.fr #
# Twitter: @hackademics_ #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
import sys
import os
import random
# In case we directly run that file, we need to add the whole cyprium to path,
# to get access to CLI stuff!
if __name__ == "__main__":
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..", "..",
"..")))
import app.cli
import kernel.crypto.text.atomicdigits as atomicdigits
import kernel.utils as utils
class AtomicDigits(app.cli.Tool):
"""CLI wrapper for atomicdigits crypto text tool."""
def main(self, ui):
ui.message("********** Welcome to Cyprium.AtomicDigits! **********")
quit = False
while not quit:
options = [(self.about, "*about", "Show some help!"),
(self.demo, "*demo", "Show some examples"),
(self.cypher, "*cypher",
"Cypher some text in atomic digits"),
(self.decypher, "d*ecypher",
"Decypher atomic digits into text"),
("", "-----", ""),
("tree", "*tree", "Show the whole tree"),
("quit", "*quit", "Quit Cyprium.AtomicDigits")]
msg = "Cyprium.AtomicDigits"
answ = ui.get_choice(msg, options)
if answ == 'tree':
self._tree.print_tree(ui, self._tree.FULL)
elif answ == 'quit':
self._tree.current = self._tree.current.parent
quit = True
else:
answ(ui)
ui.message("Back to Cyprium menus! Bye.")
def _get_exhaustive_txt(self, out, ui, min_cypher, act=None):
ui.message("Exaustive found {} solutions for a minimum cyphering of "
"{}, among which {} solutions with the highest possible "
"cyphering ({})."
"".format(out["n_solutions"], min_cypher,
out["best_n_solutions"],
out["best_cypher"]))
if act not in {"all", "best", "rand", "rand_best"}:
options = [("all", "*all solutions", ""),
("best", "all $best solutions", ""),
("rand", "*one random solution", ""),
("rand_best", "or one *random best solution", "")]
act = ui.get_choice("Do you want to get", options,
oneline=True)
if act == "all":
lines = utils.format_multiwords(out["solutions"], sep=" ")
return "\n {}".format("\n ".join(lines))
elif act == "best":
lines = utils.format_multiwords(out["best_solutions"], sep=" ")
return "\n {}".format("\n ".join(lines))
elif act == "rand":
return " ".join((random.choice(w) for w in out["solutions"]))
else:
return " ".join((random.choice(w) for w in out["best_solutions"]))
def about(self, ui):
ui.message(atomicdigits.__about__)
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def demo(self, ui):
ui.message("===== Demo Mode =====")
ui.message("Running a small demo/testing!")
ui.message("")
ui.message("--- Cyphering ---")
ui.message("Data to cypher: {}".format("HOW ARE YOU NICEDAYISNTIT"))
out = atomicdigits.cypher("HOW ARE YOU NICEDAYISNTIT")
ui.message("Atomic digits cyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out,
sep=" "))))
ui.message("")
htext = "90 53 16 53 16 A Q 92 53 52 16 53 M 15 L E 52 16 T"
ui.message("--- Decyphering ---")
ui.message("Atomic digits text used as input: {}".format(htext))
out = atomicdigits.decypher(htext)
ui.message("The decyphered data is:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
ui.message("")
ui.message("--- Notes ---")
ui.message("+ You can choose the optionnal Exhaustive option, to get "
"all possible encodings of each words higher than the "
"given threshold of cyphering (or the highest possible):")
ui.message("Data to cypher: {}".format("HOW ARE YOU NICEDAYISNTIT"))
out = atomicdigits.cypher("HOW ARE YOU NICEDAYISNTIT", exhaustive=True,
min_cypher=0.8)
out = self._get_exhaustive_txt(out, ui, min_cypher=0.8, act="all")
ui.message(out)
ui.message("")
htext = "1874 A75 39892 75358DA39535081T"
ui.message("+ You can try to decypher a text with atomic numbers "
"merged (i.e. no more spaces between them – nasty!):")
ui.message("Data to decypher: {}".format(htext))
out = atomicdigits.decypher(htext)
ui.message("Atomic digits decyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
ui.message("")
ui.message("--- Won’t work ---")
ui.message("+ The input text to cypher must be ASCII uppercase "
"chars only:")
ui.message("Data to cypher: {}\n".format("Hello WORLD !"))
try:
out = atomicdigits.cypher("Hello WORLD !")
ui.message("Atomic digits cyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.message("+ The input text to decypher must be valid atomic digits:")
htext = "90 53 016 53 16 A Q 922 53 52 16 53 M 15 L E 52 16 T"
ui.message("Atomic digits text used as input: {}".format(htext))
try:
out = atomicdigits.decypher(htext)
ui.message("Atomic digits decyphered data:\n {}"
"".format("\n ".join(utils.format_multiwords(out))))
except Exception as e:
ui.message(str(e), level=ui.ERROR)
ui.message("")
ui.get_choice("", [("", "Go back to $menu", "")], oneline=True)
def cypher(self, ui):
"""Interactive version of cypher()."""
txt = ""
ui.message("===== Cypher Mode =====")
while 1:
done = False
while 1:
exhaustive = False
threshold = 0.8
txt = ui.text_input("Text to cypher to atomic digits",
sub_type=ui.UPPER)
if txt is None:
break # Go back to main Cypher menu.
options = [("exhst", "*exhaustive cyphering", ""),
("simple", "or $simple one", "")]
answ = ui.get_choice("Do you want to use", options,
oneline=True)
if answ == "exhst":
exhaustive = True
t = ui.get_data("Cypher threshold (nothing to use default "
"{} one): ".format(threshold),
sub_type=ui.FLOAT, allow_void=True)
if t is not None:
threshold = t
try:
# Will also raise an exception if data is None.
txt = atomicdigits.cypher(txt, exhaustive=exhaustive,
min_cypher=threshold)
if exhaustive:
txt = self._get_exhaustive_txt(txt, ui,
min_cypher=threshold)
done = True # Out of those loops, output result.
break
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("retry", "*try again", ""),
("menu", "or go back to *menu", "")]
answ = ui.get_choice("Could not convert that data into "
"atomic digits, please", options,
oneline=True)
if answ in {None, "menu"}:
return # Go back to main Sema menu.
# Else, retry with another data to hide.
if done:
ui.text_output("Text successfully converted", txt,
"Atomic digits version of text")
options = [("redo", "*cypher another text", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ in {None, "quit"}:
return
def decypher(self, ui):
"""Interactive version of decypher()."""
txt = ""
ui.message("===== Decypher Mode =====")
while 1:
txt = ui.text_input("Please choose some atomic digits text",
sub_type=ui.UPPER)
try:
txt = atomicdigits.decypher(txt)
txt = "\n " + "\n ".join(utils.format_multiwords(txt))
ui.text_output("Text successfully decyphered",
txt,
"The decyphered text is")
except Exception as e:
if utils.DEBUG:
import traceback
traceback.print_tb(sys.exc_info()[2])
ui.message(str(e), level=ui.ERROR)
options = [("redo", "*decypher another data", ""),
("quit", "or go back to *menu", "")]
answ = ui.get_choice("Do you want to", options, oneline=True)
if answ == "quit":
return
NAME = "atomic"
TIP = "Tool to convert text to/from atomic digits code."
TYPE = app.cli.Node.TOOL
CLASS = AtomicDigits
# Allow tool to be used directly, without using Cyprium menu.
if __name__ == "__main__":
import app.cli.ui
ui = app.cli.ui.UI()
tree = app.cli.NoTree("AtomicDigits")
AtomicDigits(tree).main(ui)
| gpl-3.0 | -3,187,348,069,849,674,000 | 44.881041 | 123 | 0.458273 | false |
patrickfuller/imolecule | imolecule/format_converter.py | 1 | 7752 | """
Methods to interconvert between json and other (cif, mol, smi, etc.) files
"""
import imolecule.json_formatter as json
from collections import Counter
from fractions import gcd
from functools import reduce
# Open Babel <= '2.4.1'
try:
import pybel
ob = pybel.ob
table = ob.OBElementTable()
GetAtomicNum = table.GetAtomicNum
GetSymbol = table.GetSymbol
has_ob = True
except ImportError:
has_ob = False
# Open Babel >= '3.0.0'
try:
from openbabel import pybel
ob = pybel.ob
GetAtomicNum = ob.GetAtomicNum
GetSymbol = ob.GetSymbol
has_ob = True
except ImportError:
has_ob = False
def convert(data, in_format, out_format, name=None, pretty=False):
"""Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
"""
# Decide on a json formatter depending on desired prettiness
dumps = json.dumps if pretty else json.compress
# Shortcut for avoiding pybel dependency
if not has_ob and in_format == 'json' and out_format == 'json':
return dumps(json.loads(data) if is_string(data) else data)
elif not has_ob:
raise ImportError("Chemical file format conversion requires pybel.")
# These use the open babel library to interconvert, with additions for json
if in_format == 'json':
mol = json_to_pybel(json.loads(data) if is_string(data) else data)
elif in_format == 'pybel':
mol = data
else:
mol = pybel.readstring(in_format, data)
# Infer structure in cases where the input format has no specification
if not mol.OBMol.HasNonZeroCoords():
mol.make3D()
# Make P1 if that's a thing, recalculating bonds in process
if in_format == 'mmcif' and hasattr(mol, 'unitcell'):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if out_format == 'pybel':
return mol
elif out_format == 'object':
return pybel_to_json(mol, name)
elif out_format == 'json':
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format)
def json_to_pybel(data, infer_bonds=False):
"""Converts python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
"""
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
# If there is no bond data, try to infer them
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
# Otherwise, use the bonds in the data set
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
# Check for unit cell data
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
# Add partial charges
if 'charge' in data['atoms'][0]:
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol
def pybel_to_json(molecule, name=None):
"""Converts a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data
"""
# Save atom element type and 3D location.
atoms = [{'element': GetSymbol(atom.atomicnum),
'location': list(atom.coords)}
for atom in molecule.atoms]
# Recover auxiliary data, if exists
for json_atom, pybel_atom in zip(atoms, molecule.atoms):
if pybel_atom.partialcharge != 0:
json_atom['charge'] = pybel_atom.partialcharge
if pybel_atom.OBAtom.HasData('_atom_site_label'):
obatom = pybel_atom.OBAtom
json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
if pybel_atom.OBAtom.HasData('color'):
obatom = pybel_atom.OBAtom
json_atom['color'] = obatom.GetData('color').GetValue()
# Save number of bonds and indices of endpoint atoms
bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
b.GetEndAtom().GetIndex()],
'order': b.GetBondOrder()}
for b in ob.OBMolBondIter(molecule.OBMol)]
output = {'atoms': atoms, 'bonds': bonds, 'units': {}}
# If there's unit cell data, save it to the json output
if hasattr(molecule, 'unitcell'):
uc = molecule.unitcell
output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()]
for v in uc.GetCellVectors()]
density = (sum(atom.atomicmass for atom in molecule.atoms) /
(uc.GetCellVolume() * 0.6022))
output['density'] = density
output['units']['density'] = 'kg / L'
# Save the formula to json. Use Hill notation, just to have a standard.
element_count = Counter(GetSymbol(a.atomicnum) for a in molecule)
hill_count = []
for element in ['C', 'H']:
if element in element_count:
hill_count += [(element, element_count[element])]
del element_count[element]
hill_count += sorted(element_count.items())
# If it's a crystal, then reduce the Hill formula
div = (reduce(gcd, (c[1] for c in hill_count))
if hasattr(molecule, 'unitcell') else 1)
output['formula'] = ''.join(n if c / div == 1 else '%s%d' % (n, c / div)
for n, c in hill_count)
output['molecular_weight'] = molecule.molwt / div
output['units']['molecular_weight'] = 'g / mol'
# If the input has been given a name, add that
if name:
output['name'] = name
return output
def is_string(obj):
"""Wraps Python2.x and 3.x ways to test if string."""
try:
return isinstance(obj, basestring)
except NameError:
return isinstance(obj, str)
if __name__ == '__main__':
# Lazy converter to test this out
import sys
in_data, in_format, out_format = sys.argv[1:]
try:
with open(in_data) as in_file:
data = in_file.read()
except IOError:
data = in_data
print(convert(data, in_format, out_format, pretty=True))
| mit | -2,810,372,494,749,063,700 | 33.14978 | 79 | 0.611326 | false |
Dangetsu/vnr | Frameworks/Sakura/py/apps/browser/core/network.py | 1 | 6136 | # coding: utf8
# network.py
# 12/13/2012 jichi
__all__ = 'WbNetworkAccessManager',
import os
from PySide.QtNetwork import QNetworkAccessManager, QNetworkRequest, QNetworkDiskCache
from sakurakit import skfileio, sknetwork
from sakurakit.skdebug import dprint
import proxy, rc
## Cookie ##
class WbNetworkCookieJar(sknetwork.SkNetworkCookieJar):
def __init__(self, path, parent=None): # unicode
super(WbNetworkCookieJar, self).__init__(parent)
self.path = path
self.load()
self._injectCookies()
# Automatically save cookies using timer
from PySide.QtCore import QCoreApplication
qApp = QCoreApplication.instance()
qApp.aboutToQuit.connect(self.save)
# Persistent storage
def load(self): # unicode ->
path = self.path
if path and os.path.exists(path):
self.unmarshal(skfileio.readdata(path))
def save(self): # unicode -> bool
return bool(self.path) and skfileio.writedata(self.path, self.marshal())
def _injectCookies(self):
#from PySide.QtCore import QUrl
from PySide.QtNetwork import QNetworkCookie
import cookies
# Use parent cookie
setCookiesFromUrl = super(WbNetworkCookieJar, self).setCookiesFromUrl
for kvdict,urls in cookies.itercookies():
cookies = [QNetworkCookie(k,v) for k,v in kvdict.iteritems()]
for url in urls:
l = cookies
if url.startswith("http://www."):
domain = url.replace("http://www", '') # such as .dmm.co.jp
l = [QNetworkCookie(it) for it in l] # copy l
for c in l:
c.setDomain(domain)
self.setCookiesFromOriginalUrl(l, url)
# See: http://kancolle.wikia.com/wiki/Tutorial:_Proxy_Connection
#c = QNetworkCookie('ckcy', '1')
#c.setDomain("http://www.dmm.com")
#c.setPath("/netgame")
#self.setCookiesFromUrl([c], "http://www.dmm.com/netgame")
#c.setPath("/netgame_s")
#self.setCookiesFromUrl([c], "http://www.dmm.com/netgame_s")
# Proxy
def cookiesForUrl(self, url): # override
"""@reimp"""
url = proxy.fromproxyurl(url) or url
return super(WbNetworkCookieJar, self).cookiesForUrl(url)
def setCookiesFromUrl(self, cookies, url):
"""@reimp"""
url = proxy.fromproxyurl(url) or url
return super(WbNetworkCookieJar, self).setCookiesFromUrl(cookies, url)
# Expose API to set cookies without proxy
def setCookiesFromOriginalUrl(self, cookies, url):
return super(WbNetworkCookieJar, self).setCookiesFromUrl(cookies, url)
## Network ##
REQ_PROXY_URL = 'proxy'
class WbNetworkAccessManager(QNetworkAccessManager):
def __init__(self, parent=None):
super(WbNetworkAccessManager, self).__init__(parent)
self.sslErrors.connect(_WbNetworkAccessManager.onSslErrors)
self.finished.connect(_WbNetworkAccessManager.onReplyFinished)
# Enable offline cache
cache = QNetworkDiskCache(self)
cache.setCacheDirectory(rc.DIR_CACHE_NETMAN) # QNetworkDiskCache will create this directory if it does not exists.
self.setCache(cache)
# Load cookies
jar = WbNetworkCookieJar(rc.COOKIES_LOCATION)
self.setCookieJar(jar)
# QNetworkReply *createRequest(Operation op, const QNetworkRequest &req, QIODevice *outgoingData = nullptr) override;
def createRequest(self, op, req, outgoingData=None): # override
url = req.url()
#print url
#if url.scheme() == 'https' and url.host() in ('www.dmm.com', 'dmm.com'):
# path = url.path()
# if path.startswith('/js/') or path.startswith('/css/'):
# url.setScheme('http') # downgrade to http
# req.setUrl(url)
# dprint("downgrade https to http:", url)
#print url
newurl = _WbNetworkAccessManager.getBlockedUrl(url)
if newurl:
req = QNetworkRequest(newurl)
else:
newurl = proxy.toproxyurl(url)
if newurl and newurl != url:
req = QNetworkRequest(req) # since request tis constent
req.setUrl(newurl)
_WbNetworkAccessManager.setRequestHeaders(req)
reply = super(WbNetworkAccessManager, self).createRequest(op, req, outgoingData)
#if url.host().lower().endswith('dmm.co.jp'):
reply.setUrl(url) # restore the old url
reply.setProperty(REQ_PROXY_URL, url)
#print newurl
return reply
#else:
# print url
_WbNetworkAccessManager.setRequestHeaders(req)
return super(WbNetworkAccessManager, self).createRequest(op, req, outgoingData)
class _WbNetworkAccessManager:
@staticmethod
def setRequestHeaders(req):
"""Set the http header
@param req QNetworkRequest
"""
if req.hasRawHeader('Referer'): # Delete Referer so that it will not get tracked
req.setRawHeader('Referer', '')
#req.setRawHeader('User-Agent', config.USER_AGENT) # handled in WebKit
#IP = '153.121.52.138'
#keys = 'X-Forwarded-For', 'Client-IP', 'X-Client-IP', 'Real-IP', 'X-Real-IP'
#for k in keys:
# req.setRawHeader(k, IP)
@staticmethod
def onReplyFinished(reply):
"""Fix the redirect URL
@param reply QNetworkReply
"""
proxyUrl = reply.property(REQ_PROXY_URL)
if proxyUrl:
#statusCode = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
redirectUrl = reply.attribute(QNetworkRequest.RedirectionTargetAttribute)
if redirectUrl:
if not redirectUrl.host() and redirectUrl != reply.url() and redirectUrl != proxyUrl:
redirectUrl.setHost(proxyUrl.host())
else:
redirectUrl = proxy.fromproxyurl(redirectUrl)
if redirectUrl:
reply.setAttribute(QNetworkRequest.RedirectionTargetAttribute, redirectUrl)
@staticmethod
def getBlockedUrl(url):
"""
@param url QUrl
@return unicode or QUrl or None
"""
if url.path() == '/js/localize_welcome.js': # for DMM
dprint("block dmm localize welcome")
return rc.DMM_LOCALIZED_WELCOME_URL
# http://stackoverflow.com/questions/8362506/qwebview-qt-webkit-wont-open-some-ssl-pages-redirects-not-allowed
@staticmethod
def onSslErrors(reply, errors): # QNetworkReply, [QSslError] ->
reply.ignoreSslErrors()
#dprint("ignore ssl error")
#print errors
# EOF
| gpl-3.0 | -7,939,426,695,941,175,000 | 33.088889 | 119 | 0.683833 | false |
strange/django-simple-comments | simple_comments/views.py | 1 | 1442 | from django import http
from simple_comments import comments
def get_configuration_or_404(configuration_key):
try:
return comments.get_configuration(configuration_key)
except comments.CommentConfigurationNotRegistered:
raise http.Http404
def create_comment(request, configuration_key, target_id, extra_context=None):
config = get_configuration_or_404(configuration_key)
return config.create_comment(request, target_id, extra_context)
def comment_posted(request, configuration_key, target_id, comment_id,
extra_context=None):
config = get_configuration_or_404(configuration_key)
return config.comment_posted(request, target_id, comment_id,
extra_context)
def delete_comment(request, configuration_key, target_id, comment_id):
config = get_configuration_or_404(configuration_key)
return config.delete_comment(request, target_id, comment_id)
def comment_deleted(request, configuration_key, target_id,
extra_context=None):
config = get_configuration_or_404(configuration_key)
return config.comment_deleted(request, target_id, comment_id,
extra_context)
def comment_list(request, configuration_key, target_id=None,
extra_context=None):
config = get_configuration_or_404(configuration_key)
return config.comment_list(request, target_id, extra_context)
| bsd-3-clause | -7,282,391,726,829,976,000 | 40.2 | 78 | 0.702497 | false |
jpvmm/DLearningExp | fuzzy.py | 1 | 4518 |
#Fuzzy Algorithm to mark calcifications in mammography
#I'm using Mandani Defuzzification
#FutureBox Analytics
from __future__ import division
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as p
from skimage.io import imread
from skimage.measure import label, regionprops
from skimage.exposure import histogram
from skimage.morphology import binary_closing,binary_opening, disk, skeletonize, dilation,rectangle
class Fuzzy:
''' A fuzzy class '''
def __init__(self, path):
self.path = path
def open_image(self):
''' Open the image given the path'''
img = imread(self.path)
counts,grays = histogram(img, nbins = 256)
#img = (img-0)/(255 -0)
return img,counts,grays
def Fuzzification(self,counts,grays):
''' Fuzzificate a image '''
# Regra 1 (Claros,Poucos) = 1
u1 = []
# Regra 2 (Claros,Medios) = 1
u2 = []
# Regra 3 (Claros,Muitos) == 0.5
u3 = []
# Regra 4 Aplicada (Cinza,Poucos) =0.5
u4 = []
# Regra 5 (Cinza,Medios) = 0.5
u5 = []
for i in range(counts.shape[0]):
u1 = np.append(u1,self.Few(counts[i])*self.White(grays[i]))
u2 = np.append(u2,self.Moderate(counts[i]) * self.White(grays[i]))
u3 = np.append(u3,self.Many(counts[i]) * self.White(grays[i]))
u4 = np.append(u4,self.Few(counts[i]) * self.Grey(grays[i]))
u5 = np.append(u5,self.Moderate(counts[i]) * self.Grey(grays[i]))
return u1,u2,u3,u4,u5
def Defuzzification(self,u1,u2,u3,u4,u5):
''' Defuzzification of an image using singletons function '''
CA = ((1*u1+1*u2+0.5*u3+0.5*u4+0.5*u5)/(u1+u2+u3+u4+u5)) #Center of AVERAGE!! function
CA[np.isnan(CA)] = 0
return CA
def Inference(self,img,counts,grays):
''' Fuzzy Inference System '''
u1,u2,u3,u4,u5 = self.Fuzzification(counts,grays)
defuzz = self.Defuzzification(u1,u2,u3,u4,u5)
imgdefuzz = img
imgsuspeito = img
for i in range(counts.shape[0]):
if defuzz[i] ==1 :
imgdefuzz[imgdefuzz == grays[i]] == 255
if defuzz[i] >= 0.5 and defuzz[i] < 0.9:
imgsuspeito[imgsuspeito == grays[i]] = 255
return defuzz,imgdefuzz,imgsuspeito
def Grey(self,gray_scale):
''' Grey Pixels fuzzy function '''
if gray_scale > 122 and gray_scale <= 141:
c = (gray_scale - 122)/(141-122)
elif gray_scale > 141 and gray_scale <= 179:
c = (179 - gray_scale)/(179 - 141)
else:
c = 0
return c
def White(self,gray_scale):
''' White Pixels Fuzzy Function'''
if gray_scale >= 170 and gray_scale <= 230:
c = (gray_scale - 170)/(230 -170)
elif gray_scale > 230:
c = 1
else:
c = 0
return c
def Black(self,gray_scale):
''' Black Pixels Fuzzy Function '''
if gray_scale >=77 and gray_scale <= 128:
c = (128 - gray_scale)/(128-77)
elif gray_scale < 77:
c = 1
else:
c = 0
return c
def Few(self,counts):
''' Fuzzy function for few pixels '''
if counts <= 250:
p = 1
elif counts > 250 and counts <= 450:
p = (450 - counts)/(450-250)
else:
p = 0
return p
def Moderate(self,counts):
''' Fuzzy Function to a reasonable amount of pixels '''
if counts >= 600 and counts <=750:
p = (counts - 600)/(750-600)
elif counts > 750 and counts <= 850:
p = 1
elif counts > 850 and counts <= 1050:
p = (1050 - counts)/(1050-850)
else:
p = 0
return p
def Many(self,counts):
''' Fuzzy function for many pixels '''
if counts >= 1000 and counts <= 2000:
p = (counts - 1000)/(2000-1000)
elif counts > 2000:
p = 1
else:
p = 0
return p
t = Fuzzy('/home/joao/UnB/Academia/Git/DATA/ROIs/49.pgm')
img2,counts,grays = t.open_image()
teste,imgdefuz,imgsus = t.Inference(img2,counts,grays)
#Binarization
binary = imgdefuz >= 255
se = disk(10)
binary_image = dilation(binary, se)
se3 = disk(20)
binary_image = binary_closing(binary_image,se3)
p.imshow(imgdefuz, cmap = cm.Greys_r)
p.show()
| gpl-3.0 | 8,367,580,780,083,394,000 | 27.77707 | 99 | 0.542939 | false |
RaVenHelm/SortingClass | SortingClass.py | 1 | 9012 | from copy import copy
class SortingUtilClass:
@staticmethod
def list_to_string(values):
count = len(values) - 1
res = ''
for n in values:
fmt_string = '{:<3}'
res += fmt_string.format(n)
return res
@staticmethod
def print_results(comparisons, swaps):
print()
print('Analysis: ')
print('\t{0:<12} {1:>3}'.format('Comarisons:', comparisons))
print('\t{0:<12} {1:>3}'.format('Swaps:', swaps))
print('\t{0:<12} {1:>3}'.format('Work:', comparisons + (5 * swaps)))
print()
@staticmethod
def print_title(assign_num, title):
print('Tyberius Enders')
print('Assignment {} - {}'.format(assign_num, title))
print()
@staticmethod
def print_loop_position(num, array):
print('Loop #{0} Array = {1}'.format(num, SortingUtilClass.list_to_string(array)))
@staticmethod
def print_comparison_level(array, comparison, spacing, print_list, adjust):
print('Comparison'.rjust(14), end=' ')
print('#{}'.format(comparison).ljust(spacing), end='')
print('{}'.format(SortingUtilClass.list_to_string(print_list)).rjust(adjust))
@staticmethod
def print_comparison_simple(comparison, values, low, high, **kwargs):
fmt = 'Comparison #{}'.format(comparison)
end_char = '\n'
if kwargs['stop']:
end_char = ''
if (comparison / 10) < 1:
print(fmt.rjust(18), end='')
else:
print(fmt.rjust(19), end='')
base_spaces = 3*low + 6
print('{}'.format(values[low]).rjust(base_spaces), end=end_char)
if kwargs['stop']:
print('{}'.format('(Stop)').rjust(8))
@staticmethod
def print_swap_simple(swap, values, low, high):
fmt = 'Swap #{}'.format(swap)
if (swap / 10) < 1:
print(fmt.rjust(12), end='')
else:
print(fmt.rjust(13), end='')
print('{}'.format(values[low]).rjust(3*low+12), end='')
print('{}'.format(values[high]).rjust(3*(high-low)))
@staticmethod
def print_swap_level(array, swap, spacing, print_list, adjust):
print('Swap'.rjust(8), end=' ')
print('#{}'.format(swap).ljust(spacing), end='')
print('{}'.format(SortingUtilClass.list_to_string(print_list).rjust(adjust)))
@staticmethod
def print_pivot_level(pivot):
pivot_spaces = 14
if (pivot / 10) >= 1:
pivot_spaces = 15
print('Pivot = {}'.format(pivot).rjust(pivot_spaces))
@staticmethod
def print_level_with_array(level, array):
print('Level {}:'.format(level), end='')
print('Array = {}'.format(SortingUtilClass.list_to_string(array)).rjust(15 + len(array) * 3))
@staticmethod
def print_high_low(high, low):
low_spaces = 12
if (low / 10) >= 1:
low_spaces = 11
print('Low = {}'.format(low).rjust(low_spaces))
high_spaces = 13
if (high / 10) >= 1:
high_spaces = 14
print('High = {}'.format(high).rjust(high_spaces))
@staticmethod
def print_qs_fn(low, high, index):
a = low
b = index - 1
c = index + 1
d = high
if a > b:
a, b = b, a
if c > d:
c, d = d, c
print('Calling QS ({}-{}) and ({}-{})'.format(a, b, c, d))
@staticmethod
def print_char_line(char):
for i in range(1,55):
print(char, end='')
print()
@staticmethod
def print_algorithm_title(title):
SortingUtilClass.print_char_line('#')
print(title)
print()
class SortingClass:
def __init__(self, to_print=True):
self.comparisons = 1
self.swaps = 1
self.level = 1
self.print = to_print
def set_defaults(self):
self.comparisons = 1
self.swaps = 1
self.level = 1
def finish(self):
if self.print:
SortingUtilClass.print_results(self.comparisons, self.swaps)
work = self.comparisons+(5*self.swaps)
return dict(comparisons=self.comparisons, swaps=self.swaps, work=work, level=self.level)
def get_analysis(self):
work = self.comparisons + (5*self.swaps)
return dict(comparisons=self.comparisons, swaps=self.swaps, work=work)
def bubble_sort(self, values):
n = len(values)
for i in range(n):
# print loop level
if self.print:
SortingUtilClass.print_loop_position(i+1,values)
for j in range(1,n):
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 2) + 7, [values[j-1], values[j]], j)
self.comparisons += 1
if values[j-1] > values[j]:
values[j-1], values[j] = values[j], values[j-1]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(j - 2) + 13, [values[j-1], values[j]], j)
self.swaps += 1
return self
def insertion_sort(self, values):
n = len(values)
for i in range(1,n):
j = i
if self.print:
SortingUtilClass.print_loop_position(j, values)
while j > 0 and values[j-1] > values[j]:
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
# swap values
values[j-1], values[j] = values[j], values[j-1]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(j - 1) + 10, [values[j-1], values[j]], j)
self.swaps += 1
j -= 1
else:
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
return self
def selection_sort(self, values):
n = len(values)
count = 1
for i in range(n-1, 0, -1):
# print loop level
if self.print:
SortingUtilClass.print_loop_position(count, values)
maximum = self.max_key(0, i, values)
# swap values
values[maximum], values[i] = values[i], values[maximum]
# print swaps
if self.print:
SortingUtilClass.print_swap_level(values, self.swaps, 3*(i-1) + 10, [values[maximum], values[i]], i)
self.swaps += 1
count += 1
return self
def max_key(self, low, high, values):
largest = low
for j in range(low+1, high+1):
# print comparison
if self.print:
SortingUtilClass.print_comparison_level(values, self.comparisons, 3*(j - 1) + 4, [values[j-1], values[j]], j)
self.comparisons += 1
if values[largest] < values[j]:
largest = j
# print max and array
if self.print:
pass
return largest
# Assignment 4 methods
def partition(self, values, low, high):
pivot = values[high]
i = low
# print pivot
if self.print:
SortingUtilClass.print_pivot_level(pivot)
for j in range(low, high):
# print comparison
if self.print:
to_stop = False
if values[j] >= pivot:
to_stop = True
SortingUtilClass.print_comparison_simple(self.comparisons, values, j, high, stop=to_stop)
self.comparisons += 1
if values[j] <= pivot:
if self.print:
print('Moving high...')
values[i], values[j] = values[j], values[i]
i += 1
# swap values
values[i], values[high] = values[high], values[i]
if self.print:
SortingUtilClass.print_swap_simple(self.swaps, values, i, high)
print('{}'.format('Moving low...').rjust(18))
self.swaps += 1
return i
def quick_sort(self, values, low, high):
# print level
# print array
# print low
# print high
if self.print:
SortingUtilClass.print_level_with_array(self.level, values)
SortingUtilClass.print_high_low(high, low)
if low < high:
p = self.partition(values, low, high)
self.level += 1
# print 'Calling QS'...
if self.print:
SortingUtilClass.print_qs_fn(low, high, p)
self.quick_sort(values, low, p - 1)
self.quick_sort(values, p + 1, high)
return self
# Heap sort methods (Extra)
def heap_sort(self, values, count):
self.heapify(values, count)
end = count - 1
while end > 0:
values[end], values[0] = values[0], values[end]
self.swaps += 1
end -= 1
self.sift_down(values, 0, end)
return self
def heapify(self, values, count):
start = int((count - 2) / 2)
while start >= 0:
self.sift_down(values, start, count - 1)
start -= 1
def sift_down(self, values, start, end):
root = start
while root * 2 + 1 <= end:
child = root * 2 + 1
if child + 1 <= end and values[child] < values[child + 1]:
child += 1
self.comparisons += 1
if values[root] < values[child]:
values[root], values[child] = values[child], values[root]
self.swaps += 1
root = child
self.comparisons += 1
else:
return
def all(self, orig):
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Bubble Sort')
self.bubble_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Insertion Sort')
self.insertion_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Selection Sort')
self.selection_sort(values).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Quick Sort')
self.quick_sort(values, 0, len(values) - 1).finish()
values = copy(orig)
self.set_defaults()
if self.print:
SortingUtilClass.print_algorithm_title('Heap Sort')
self.heap_sort(values, len(values)).finish()
| mit | 2,211,133,984,667,170,600 | 26.063063 | 114 | 0.642255 | false |
dgouldin/myspaceid-python-sdk | src/openid/fetchers.py | 1 | 14001 | # -*- test-case-name: openid.test.test_fetchers -*-
"""
This module contains the HTTP fetcher interface and several implementations.
"""
__all__ = ['fetch', 'getDefaultFetcher', 'setDefaultFetcher', 'HTTPResponse',
'HTTPFetcher', 'createHTTPFetcher', 'HTTPFetchingError',
'HTTPError']
import urllib2
import time
import cStringIO
import sys
import openid
import openid.urinorm
# Try to import httplib2 for caching support
# http://bitworking.org/projects/httplib2/
try:
import httplib2
except ImportError:
# httplib2 not available
httplib2 = None
# try to import pycurl, which will let us use CurlHTTPFetcher
try:
import pycurl
except ImportError:
pycurl = None
USER_AGENT = "python-openid/%s (%s)" % (openid.__version__, sys.platform)
MAX_RESPONSE_KB = 1024
def fetch(url, body=None, headers=None):
"""Invoke the fetch method on the default fetcher. Most users
should need only this method.
@raises Exception: any exceptions that may be raised by the default fetcher
"""
fetcher = getDefaultFetcher()
return fetcher.fetch(url, body, headers)
def createHTTPFetcher():
"""Create a default HTTP fetcher instance
prefers Curl to urllib2."""
if pycurl is None:
fetcher = Urllib2Fetcher()
else:
fetcher = CurlHTTPFetcher()
return fetcher
# Contains the currently set HTTP fetcher. If it is set to None, the
# library will call createHTTPFetcher() to set it. Do not access this
# variable outside of this module.
_default_fetcher = None
def getDefaultFetcher():
"""Return the default fetcher instance
if no fetcher has been set, it will create a default fetcher.
@return: the default fetcher
@rtype: HTTPFetcher
"""
global _default_fetcher
if _default_fetcher is None:
setDefaultFetcher(createHTTPFetcher())
return _default_fetcher
def setDefaultFetcher(fetcher, wrap_exceptions=True):
"""Set the default fetcher
@param fetcher: The fetcher to use as the default HTTP fetcher
@type fetcher: HTTPFetcher
@param wrap_exceptions: Whether to wrap exceptions thrown by the
fetcher wil HTTPFetchingError so that they may be caught
easier. By default, exceptions will be wrapped. In general,
unwrapped fetchers are useful for debugging of fetching errors
or if your fetcher raises well-known exceptions that you would
like to catch.
@type wrap_exceptions: bool
"""
global _default_fetcher
if fetcher is None or not wrap_exceptions:
_default_fetcher = fetcher
else:
_default_fetcher = ExceptionWrappingFetcher(fetcher)
def usingCurl():
"""Whether the currently set HTTP fetcher is a Curl HTTP fetcher."""
return isinstance(getDefaultFetcher(), CurlHTTPFetcher)
class HTTPResponse(object):
"""XXX document attributes"""
headers = None
status = None
body = None
final_url = None
def __init__(self, final_url=None, status=None, headers=None, body=None):
self.final_url = final_url
self.status = status
self.headers = headers
self.body = body
def __repr__(self):
return "<%s status %s for %s>" % (self.__class__.__name__,
self.status,
self.final_url)
class HTTPFetcher(object):
"""
This class is the interface for openid HTTP fetchers. This
interface is only important if you need to write a new fetcher for
some reason.
"""
def fetch(self, url, body=None, headers=None):
"""
This performs an HTTP POST or GET, following redirects along
the way. If a body is specified, then the request will be a
POST. Otherwise, it will be a GET.
@param headers: HTTP headers to include with the request
@type headers: {str:str}
@return: An object representing the server's HTTP response. If
there are network or protocol errors, an exception will be
raised. HTTP error responses, like 404 or 500, do not
cause exceptions.
@rtype: L{HTTPResponse}
@raise Exception: Different implementations will raise
different errors based on the underlying HTTP library.
"""
raise NotImplementedError
def _allowedURL(url):
return url.startswith('http://') or url.startswith('https://')
class HTTPFetchingError(Exception):
"""Exception that is wrapped around all exceptions that are raised
by the underlying fetcher when using the ExceptionWrappingFetcher
@ivar why: The exception that caused this exception
"""
def __init__(self, why=None):
Exception.__init__(self, why)
self.why = why
class ExceptionWrappingFetcher(HTTPFetcher):
"""Fetcher that wraps another fetcher, causing all exceptions
@cvar uncaught_exceptions: Exceptions that should be exposed to the
user if they are raised by the fetch call
"""
uncaught_exceptions = (SystemExit, KeyboardInterrupt, MemoryError)
def __init__(self, fetcher):
self.fetcher = fetcher
def fetch(self, *args, **kwargs):
try:
return self.fetcher.fetch(*args, **kwargs)
except self.uncaught_exceptions:
raise
except:
exc_cls, exc_inst = sys.exc_info()[:2]
if exc_inst is None:
# string exceptions
exc_inst = exc_cls
raise HTTPFetchingError(why=exc_inst)
class Urllib2Fetcher(HTTPFetcher):
"""An C{L{HTTPFetcher}} that uses urllib2.
"""
# Parameterized for the benefit of testing frameworks, see
# http://trac.openidenabled.com/trac/ticket/85
urlopen = staticmethod(urllib2.urlopen)
def fetch(self, url, body=None, headers=None):
if not _allowedURL(url):
raise ValueError('Bad URL scheme: %r' % (url,))
if headers is None:
headers = {}
headers.setdefault(
'User-Agent',
"%s Python-urllib/%s" % (USER_AGENT, urllib2.__version__,))
headers.setdefault(
'Range',
'0-%s' % (1024*MAX_RESPONSE_KB,))
req = urllib2.Request(url, data=body, headers=headers)
try:
f = self.urlopen(req)
try:
return self._makeResponse(f)
finally:
f.close()
except urllib2.HTTPError, why:
try:
return self._makeResponse(why)
finally:
why.close()
def _makeResponse(self, urllib2_response):
resp = HTTPResponse()
resp.body = urllib2_response.read(MAX_RESPONSE_KB * 1024)
resp.final_url = urllib2_response.geturl()
resp.headers = dict(urllib2_response.info().items())
if hasattr(urllib2_response, 'code'):
resp.status = urllib2_response.code
else:
resp.status = 200
return resp
class HTTPError(HTTPFetchingError):
"""
This exception is raised by the C{L{CurlHTTPFetcher}} when it
encounters an exceptional situation fetching a URL.
"""
pass
# XXX: define what we mean by paranoid, and make sure it is.
class CurlHTTPFetcher(HTTPFetcher):
"""
An C{L{HTTPFetcher}} that uses pycurl for fetching.
See U{http://pycurl.sourceforge.net/}.
"""
ALLOWED_TIME = 20 # seconds
def __init__(self):
HTTPFetcher.__init__(self)
if pycurl is None:
raise RuntimeError('Cannot find pycurl library')
def _parseHeaders(self, header_file):
header_file.seek(0)
# Remove the status line from the beginning of the input
unused_http_status_line = header_file.readline()
lines = [line.strip() for line in header_file]
# and the blank line from the end
empty_line = lines.pop()
if empty_line:
raise HTTPError("No blank line at end of headers: %r" % (line,))
headers = {}
for line in lines:
try:
name, value = line.split(':', 1)
except ValueError:
raise HTTPError(
"Malformed HTTP header line in response: %r" % (line,))
value = value.strip()
# HTTP headers are case-insensitive
name = name.lower()
headers[name] = value
return headers
def _checkURL(self, url):
# XXX: document that this can be overridden to match desired policy
# XXX: make sure url is well-formed and routeable
return _allowedURL(url)
def fetch(self, url, body=None, headers=None):
stop = int(time.time()) + self.ALLOWED_TIME
off = self.ALLOWED_TIME
if headers is None:
headers = {}
headers.setdefault('User-Agent',
"%s %s" % (USER_AGENT, pycurl.version,))
header_list = []
if headers is not None:
for header_name, header_value in headers.iteritems():
header_list.append('%s: %s' % (header_name, header_value))
c = pycurl.Curl()
try:
c.setopt(pycurl.NOSIGNAL, 1)
if header_list:
c.setopt(pycurl.HTTPHEADER, header_list)
# Presence of a body indicates that we should do a POST
if body is not None:
c.setopt(pycurl.POST, 1)
c.setopt(pycurl.POSTFIELDS, body)
while off > 0:
if not self._checkURL(url):
raise HTTPError("Fetching URL not allowed: %r" % (url,))
data = cStringIO.StringIO()
def write_data(chunk):
if data.tell() > 1024*MAX_RESPONSE_KB:
return 0
else:
return data.write(chunk)
response_header_data = cStringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, write_data)
c.setopt(pycurl.HEADERFUNCTION, response_header_data.write)
c.setopt(pycurl.TIMEOUT, off)
c.setopt(pycurl.URL, openid.urinorm.urinorm(url))
c.setopt(pycurl.RANGE, '0-%s'%(MAX_RESPONSE_KB*1024))
c.perform()
response_headers = self._parseHeaders(response_header_data)
code = c.getinfo(pycurl.RESPONSE_CODE)
if code in [301, 302, 303, 307]:
url = response_headers.get('location')
if url is None:
raise HTTPError(
'Redirect (%s) returned without a location' % code)
# Redirects are always GETs
c.setopt(pycurl.POST, 0)
# There is no way to reset POSTFIELDS to empty and
# reuse the connection, but we only use it once.
else:
resp = HTTPResponse()
resp.headers = response_headers
resp.status = code
resp.final_url = url
resp.body = data.getvalue()
return resp
off = stop - int(time.time())
raise HTTPError("Timed out fetching: %r" % (url,))
finally:
c.close()
class HTTPLib2Fetcher(HTTPFetcher):
"""A fetcher that uses C{httplib2} for performing HTTP
requests. This implementation supports HTTP caching.
@see: http://bitworking.org/projects/httplib2/
"""
def __init__(self, cache=None):
"""@param cache: An object suitable for use as an C{httplib2}
cache. If a string is passed, it is assumed to be a
directory name.
"""
if httplib2 is None:
raise RuntimeError('Cannot find httplib2 library. '
'See http://bitworking.org/projects/httplib2/')
super(HTTPLib2Fetcher, self).__init__()
# An instance of the httplib2 object that performs HTTP requests
self.httplib2 = httplib2.Http(cache)
# We want httplib2 to raise exceptions for errors, just like
# the other fetchers.
self.httplib2.force_exception_to_status_code = False
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by httplib2
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
method = 'POST'
else:
method = 'GET'
if headers is None:
headers = {}
headers.setdefault(
'Range',
'0-%s' % (1024*MAX_RESPONSE_KB,))
# httplib2 doesn't check to make sure that the URL's scheme is
# 'http' so we do it here.
if not (url.startswith('http://') or url.startswith('https://')):
raise ValueError('URL is not a HTTP URL: %r' % (url,))
httplib2_response, content = self.httplib2.request(
url, method, body=body, headers=headers)
# Translate the httplib2 response to our HTTP response abstraction
# When a 400 is returned, there is no "content-location"
# header set. This seems like a bug to me. I can't think of a
# case where we really care about the final URL when it is an
# error response, but being careful about it can't hurt.
try:
final_url = httplib2_response['content-location']
except KeyError:
# We're assuming that no redirects occurred
assert not httplib2_response.previous
# And this should never happen for a successful response
assert httplib2_response.status != 200
final_url = url
return HTTPResponse(
body=content,
final_url=final_url,
headers=dict(httplib2_response.items()),
status=httplib2_response.status,
)
| apache-2.0 | -4,169,098,957,301,690,000 | 31.560465 | 79 | 0.592029 | false |
g-weatherill/oq-risklib | openquake/commonlib/tests/_utils.py | 1 | 1745 | # Copyright (c) 2010-2014, GEM Foundation.
#
# NRML is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NRML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with NRML. If not, see <http://www.gnu.org/licenses/>.
from xml.etree.ElementTree import parse
from openquake.baselib.general import writetmp
from openquake.commonlib.writers import tostring
from openquake.commonlib.nrml import PARSE_NS_MAP
def get_path(fname_or_fileobject):
if isinstance(fname_or_fileobject, str):
return fname_or_fileobject
elif hasattr(fname_or_fileobject, 'getvalue'):
return writetmp(fname_or_fileobject.getvalue())
elif hasattr(fname_or_fileobject, 'name'):
return fname_or_fileobject.name
else:
return TypeError(fname_or_fileobject)
def assert_xml_equal(a, b):
"""
Compare two XML artifacts for equality.
:param a, b:
Paths to XML files, or a file-like object containing the XML
contents.
"""
path_a = get_path(a)
path_b = get_path(b)
content_a = tostring(parse(a).getroot(), nsmap=PARSE_NS_MAP)
content_b = tostring(parse(b).getroot(), nsmap=PARSE_NS_MAP)
if content_a != content_b:
raise AssertionError('The files %s and %s are different!' %
(path_a, path_b))
| agpl-3.0 | -301,822,622,540,493,440 | 35.354167 | 74 | 0.699713 | false |
tuxlifan/moneyguru | core/model/currency.py | 1 | 17254 | # Copyright 2017 Virgil Dupras
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
"""This module facilitates currencies management. It exposes :class:`Currency` which lets you
easily figure out their exchange value.
"""
import os
from datetime import datetime, date, timedelta
import logging
import sqlite3 as sqlite
import threading
from queue import Queue, Empty
from operator import attrgetter
from hscommon.util import iterdaterange
class CurrencyNotSupportedException(Exception):
"""The current exchange rate provider doesn't support the requested currency."""
class RateProviderUnavailable(Exception):
"""The rate provider is temporarily unavailable."""
class Currency:
"""Represents a currency and allow easy exchange rate lookups.
A ``Currency`` instance is created with either a 3-letter ISO code or with a full name. If it's
present in the database, an instance will be returned. If not, ``ValueError`` is raised. The
easiest way to access a currency instance, however, is by using module-level constants. For
example::
>>> from hscommon.currency import USD, EUR
>>> from datetime import date
>>> USD.value_in(EUR, date.today())
0.6339119851386843
    Unless a :class:`RatesDB` global instance is set through :meth:`Currency.set_rates_db`, however,
only fallback values will be used as exchange rates.
"""
all = []
by_code = {}
by_name = {}
rates_db = None
def __new__(cls, code=None, name=None):
"""Returns the currency with the given code."""
assert (code is None and name is not None) or (code is not None and name is None)
if code is not None:
code = code.upper()
try:
return cls.by_code[code]
except KeyError:
raise ValueError('Unknown currency code: %r' % code)
else:
try:
return cls.by_name[name]
except KeyError:
raise ValueError('Unknown currency name: %r' % name)
def __getnewargs__(self):
return (self.code,)
def __getstate__(self):
return None
def __setstate__(self, state):
pass
def __repr__(self):
return '<Currency %s>' % self.code
@staticmethod
def register(
code, name, exponent=2, start_date=None, start_rate=1, stop_date=None, latest_rate=1,
priority=100):
"""Registers a new currency and returns it.
``priority`` determines the order of currencies in :meth:`all`. Lower priority goes first.
"""
code = code.upper()
if code in Currency.by_code:
return Currency.by_code[code]
assert name not in Currency.by_name
currency = object.__new__(Currency)
currency.code = code
currency.name = name
currency.exponent = exponent
currency.start_date = start_date
currency.start_rate = start_rate
currency.stop_date = stop_date
currency.latest_rate = latest_rate
currency.priority = priority
Currency.by_code[code] = currency
Currency.by_name[name] = currency
Currency.all.append(currency)
return currency
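    # A registration sketch, not part of the shipped currency list: it uses the ISO 4217
    # test code XTS and placeholder rates, just to show the call shape.
    #
    #     XTS = Currency.register(
    #         'XTS', 'Test currency', exponent=2,
    #         start_date=date(2000, 1, 1), start_rate=1, latest_rate=1, priority=100,
    #     )
    #
    # Lower `priority` values come first in Currency.all once sort_currencies() has run.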
@staticmethod
def reset_currencies():
# Clear all currencies except USD, EUR and CAD because these are directly imported in too
# many modules and we depend on those instances being present at too many places.
# For now, this is only called during testing.
Currency.all = [c for c in Currency.all if c.code in {'CAD', 'USD', 'EUR'}]
Currency.by_name = {c.name: c for c in Currency.all}
Currency.by_code = {c.code: c for c in Currency.all}
Currency.rates_db = None
Currency.sort_currencies()
@staticmethod
def set_rates_db(db):
"""Sets a new currency ``RatesDB`` instance to be used with all ``Currency`` instances.
"""
Currency.rates_db = db
@staticmethod
def get_rates_db():
"""Returns the current ``RatesDB`` instance.
"""
if Currency.rates_db is None:
Currency.rates_db = RatesDB() # Make sure we always have some db to work with
return Currency.rates_db
@staticmethod
def sort_currencies():
Currency.all = sorted(Currency.all, key=attrgetter('priority', 'code'))
def rates_date_range(self):
"""Returns the range of date for which rates are available for this currency."""
return self.get_rates_db().date_range(self.code)
def value_in(self, currency, date):
"""Returns the value of this currency in terms of the other currency on the given date."""
if self.start_date is not None and date < self.start_date:
return self.start_rate
elif self.stop_date is not None and date > self.stop_date:
return self.latest_rate
else:
return self.get_rates_db().get_rate(date, self.code, currency.code)
def set_CAD_value(self, value, date):
"""Sets the currency's value in CAD on the given date."""
self.get_rates_db().set_CAD_value(date, self.code, value)
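    # A minimal sketch of manual rate entry; the rate value is hypothetical and an
    # in-memory RatesDB (defined below in this module) is assumed:
    #
    #     Currency.set_rates_db(RatesDB())            # ':memory:' db
    #     USD.set_CAD_value(1.01, date(2017, 1, 3))   # 1 USD = 1.01 CAD on that day
    #     USD.value_in(CAD, date(2017, 1, 3))         # --> 1.01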
# For legacy purposes, we create USD, EUR and CAD in here, but all other currencies are app-defined.
USD = Currency.register(
'USD', 'U.S. dollar',
start_date=date(1998, 1, 2), start_rate=1.425, latest_rate=1.0128
)
EUR = Currency.register(
'EUR', 'European Euro',
start_date=date(1999, 1, 4), start_rate=1.8123, latest_rate=1.3298
)
CAD = Currency.register('CAD', 'Canadian dollar', latest_rate=1)
def date2str(date):
return '%d%02d%02d' % (date.year, date.month, date.day)
class RatesDB:
"""Stores exchange rates for currencies.
    The currencies are identified by their ISO 4217 code (USD, CAD, EUR, etc.).
    The rates are stored as floats and represent the value of the currency in CAD.
"""
    def __init__(self, db_or_path=':memory:', async_=True):
        self._cache = {} # {(date, currency): CAD value}
self.db_or_path = db_or_path
if isinstance(db_or_path, str):
self.con = sqlite.connect(str(db_or_path))
else:
self.con = db_or_path
self._execute("select * from rates where 1=2")
self._rate_providers = []
        self.async_ = async_
self._fetched_values = Queue()
self._fetched_ranges = {} # a currency --> (start, end) map
def _execute(self, *args, **kwargs):
def create_tables():
# date is stored as a TEXT YYYYMMDD
sql = "create table rates(date TEXT, currency TEXT, rate REAL NOT NULL)"
self.con.execute(sql)
sql = "create unique index idx_rate on rates (date, currency)"
self.con.execute(sql)
try:
return self.con.execute(*args, **kwargs)
except sqlite.OperationalError: # new db, or other problems
try:
create_tables()
except Exception:
logging.warning("Messy problems with the currency db, starting anew with a memory db")
self.con = sqlite.connect(':memory:')
create_tables()
except sqlite.DatabaseError: # corrupt db
logging.warning("Corrupt currency database at {0}. Starting over.".format(repr(self.db_or_path)))
if isinstance(self.db_or_path, str) and self.db_or_path != ':memory:':
self.con.close()
os.remove(str(self.db_or_path))
self.con = sqlite.connect(str(self.db_or_path))
else:
self.con = sqlite.connect(':memory:')
create_tables()
        return self.con.execute(*args, **kwargs) # try again
def _seek_value_in_CAD(self, str_date, currency_code):
if currency_code == 'CAD':
return 1
def seek(date_op, desc):
sql = "select rate from rates where date %s ? and currency = ? order by date %s limit 1" % (date_op, desc)
cur = self._execute(sql, [str_date, currency_code])
row = cur.fetchone()
if row:
return row[0]
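        # Fallback order: closest rate at or before str_date, then closest rate after it,
        # then the currency's hardcoded latest_rate.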
return seek('<=', 'desc') or seek('>=', '') or Currency(currency_code).latest_rate
def _ensure_filled(self, date_start, date_end, currency_code):
"""Make sure that the cache contains *something* for each of the dates in the range.
Sometimes, our provider doesn't return us the range we sought. When it does, it usually
means that it never will and to avoid repeatedly querying those ranges forever, we have to
fill them. We use the closest rate for this.
"""
# We don't want to fill today, because we want to repeatedly fetch that one until the
# provider gives it to us.
if date_end >= date.today():
date_end = date.today() - timedelta(1)
sql = "select rate from rates where date = ? and currency = ?"
for curdate in iterdaterange(date_start, date_end):
cur = self._execute(sql, [date2str(curdate), currency_code])
if cur.fetchone() is None:
nearby_rate = self._seek_value_in_CAD(date2str(curdate), currency_code)
self.set_CAD_value(curdate, currency_code, nearby_rate)
logging.debug("Filled currency void for %s at %s (value: %2.2f)", currency_code, curdate, nearby_rate)
def _save_fetched_rates(self):
while True:
try:
rates, currency, fetch_start, fetch_end = self._fetched_values.get_nowait()
logging.debug("Saving %d rates for the currency %s", len(rates), currency)
for rate_date, rate in rates:
if not rate:
logging.debug("Empty rate for %s. Skipping", rate_date)
continue
logging.debug("Saving rate %2.2f for %s", rate, rate_date)
self.set_CAD_value(rate_date, currency, rate)
self._ensure_filled(fetch_start, fetch_end, currency)
logging.debug("Finished saving rates for currency %s", currency)
except Empty:
break
def clear_cache(self):
self._cache = {}
def date_range(self, currency_code):
"""Returns (start, end) of the cached rates for currency.
Returns a tuple ``(start_date, end_date)`` representing dates covered in the database for
currency ``currency_code``. If there are gaps, they are not accounted for (subclasses that
automatically update themselves are not supposed to introduce gaps in the db).
"""
sql = "select min(date), max(date) from rates where currency = '%s'" % currency_code
cur = self._execute(sql)
start, end = cur.fetchone()
if start and end:
convert = lambda s: datetime.strptime(s, '%Y%m%d').date()
return convert(start), convert(end)
else:
return None
def get_rate(self, date, currency1_code, currency2_code):
"""Returns the exchange rate between currency1 and currency2 for date.
The rate returned means '1 unit of currency1 is worth X units of currency2'.
The rate of the nearest date that is smaller than 'date' is returned. If
there is none, a seek for a rate with a higher date will be made.
"""
# We want to check self._fetched_values for rates to add.
if not self._fetched_values.empty():
self._save_fetched_rates()
# This method is a bottleneck and has been optimized for speed.
value1 = None
value2 = None
if currency1_code == 'CAD':
value1 = 1
else:
value1 = self._cache.get((date, currency1_code))
if currency2_code == 'CAD':
value2 = 1
else:
value2 = self._cache.get((date, currency2_code))
if value1 is None or value2 is None:
str_date = date2str(date)
if value1 is None:
value1 = self._seek_value_in_CAD(str_date, currency1_code)
self._cache[(date, currency1_code)] = value1
if value2 is None:
value2 = self._seek_value_in_CAD(str_date, currency2_code)
self._cache[(date, currency2_code)] = value2
return value1 / value2
def set_CAD_value(self, date, currency_code, value):
"""Sets the daily value in CAD for currency at date"""
# we must clear the whole cache because there might be other dates affected by this change
# (dates when the currency server has no rates).
self.clear_cache()
str_date = date2str(date)
sql = "replace into rates(date, currency, rate) values(?, ?, ?)"
self._execute(sql, [str_date, currency_code, value])
self.con.commit()
def register_rate_provider(self, rate_provider):
"""Adds `rate_provider` to the list of providers supported by this DB.
        A provider is a function(currency, start_date, end_date) that returns a list of
        (rate_date, float_rate) as a result. This function will be called asynchronously, so it's ok
if it takes a long time to return.
The rates returned must be the value of 1 `currency` in CAD (Canadian Dollars) at the
specified date.
The provider can be asked for any currency. If it doesn't support it, it has to raise
CurrencyNotSupportedException.
        If we support the currency but there is no rate available for the specified range,
simply return an empty list or None.
"""
self._rate_providers.append(rate_provider)
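        # Example (illustrative sketch only; the provider name and the 1.30 rate
        # are made up, not part of this module):
        #
        #     def fake_usd_provider(currency, start_date, end_date):
        #         if currency != 'USD':
        #             raise CurrencyNotSupportedException()
        #         return [(d, 1.30) for d in iterdaterange(start_date, end_date)]
        #
        #     ratesdb.register_rate_provider(fake_usd_provider)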
def ensure_rates(self, start_date, currencies):
"""Ensures that the DB has all the rates it needs for 'currencies' between 'start_date' and today
If there is any rate missing, a request will be made to the currency server. The requests
are made asynchronously.
"""
def do():
for currency, fetch_start, fetch_end in currencies_and_range:
logging.debug("Fetching rates for %s for date range %s to %s", currency, fetch_start, fetch_end)
for rate_provider in self._rate_providers:
try:
values = rate_provider(currency, fetch_start, fetch_end)
except CurrencyNotSupportedException:
continue
except RateProviderUnavailable:
logging.warning("Fetching of %s failed due to temporary problems.", currency)
break
else:
if not values:
# We didn't get any value from the server, which means that we asked for
# rates that couldn't be delivered. Still, we report empty values so
# that the cache can correctly remember this unavailability so that we
# don't repeatedly fetch those ranges.
values = []
self._fetched_values.put((values, currency, fetch_start, fetch_end))
logging.debug("Fetching successful!")
break
else:
logging.debug("Fetching failed!")
if start_date >= date.today():
return # we never return rates in the future
currencies_and_range = []
for currency in currencies:
if currency == 'CAD':
continue
try:
cached_range = self._fetched_ranges[currency]
except KeyError:
cached_range = self.date_range(currency)
range_start = start_date
# Don't try to fetch today's rate, it's never there and results in useless server
# hitting.
range_end = date.today() - timedelta(1)
if cached_range is not None:
cached_start, cached_end = cached_range
if range_start >= cached_start:
# Make a forward fetch
range_start = cached_end + timedelta(days=1)
else:
# Make a backward fetch
range_end = cached_start - timedelta(days=1)
# We don't want to fetch ranges that are too big. It can cause various problems, such
# as hangs. We prefer to take smaller bites.
cur_start = cur_end = range_start
while cur_end < range_end:
cur_end = min(cur_end + timedelta(days=30), range_end)
currencies_and_range.append((currency, cur_start, cur_end))
cur_start = cur_end
self._fetched_ranges[currency] = (start_date, date.today())
if self.async:
threading.Thread(target=do).start()
else:
do()
def initialize_db(path):
"""Initialize the app wide currency db if not already initialized."""
ratesdb = RatesDB(str(path))
Currency.set_rates_db(ratesdb)
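# Minimal usage sketch (file name, dates and currencies are illustrative):
#
#     ratesdb = RatesDB('rates.db')
#     Currency.set_rates_db(ratesdb)
#     ratesdb.ensure_rates(date(2015, 1, 1), ['USD', 'EUR'])
#     usd_per_eur = ratesdb.get_rate(date(2015, 1, 2), 'EUR', 'USD')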
| gpl-3.0 | 8,473,458,377,083,210,000 | 41.707921 | 118 | 0.596789 | false |
resolutedreamer/IR-Transmit-Receive | scripts/etc/init.d/notify_ip.py | 1 | 6098 | import smtplib
import subprocess
import sys
import time
import threading
import datetime
from email.mime.text import MIMEText
# Please Fill in with Correct Information to use
SMTP_SERVER = "smtp.gmail.com:587"
SMTP_UNAME = "[email protected]"
SMTP_PASSWD = "incorrect_password"
DEFAULT_NOTIFY_PAUSE = 3600
DEFAULT_CHECK_INTERVAL = 60
CELL_SERVICE_DOMAIN = {
"att": "txt.att.net",
"verizon": "vtext.com",
"tmobile": "tmomail.net",
"sprint": "messaging.sprintpcs.com",
"virgin": "vmobl.com",
"uscellular": "email.uscc.net",
"nextel": "messaging.nextel.com",
"boost": "myboostmobile.com",
"alltel": "message.alltel.com"
}
class Notification(object):
"""The base class for all notification objects.
Notification subclasses must implement the notify() method, which
actually sends the notification.
"""
def __init__(self, condition, arg, notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
            condition (function): A function that takes one argument (arg)
                and returns a boolean, which indicates whether the notification
should be sent.
arg (any) : arg is passed to condition function. It can be anything
the user wants.
notify_pause (int, optional): The number of seconds to wait after
sending a notification before sending a repeat notification.
"""
self.condition = condition
self.arg = arg
        self.notify_pause = notify_pause
        # Initialise here so try_notify() works even for subclasses that do not
        # set last_notify_time themselves.
        self.last_notify_time = 0
def try_notify(self):
"""Tries to send the notification if the condition is satisfied and
we haven't already sent a notification too recently.
"""
if self.last_notify_time == 0:
notify_ready_time = 0
else:
notify_ready_time = self.last_notify_time + self.notify_pause
if self.condition(self.arg) and notify_ready_time < time.time():
self.notify()
self.last_notify_time = time.time()
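    # Example (sketch): `condition` is any callable taking `arg` and returning a
    # bool; the sensor object and its API below are hypothetical.
    #
    #     def over_threshold(sensor):
    #         return sensor.read_watts() > 1500
    #
    #     EmailNotification("[email protected]", "Power spike detected",
    #                       over_threshold, my_sensor, notify_pause=600)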
class EmailNotification(Notification):
"""Sends email notifications"""
def __init__(self, email, msg, condition, arg,
notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
email (string): The email to send the notification to.
msg (string): The message to send in the email.
condition, arg, notify_pause: Same as for Notification.
"""
self.email = email
self.msg = msg
super(EmailNotification, self).__init__(condition, arg,
notify_pause)
self.last_notify_time = 0
def notify(self):
"""Sends the email notification"""
subject = "Energy Dashboard Notification"
from_addr = "[email protected]"
if hasattr(self.msg, "__call__"):
mimetext = MIMEText(self.msg())
else:
mimetext = MIMEText(self.msg)
mimetext["Subject"] = subject
mimetext["From"] = from_addr
mimetext["To"] = self.email
server = smtplib.SMTP(SMTP_SERVER)
server.starttls()
server.login(SMTP_UNAME, SMTP_PASSWD)
server.sendmail(from_addr, [self.email], mimetext.as_string())
server.quit()
class TxtNotification(EmailNotification):
"""Sends text message notifications"""
def __init__(self, number, service, msg, condition, args,
notify_pause=DEFAULT_NOTIFY_PAUSE):
"""
Args:
number (int or string): The phone number to receive the text
message.
service (string): Must be one of the keys of CELL_SERVICE_DOMAIN.
msg (string): The content of the text message.
condition, args, notify_pause: Same as for Notification.
"""
email = str(number) + "@" + CELL_SERVICE_DOMAIN[service]
super(TxtNotification, self).__init__(email, msg, condition, args,
notify_pause)
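        # e.g. number="5551234567" with service="att" is delivered by emailing
        # "[email protected]" (assuming the carrier's email-to-SMS gateway).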
class NotificationManager(threading.Thread):
"""Thread that will continue to try to send notifications if the
notification conditions are satisfied.
"""
def __init__(self, check_interval=DEFAULT_CHECK_INTERVAL):
"""
Args:
check_interval (int, optional): The number of seconds to wait in
between checking the conditions of the notifications.
"""
super(NotificationManager, self).__init__()
self.notifications = []
self.active = False
self.check_interval = check_interval
def add_notification(self, notification):
"""Adds a notification to monitor.
Args:
notification (subclass of Notification): The notification to
monitor.
"""
self.notifications.append(notification)
def start_notifications(self):
"""Start the notification thread."""
self.active = True
self.start()
def stop_notifications(self):
"""Stop the notification thread."""
self.active = False
def run(self):
"""Runs inside the thread. You don't need to call this."""
while self.active:
self._send_notifications()
time.sleep(self.check_interval)
def _send_notifications(self):
for notif in self.notifications:
notif.try_notify()
if __name__ == "__main__":
time.sleep(10)
nm = NotificationManager()
output = subprocess.check_output("ifconfig", shell=True)
addr_index = output.rfind('inet addr:')
ip_start_index = (addr_index + 10)
ip_max_index = (ip_start_index + 16)
ip = output[ip_start_index:ip_max_index]
ip_true_end_index = ip.find(' ')
ip = ip[:ip_true_end_index]
msg = "Time is: " + str(datetime.datetime.now()) + '\n' + 'ip is ' + ip
email_notification = EmailNotification("[email protected]", msg, lambda x: True, None, notify_pause=60)
txtnot = TxtNotification("8001234567", "att", msg,
lambda x: True, None, notify_pause=60)
nm.add_notification(email_notification)
nm.add_notification(txtnot)
nm.start_notifications()
time.sleep(5)
nm.stop_notifications()
sys.exit()
| apache-2.0 | 7,988,192,192,993,335,000 | 33.067039 | 108 | 0.61594 | false |
marvinpinto/charlesbot-rundeck | tests/test_set_channel_topic.py | 1 | 3082 | import asynctest
import json
from asynctest.mock import patch
from asynctest.mock import call
class TestSetChannelTopic(asynctest.TestCase):
def setUp(self):
patcher1 = patch('charlesbot_rundeck.rundeck_lock.RundeckLock.seed_job_list') # NOQA
self.addCleanup(patcher1.stop)
self.mock_seed_job_list = patcher1.start()
patcher2 = patch('charlesbot.slack.slack_connection.SlackConnection.api_call') # NOQA
self.addCleanup(patcher2.stop)
self.mock_api_call = patcher2.start()
from charlesbot_rundeck.rundeck_lock import RundeckLock
self.rd_lock = RundeckLock("token",
"url",
"channel",
[])
def test_topic_channel_not_set(self):
self.rd_lock.topic_channel = None
yield from self.rd_lock.set_channel_topic(True)
self.assertEqual(self.mock_api_call.mock_calls, [])
def test_topic_channel_id_already_set(self):
self.rd_lock.locked_by_user = "bob"
self.rd_lock.topic_channel = "chan1"
self.rd_lock.topic_channel_id = "C1234"
yield from self.rd_lock.set_channel_topic(True)
calls = [
call("channels.setTopic",
channel="C1234",
topic=":lock: Rundeck executions locked by @bob :lock:")
]
self.assertEqual(self.mock_api_call.mock_calls, calls)
def test_topic_channel_not_found(self):
channels = {
"ok": True,
"channels": [
{
"name": "chan1",
},
{
"name": "chan2",
},
{
"name": "chan3",
},
]
}
self.rd_lock.locked_by_user = "bob"
self.rd_lock.topic_channel = "chan4"
self.mock_api_call.side_effect = [json.dumps(channels), None]
yield from self.rd_lock.set_channel_topic(True)
calls = [
call("channels.list", exclude_archived=1),
]
self.assertEqual(self.mock_api_call.mock_calls, calls)
def test_topic_channel_found(self):
channels = {
"ok": True,
"channels": [
{
"name": "chan1",
"id": "C1",
},
{
"name": "chan2",
"id": "C2",
},
{
"name": "chan3",
"id": "C3",
},
]
}
self.rd_lock.locked_by_user = "bob"
self.rd_lock.topic_channel = "chan2"
self.mock_api_call.side_effect = [json.dumps(channels), None]
yield from self.rd_lock.set_channel_topic(False)
calls = [
call("channels.list", exclude_archived=1),
call("channels.setTopic",
channel="C2",
topic="")
]
self.assertEqual(self.mock_api_call.mock_calls, calls)
| mit | 7,809,130,937,788,622,000 | 32.139785 | 94 | 0.490591 | false |
shreesundara/netsnmp | pysmi/compiler.py | 1 | 21084 | import sys
import os
import time
try:
from pwd import getpwuid
except ImportError:
getpwuid = lambda x: ['<unknown>']
from pysmi import __name__ as packageName
from pysmi import __version__ as packageVersion
from pysmi.mibinfo import MibInfo
from pysmi.codegen.symtable import SymtableCodeGen
from pysmi import error
from pysmi import debug
class MibStatus(str):
"""Indicate MIB transformation result.
*MibStatus* is a subclass of Python string type. Some additional
attributes may be set to indicate the details.
The following *MibStatus* class instances are defined:
* *compiled* - MIB is successfully transformed
    * *untouched* - a fresh transformed version of this MIB already exists
* *failed* - MIB transformation failed. *error* attribute carries details.
* *unprocessed* - MIB transformation required but waived for some reason
* *missing* - ASN.1 MIB source can't be found
* *borrowed* - MIB transformation failed but pre-transformed version was used
"""
def setOptions(self, **kwargs):
n = self.__class__(self)
for k in kwargs:
setattr(n, k, kwargs[k])
return n
statusCompiled = MibStatus('compiled')
statusUntouched = MibStatus('untouched')
statusFailed = MibStatus('failed')
statusUnprocessed = MibStatus('unprocessed')
statusMissing = MibStatus('missing')
statusBorrowed = MibStatus('borrowed')
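# Example (sketch): a status is still a plain string, with optional attributes
# attached through setOptions(), e.g.
#
#     st = statusFailed.setOptions(error=error.PySmiError('parse error'))
#     st == 'failed'   # True -- compares as a string
#     st.error         # carries the attached exception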
class MibCompiler(object):
"""Top-level, user-facing, composite MIB compiler object.
MibCompiler implements high-level MIB transformation processing logic.
It executes its actions by calling the following specialized objects:
* *readers* - to acquire ASN.1 MIB data
* *searchers* - to see if transformed MIB already exists and no processing is necessary
* *parser* - to parse ASN.1 MIB into AST
* *code generator* - to perform actual MIB transformation
* *borrowers* - to fetch pre-transformed MIB if transformation is impossible
* *writer* - to store transformed MIB data
Required components must be passed to MibCompiler on instantiation. Those
components are: *parser*, *codegenerator* and *writer*.
Optional components could be set or modified at later phases of MibCompiler
life. Unlike singular, required components, optional one can be present
in sequences to address many possible sources of data. They are
*readers*, *searchers* and *borrowers*.
Examples: ::
from pysmi.reader.localfile import FileReader
from pysmi.searcher.pyfile import PyFileSearcher
from pysmi.searcher.pypackage import PyPackageSearcher
from pysmi.searcher.stub import StubSearcher
from pysmi.writer.pyfile import PyFileWriter
from pysmi.parser.smi import SmiV2Parser
from pysmi.codegen.pysnmp import PySnmpCodeGen, baseMibs
mibCompiler = MibCompiler(SmiV2Parser(),
PySnmpCodeGen(),
PyFileWriter('/tmp/pysnmp/mibs'))
mibCompiler.addSources(FileReader('/usr/share/snmp/mibs'))
mibCompiler.addSearchers(PyFileSearcher('/tmp/pysnmp/mibs'))
mibCompiler.addSearchers(PyPackageSearcher('pysnmp.mibs'))
mibCompiler.addSearchers(StubSearcher(*baseMibs))
results = mibCompiler.compile('IF-MIB', 'IP-MIB')
"""
indexFile = 'index'
def __init__(self, parser, codegen, writer):
"""Creates an instance of *MibCompiler* class.
Args:
parser: ASN.1 MIB parser object
codegen: MIB transformation object
writer: transformed MIB storing object
"""
self._parser = parser
self._codegen = codegen
self._symbolgen = SymtableCodeGen()
self._writer = writer
self._sources = []
self._searchers = []
self._borrowers = []
def addSources(self, *sources):
"""Add more ASN.1 MIB source repositories.
MibCompiler.compile will invoke each of configured source objects
in order of their addition asking each to fetch MIB module specified
by name.
Args:
sources: reader object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._sources.extend(sources)
debug.logger & debug.flagCompiler and debug.logger('current MIB source(s): %s' % ', '.join([str(x) for x in self._sources]))
return self
def addSearchers(self, *searchers):
"""Add more transformed MIBs repositories.
        MibCompiler.compile will invoke each of the configured searcher objects
        in order of their addition, asking each whether an already transformed
        MIB module exists and is more recent than specified.
Args:
searchers: searcher object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._searchers.extend(searchers)
debug.logger & debug.flagCompiler and debug.logger('current compiled MIBs location(s): %s' % ', '.join([str(x) for x in self._searchers]))
return self
def addBorrowers(self, *borrowers):
"""Add more transformed MIBs repositories to borrow MIBs from.
        Whenever MibCompiler.compile encounters a MIB module which none of
        the *searchers* can find, or the fetched ASN.1 MIB module cannot be
        parsed (due to syntax errors), these *borrowers* objects will be
        invoked in order of their addition, asking each whether an already
        transformed MIB can be fetched (borrowed).
Args:
borrowers: borrower object(s)
Returns:
reference to itself (can be used for call chaining)
"""
self._borrowers.extend(borrowers)
debug.logger & debug.flagCompiler and debug.logger('current MIB borrower(s): %s' % ', '.join([str(x) for x in self._borrowers]))
return self
def compile(self, *mibnames, **options):
"""Transform requested and possibly referred MIBs.
The *compile* method should be invoked when *MibCompiler* object
is operational meaning at least *sources* are specified.
Once called with a MIB module name, *compile* will:
* fetch ASN.1 MIB module with given name by calling *sources*
* make sure no such transformed MIB already exists (with *searchers*)
* parse ASN.1 MIB text with *parser*
* perform actual MIB transformation into target format with *code generator*
* may attempt to borrow pre-transformed MIB through *borrowers*
* write transformed MIB through *writer*
The above sequence will be performed for each MIB name given in
*mibnames* and may be performed for all MIBs referred to from
MIBs being processed.
Args:
mibnames: list of ASN.1 MIBs names
options: options that affect the way PySMI components work
Returns:
A dictionary of MIB module names processed (keys) and *MibStatus*
class instances (values)
"""
processed = {}
parsedMibs = {}; failedMibs = {}; borrowedMibs = {}; builtMibs = {}
symbolTableMap = {}
originalMib = mibnames[0]
mibsToParse = [x for x in mibnames]
while mibsToParse:
mibname = mibsToParse.pop(0)
if mibname in parsedMibs:
debug.logger & debug.flagCompiler and debug.logger('MIB %s already parsed' % mibname)
continue
if mibname in failedMibs:
debug.logger & debug.flagCompiler and debug.logger('MIB %s already failed' % mibname)
continue
for source in self._sources:
debug.logger & debug.flagCompiler and debug.logger('trying source %s' % source)
try:
fileInfo, fileData = source.getData(mibname)
for mibTree in self._parser.parse(fileData):
mibInfo, symbolTable = self._symbolgen.genCode(
mibTree, symbolTableMap
)
symbolTableMap[mibInfo.name] = symbolTable
parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree
if mibname in failedMibs:
del failedMibs[mibname]
mibsToParse.extend(mibInfo.imported)
debug.logger & debug.flagCompiler and debug.logger('%s (%s) read from %s, immediate dependencies: %s' % (mibInfo.name, mibname, fileInfo.path, ', '.join(mibInfo.imported) or '<none>'))
break
except error.PySmiReaderFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no %s found at %s' % (mibname, source))
continue
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.source = source
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('%serror %s from %s' % (options.get('ignoreErrors') and 'ignoring ' or 'failing on ', exc, source))
failedMibs[mibname] = exc
processed[mibname] = statusFailed.setOptions(error=exc)
else:
exc = error.PySmiError('MIB source %s not found' % mibname)
exc.mibname = mibname
                debug.logger & debug.flagCompiler and debug.logger('no %s found everywhere' % mibname)
if mibname not in failedMibs:
failedMibs[mibname] = exc
if mibname not in processed:
processed[mibname] = statusMissing
        debug.logger & debug.flagCompiler and debug.logger('MIBs analyzed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# See what MIBs need generating
#
for mibname in parsedMibs.copy():
fileInfo, mibInfo, mibTree = parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('checking if %s requires updating' % mibname)
for searcher in self._searchers:
try:
searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
except error.PySmiFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
continue
except error.PySmiFileNotModifiedError:
debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
del parsedMibs[mibname]
processed[mibname] = statusUntouched
break
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.searcher = searcher
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
continue
else:
debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from code generation' % mibname)
del parsedMibs[mibname]
processed[mibname] = statusUntouched
continue
debug.logger & debug.flagCompiler and debug.logger('MIBs parsed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# Generate code for parsed MIBs
#
for mibname in [originalMib]:
fileInfo, mibInfo, mibTree = parsedMibs[mibname]
comments = [
'ASN.1 source %s' % fileInfo.path,
'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
'Using Python version %s' % sys.version.split('\n')[0]
]
try:
mibInfo, mibData = self._codegen.genCode(
mibTree,
symbolTableMap,
comments=comments,
genTexts=options.get('genTexts'),
parsedMibs = parsedMibs
)
builtMibs[mibname] = fileInfo, mibInfo, mibData
del parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('%s read from %s and compiled by %s' % (mibname, fileInfo.path, self._writer))
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.handler = self._codegen
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (self._codegen, exc))
processed[mibname] = statusFailed.setOptions(error=exc)
failedMibs[mibname] = exc
del parsedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))
#
# Try to borrow pre-compiled MIBs for failed ones
#
for mibname in failedMibs.copy():
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
continue
for borrower in self._borrowers:
debug.logger & debug.flagCompiler and debug.logger('trying to borrow %s from %s' % (mibname, borrower))
try:
fileInfo, fileData = borrower.getData(
mibname,
genTexts=options.get('genTexts')
)
borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData
del failedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('%s borrowed with %s' % (mibname, borrower))
break
                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (borrower, exc))
debug.logger & debug.flagCompiler and debug.logger('MIBs available for borrowing %s, MIBs failed %s' % (len(borrowedMibs), len(failedMibs)))
#
# See what MIBs need borrowing
#
for mibname in borrowedMibs.copy():
debug.logger & debug.flagCompiler and debug.logger('checking if failed MIB %s requires borrowing' % mibname)
fileInfo, mibInfo, mibData = borrowedMibs[mibname]
for searcher in self._searchers:
try:
searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
except error.PySmiFileNotFoundError:
debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
continue
except error.PySmiFileNotModifiedError:
debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
del borrowedMibs[mibname]
processed[mibname] = statusUntouched
break
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.searcher = searcher
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
continue
else:
debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)
if options.get('noDeps') and mibname not in mibnames:
debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
processed[mibname] = statusUntouched
else:
debug.logger & debug.flagCompiler and debug.logger('will borrow MIB %s' % mibname)
builtMibs[mibname] = borrowedMibs[mibname]
processed[mibname] = statusBorrowed.setOptions(
path=fileInfo.path, file=fileInfo.file,
alias=fileInfo.name
)
del borrowedMibs[mibname]
debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(builtMibs), len(failedMibs)))
#
# We could attempt to ignore missing/failed MIBs
#
if failedMibs and not options.get('ignoreErrors'):
debug.logger & debug.flagCompiler and debug.logger('failing with problem MIBs %s' % ', '.join(failedMibs))
for mibname in builtMibs:
processed[mibname] = statusUnprocessed
return processed
debug.logger & debug.flagCompiler and debug.logger('proceeding with built MIBs %s, failed MIBs %s' % (', '.join(builtMibs), ', '.join(failedMibs)))
#
# Store compiled MIBs
#
for mibname in builtMibs.copy():
fileInfo, mibInfo, mibData = builtMibs[mibname]
try:
self._writer.putData(
mibname, mibData, dryRun=options.get('dryRun')
)
debug.logger & debug.flagCompiler and debug.logger('%s stored by %s' % (mibname, self._writer))
del builtMibs[mibname]
if mibname not in processed:
processed[mibname] = statusCompiled.setOptions(
path=fileInfo.path, file=fileInfo.file,
alias=fileInfo.name
)
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.handler = self._codegen
exc.mibname = mibname
exc.msg += ' at MIB %s' % mibname
debug.logger & debug.flagCompiler and debug.logger('error %s from %s' % (exc, self._writer))
processed[mibname] = statusFailed.setOptions(error=exc)
failedMibs[mibname] = exc
del builtMibs[mibname]
        debug.logger & debug.flagCompiler and debug.logger('MIBs modified: %s' % ', '.join([x for x in processed if processed[x] in ('compiled', 'borrowed')]))
return processed
def buildIndex(self, processedMibs, **options):
comments = [
'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
            'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
'Using Python version %s' % sys.version.split('\n')[0]
]
try:
self._writer.putData(
self.indexFile,
self._codegen.genIndex(
dict([(x, x.oid) for x in processedMibs if hasattr(x, 'oid')]),
comments=comments
),
dryRun=options.get('dryRun')
)
except error.PySmiError:
exc_class, exc, tb = sys.exc_info()
exc.msg += ' at MIB index %s' % self.indexFile
debug.logger & debug.flagCompiler and debug.logger('error %s when building %s' % (exc, self.indexFile))
if options.get('ignoreErrors'):
return
if hasattr(exc, 'with_traceback'):
raise exc.with_traceback(tb)
else:
raise exc
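# Usage sketch: the mapping returned by compile() can be handed to buildIndex()
# (object names follow the MibCompiler docstring example; dryRun is optional):
#
#     results = mibCompiler.compile('IF-MIB', 'IP-MIB')
#     mibCompiler.buildIndex(results, dryRun=False)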
| bsd-2-clause | 2,209,945,687,160,253,700 | 42.955224 | 266 | 0.572282 | false |
ST-Data-Mining/crater | george/klazzifiers.py | 1 | 5539 | from __future__ import division,print_function
from os import environ
import sys
HOME=environ['HOME']
PROJECT_ROOT=HOME+'/Panzer/NCSU/Spatial and Temporal/crater'
EXPTS = PROJECT_ROOT+'/expts'
sys.path.extend([PROJECT_ROOT,EXPTS])
sys.dont_write_bytecode = True
from george.lib import *
from expts.csvParser import parseCSV, randomPoints
import config
def normalize_points(points):
if not len(points): return
tot = sum([point.w for point in points])
for point in points:
point.w = point.w/tot
def best_weak_classifier(points, attrLen, ignores=None, start=0):
best_c = None
if not ignores: ignores = []
for i in range(0,attrLen):
if i in ignores:
continue
classifier = WeakClassifier(points, i)
if (not best_c) or (classifier.trainError(start) < best_c.trainError(start)):
best_c = classifier
return best_c
def booster(fname, mu=0.475, T=150):
def updateWeights(classifier, b):
for p in points:
predicted = classifier.predict(p.x)
actual = int(p.y)
e = 0 if predicted == actual else 1
p.w *= b**(1-e)
points = parseCSV(fname)
strong = StrongClassifier(mu, T)
ignores = []
for t in range(0,T):
say(t+1, ' ')
normalize_points(points)
weak_classifier = best_weak_classifier(points, len(points[0].x), ignores)
ignores.append(weak_classifier.index)
error = weak_classifier.trainError()
beta = error/(1-error)
if beta == 0:
strong.T = t
break
updateWeights(weak_classifier, beta)
alpha = math.log(1/beta)
strong.update(weak_classifier,alpha)
print('')
return strong
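# Worked example of the update above (illustrative numbers): with a weak-learner
# error of 0.25, beta = 0.25 / 0.75 = 1/3, so correctly classified points are
# scaled by beta**1 = 1/3 while misclassified ones keep beta**0 = 1; after
# normalization the hard points dominate, and the learner votes with
# alpha = log(1/beta) = log(3) ~ 1.10.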
def _booster(fname, T=150):
print('***BOOSTER CLASSIFIER***')
boost_classifier = booster(fname, T=T)
#print(boost_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
points = parseCSV(config.FEATURES_FOLDER+test_files[0], False)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], False)
stat = ABCD()
for point in points:
pred = boost_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def greedy(fname, mu=0.325, T=150):
points = parseCSV(fname)
strong = StrongClassifier(mu, T)
ignores = []
normalize_points(points)
for t in range(0, T):
say(t+1,' ')
weak_classifier = best_weak_classifier(points, len(points[0].x), ignores)
ignores.append(weak_classifier.index)
error = weak_classifier.trainError()
beta = error/(1-error)
if beta == 0:
strong.T = t
break
alpha = math.log(1/beta)
strong.update(weak_classifier,alpha)
print('')
return strong
def _greedy(fname, T=150):
print('***GREEDY CLASSIFIER***')
greedy_classifier = greedy(fname, T=T)
#print(greedy_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
points = parseCSV(config.FEATURES_FOLDER+test_files[0], False)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], False)
stat = ABCD()
for point in points:
pred = greedy_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def transfer(fname, sameFiles, mu=0.5, T=150):
def craterCount(points):
count=0
for point in points:
if point.y==0: count+=1
return count
def updateWeights(classifier, b, b_t):
for i, point in enumerate(total):
predicted = classifier.predict(point.x)
actual = int(point.y)
e = 0 if predicted == actual else 1
if i<len(diff):
point.w *= b**e
else:
point.w *= b_t**-e
diff = parseCSV(fname, False)
same = randomPoints(sameFiles, craters=102, non_craters=153)
total = diff+same
craters = craterCount(total)
non_craters = len(total) - craters
[p.updateWeight(non_craters, craters) for p in total]
strong = StrongClassifier(mu, T)
ignores=[]
for t in range(0,T):
say(t+1,' ')
normalize_points(total)
weak_classifier = best_weak_classifier(total, len(total[0].x), ignores, len(diff))
ignores.append(weak_classifier.index)
error = weak_classifier.trainError(start=len(diff))
if error == 0:
strong.T = t
break
beta_t = error/(1-error)
beta = 1/(1+(2*math.log(len(total)/T))**0.5)
updateWeights(weak_classifier, beta, beta_t)
alpha = math.log(1/beta_t)
strong.update(weak_classifier, alpha)
print('')
return strong
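# Note (sketch): this mirrors a TrAdaBoost-style update -- the source-domain
# ("diff") points are down-weighted by beta**e when misclassified, while the
# target-domain ("same") points are up-weighted by beta_t**-e, with
# beta = 1 / (1 + sqrt(2 * log(len(total) / T))) held fixed across iterations.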
def _transfer(fname, T=150):
print('***TRANSFER CLASSIFIER***')
#print(tl_classifier)
for region,test_files in [('west',['1_24.csv','1_25.csv']), ('center',['2_24.csv','2_25.csv']),
('east',['3_24.csv','3_25.csv']), ('all',['all.csv']) ]:
tl_classifier = transfer(fname, test_files, T=T)
points = parseCSV(config.FEATURES_FOLDER+test_files[0], True)
if len(test_files) > 1:
points += parseCSV(config.FEATURES_FOLDER+test_files[1], True)
stat = ABCD()
for point in points:
pred = tl_classifier.predict(point.x)
act = int(point.y)
stat.update(pred, act)
print('\n'+region)
print(stat)
def _runner(T=150):
train = config.TRAIN_FILE
_booster(train,T)
_greedy(train,T)
_transfer(train,T)
if __name__=="__main__":
_runner(150)
| mit | -4,664,917,264,122,412,000 | 28.462766 | 97 | 0.625925 | false |
rosarior/django-sabot | setup.py | 1 | 1301 | #!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open('README.rst') as f:
readme = f.read()
with open('HISTORY.rst') as f:
history = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
author='Roberto Rosario',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
description='Provoke predictable errors in your Django projects.',
include_package_data=True,
install_requires=['Django>=1.7.0'],
license=license,
long_description=readme + '\n\n' + history,
name='django-sabot',
package_data={'': ['LICENSE']},
package_dir={'sabot': 'sabot'},
packages=['sabot'],
platforms=['any'],
url='https://github.com/rosarior/django-sabot',
    version='0.0.0',  # NOTE: placeholder -- the version value is missing in this copy of setup.py
zip_safe=False,
)
| mit | 5,138,860,737,166,539,000 | 26.104167 | 70 | 0.624135 | false |
YoungKwonJo/mlxtend | mlxtend/data/iris.py | 1 | 6046 | # Sebastian Raschka 2015
# mlxtend Machine Learning Library Extensions
import numpy as np
def iris_data():
"""Iris flower dataset.
Returns
--------
X, y : [n_samples, n_features], [n_class_labels]
X is the feature matrix with 150 flower samples as rows,
      and the 4 feature columns sepal length, sepal width,
petal length, and petal width.
y is a 1-dimensional array of the class labels where
0 = setosa, 1 = versicolor, 2 = virginica.
Reference: https://archive.ics.uci.edu/ml/datasets/Iris
"""
X = np.array([[ 5.1, 3.5, 1.4, 0.2],
[ 4.9, 3. , 1.4, 0.2],
[ 4.7, 3.2, 1.3, 0.2],
[ 4.6, 3.1, 1.5, 0.2],
[ 5. , 3.6, 1.4, 0.2],
[ 5.4, 3.9, 1.7, 0.4],
[ 4.6, 3.4, 1.4, 0.3],
[ 5. , 3.4, 1.5, 0.2],
[ 4.4, 2.9, 1.4, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 5.4, 3.7, 1.5, 0.2],
[ 4.8, 3.4, 1.6, 0.2],
[ 4.8, 3. , 1.4, 0.1],
[ 4.3, 3. , 1.1, 0.1],
[ 5.8, 4. , 1.2, 0.2],
[ 5.7, 4.4, 1.5, 0.4],
[ 5.4, 3.9, 1.3, 0.4],
[ 5.1, 3.5, 1.4, 0.3],
[ 5.7, 3.8, 1.7, 0.3],
[ 5.1, 3.8, 1.5, 0.3],
[ 5.4, 3.4, 1.7, 0.2],
[ 5.1, 3.7, 1.5, 0.4],
[ 4.6, 3.6, 1. , 0.2],
[ 5.1, 3.3, 1.7, 0.5],
[ 4.8, 3.4, 1.9, 0.2],
[ 5. , 3. , 1.6, 0.2],
[ 5. , 3.4, 1.6, 0.4],
[ 5.2, 3.5, 1.5, 0.2],
[ 5.2, 3.4, 1.4, 0.2],
[ 4.7, 3.2, 1.6, 0.2],
[ 4.8, 3.1, 1.6, 0.2],
[ 5.4, 3.4, 1.5, 0.4],
[ 5.2, 4.1, 1.5, 0.1],
[ 5.5, 4.2, 1.4, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 5. , 3.2, 1.2, 0.2],
[ 5.5, 3.5, 1.3, 0.2],
[ 4.9, 3.1, 1.5, 0.1],
[ 4.4, 3. , 1.3, 0.2],
[ 5.1, 3.4, 1.5, 0.2],
[ 5. , 3.5, 1.3, 0.3],
[ 4.5, 2.3, 1.3, 0.3],
[ 4.4, 3.2, 1.3, 0.2],
[ 5. , 3.5, 1.6, 0.6],
[ 5.1, 3.8, 1.9, 0.4],
[ 4.8, 3. , 1.4, 0.3],
[ 5.1, 3.8, 1.6, 0.2],
[ 4.6, 3.2, 1.4, 0.2],
[ 5.3, 3.7, 1.5, 0.2],
[ 5. , 3.3, 1.4, 0.2],
[ 7. , 3.2, 4.7, 1.4],
[ 6.4, 3.2, 4.5, 1.5],
[ 6.9, 3.1, 4.9, 1.5],
[ 5.5, 2.3, 4. , 1.3],
[ 6.5, 2.8, 4.6, 1.5],
[ 5.7, 2.8, 4.5, 1.3],
[ 6.3, 3.3, 4.7, 1.6],
[ 4.9, 2.4, 3.3, 1. ],
[ 6.6, 2.9, 4.6, 1.3],
[ 5.2, 2.7, 3.9, 1.4],
[ 5. , 2. , 3.5, 1. ],
[ 5.9, 3. , 4.2, 1.5],
[ 6. , 2.2, 4. , 1. ],
[ 6.1, 2.9, 4.7, 1.4],
[ 5.6, 2.9, 3.6, 1.3],
[ 6.7, 3.1, 4.4, 1.4],
[ 5.6, 3. , 4.5, 1.5],
[ 5.8, 2.7, 4.1, 1. ],
[ 6.2, 2.2, 4.5, 1.5],
[ 5.6, 2.5, 3.9, 1.1],
[ 5.9, 3.2, 4.8, 1.8],
[ 6.1, 2.8, 4. , 1.3],
[ 6.3, 2.5, 4.9, 1.5],
[ 6.1, 2.8, 4.7, 1.2],
[ 6.4, 2.9, 4.3, 1.3],
[ 6.6, 3. , 4.4, 1.4],
[ 6.8, 2.8, 4.8, 1.4],
[ 6.7, 3. , 5. , 1.7],
[ 6. , 2.9, 4.5, 1.5],
[ 5.7, 2.6, 3.5, 1. ],
[ 5.5, 2.4, 3.8, 1.1],
[ 5.5, 2.4, 3.7, 1. ],
[ 5.8, 2.7, 3.9, 1.2],
[ 6. , 2.7, 5.1, 1.6],
[ 5.4, 3. , 4.5, 1.5],
[ 6. , 3.4, 4.5, 1.6],
[ 6.7, 3.1, 4.7, 1.5],
[ 6.3, 2.3, 4.4, 1.3],
[ 5.6, 3. , 4.1, 1.3],
[ 5.5, 2.5, 4. , 1.3],
[ 5.5, 2.6, 4.4, 1.2],
[ 6.1, 3. , 4.6, 1.4],
[ 5.8, 2.6, 4. , 1.2],
[ 5. , 2.3, 3.3, 1. ],
[ 5.6, 2.7, 4.2, 1.3],
[ 5.7, 3. , 4.2, 1.2],
[ 5.7, 2.9, 4.2, 1.3],
[ 6.2, 2.9, 4.3, 1.3],
[ 5.1, 2.5, 3. , 1.1],
[ 5.7, 2.8, 4.1, 1.3],
[ 6.3, 3.3, 6. , 2.5],
[ 5.8, 2.7, 5.1, 1.9],
[ 7.1, 3. , 5.9, 2.1],
[ 6.3, 2.9, 5.6, 1.8],
[ 6.5, 3. , 5.8, 2.2],
[ 7.6, 3. , 6.6, 2.1],
[ 4.9, 2.5, 4.5, 1.7],
[ 7.3, 2.9, 6.3, 1.8],
[ 6.7, 2.5, 5.8, 1.8],
[ 7.2, 3.6, 6.1, 2.5],
[ 6.5, 3.2, 5.1, 2. ],
[ 6.4, 2.7, 5.3, 1.9],
[ 6.8, 3. , 5.5, 2.1],
[ 5.7, 2.5, 5. , 2. ],
[ 5.8, 2.8, 5.1, 2.4],
[ 6.4, 3.2, 5.3, 2.3],
[ 6.5, 3. , 5.5, 1.8],
[ 7.7, 3.8, 6.7, 2.2],
[ 7.7, 2.6, 6.9, 2.3],
[ 6. , 2.2, 5. , 1.5],
[ 6.9, 3.2, 5.7, 2.3],
[ 5.6, 2.8, 4.9, 2. ],
[ 7.7, 2.8, 6.7, 2. ],
[ 6.3, 2.7, 4.9, 1.8],
[ 6.7, 3.3, 5.7, 2.1],
[ 7.2, 3.2, 6. , 1.8],
[ 6.2, 2.8, 4.8, 1.8],
[ 6.1, 3. , 4.9, 1.8],
[ 6.4, 2.8, 5.6, 2.1],
[ 7.2, 3. , 5.8, 1.6],
[ 7.4, 2.8, 6.1, 1.9],
[ 7.9, 3.8, 6.4, 2. ],
[ 6.4, 2.8, 5.6, 2.2],
[ 6.3, 2.8, 5.1, 1.5],
[ 6.1, 2.6, 5.6, 1.4],
[ 7.7, 3. , 6.1, 2.3],
[ 6.3, 3.4, 5.6, 2.4],
[ 6.4, 3.1, 5.5, 1.8],
[ 6. , 3. , 4.8, 1.8],
[ 6.9, 3.1, 5.4, 2.1],
[ 6.7, 3.1, 5.6, 2.4],
[ 6.9, 3.1, 5.1, 2.3],
[ 5.8, 2.7, 5.1, 1.9],
[ 6.8, 3.2, 5.9, 2.3],
[ 6.7, 3.3, 5.7, 2.5],
[ 6.7, 3. , 5.2, 2.3],
[ 6.3, 2.5, 5. , 1.9],
[ 6.5, 3. , 5.2, 2. ],
[ 6.2, 3.4, 5.4, 2.3],
[ 5.9, 3. , 5.1, 1.8]])
y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
return X, y
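# Usage sketch:
#
#     X, y = iris_data()
#     X.shape   # (150, 4)
#     y.shape   # (150,)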
| bsd-3-clause | -8,277,334,933,845,212,000 | 32.588889 | 86 | 0.275885 | false |
isb-cgc/ISB-CGC-data-proc | bigquery_etl/extract/utils.py | 1 | 2377 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract Utils
"""
import json
import pandas as pd
import logging
log = logging.getLogger(__name__)
def convert_file_to_dataframe(filepath_or_buffer, sep="\t", skiprows=0, rollover=False, nrows=None, header = 'infer'):
"""does some required data cleaning and
then converts into a dataframe
"""
log.info("Converting file to a dataframe")
try:
# items to change to NaN/NULL
# when you change something here, remember to change in clean_up_dataframe too.
na_values = ['none', 'None', 'NONE', 'null', 'Null', 'NULL', ' ', 'NA', '__UNKNOWN__', '?']
# read the table/file
data_df = pd.read_table(filepath_or_buffer, sep=sep, skiprows=skiprows, lineterminator='\n',
comment='#', na_values=na_values, dtype='object', nrows=nrows, header = header,
encoding='utf-8')
except Exception as exp:
log.exception('problem converting to dataframe: %s' % (exp.message))
raise
filepath_or_buffer.close() # close StringIO
return data_df
#----------------------------------------
# Convert newline-delimited JSON string to dataframe
# -- should work for a small to medium files
# we are not loading into string, but into a temp file
# works only in a single bucket
#----------------------------------------
def convert_njson_file_to_df(filebuffer):
"""Converting new-line delimited JSON file into dataframe"""
log.info("Converting new-line delimited JSON file into dataframe")
# convert the file into a dataframe
lines = [json.loads(l) for l in filebuffer.splitlines()]
data_df = pd.DataFrame(lines)
# delete the temp file
filebuffer.close()
return data_df
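# Usage sketch (the buffer below is illustrative; any file-like object with
# tab-separated columns works the same way):
#
#     from io import StringIO   # StringIO.StringIO on Python 2
#     buf = StringIO(u"a\tb\n1\tnone\n2\t3\n")
#     df = convert_file_to_dataframe(buf)   # 'none' becomes NaN and buf is closed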
| apache-2.0 | 8,472,730,164,494,883,000 | 33.449275 | 118 | 0.6504 | false |
jambonrose/roman-numerals | tests/test_to_numeral.py | 1 | 1683 | """
Test conversion from integer to Roman numeral
"""
from typing import Any
import pytest
from roman_numerals import LOWERCASE, convert_to_numeral
from .parameters import LOWERCASE_PARAMETERS, STANDARD_PARAMETERS
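# The imported parameter lists are assumed to be (integer, expected numeral)
# pairs -- e.g. something like (4, 'IV') -- defined in tests/parameters.py.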
@pytest.mark.parametrize(
"decimal_integer, expected_numeral",
STANDARD_PARAMETERS)
def test_standard_numeral_conversion(
decimal_integer: int, expected_numeral: str,
) -> None:
"""
Test conversion from integers to uppercase Unicode Roman numerals
"""
assert convert_to_numeral(decimal_integer) == expected_numeral
@pytest.mark.parametrize(
"decimal_integer, expected_numeral",
LOWERCASE_PARAMETERS)
def test_lowercase_numeral_conversion(
decimal_integer: int, expected_numeral: str,
) -> None:
"""
Test conversion from integers to lowercase Unicode Roman numerals
"""
assert (
convert_to_numeral(decimal_integer, mode=LOWERCASE) == expected_numeral
)
@pytest.mark.parametrize("non_integer_values", [
'hello',
1.0,
True,
set(),
{'hello': 5},
])
def test_invalid_types(non_integer_values: Any) -> None:
"""
Ensure that passing in non-integers results in Type exceptions
"""
with pytest.raises(TypeError):
convert_to_numeral(non_integer_values)
@pytest.mark.parametrize("invalid_mode_values", [
'moo',
True,
False,
1000,
-5,
19.04,
set([1, 2, 3]),
{'hi': 'there'},
])
def test_invalid_mode_values(invalid_mode_values: Any) -> None:
"""
Ensure that passing in non-integers results in Type exceptions
"""
with pytest.raises(ValueError):
convert_to_numeral(10, mode=invalid_mode_values)
| bsd-2-clause | 6,595,135,498,576,320,000 | 23.391304 | 79 | 0.672014 | false |
sargm/selenium-py-traning-barancev | php4dvd/model/application.py | 1 | 4501 | from selenium.common.exceptions import *
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from php4dvd.pages.page import Page
from php4dvd.pages.login_page import LoginPage
from php4dvd.pages.internal_page import InternalPage
from php4dvd.pages.user_management_page import UserManagementPage
from php4dvd.pages.user_profile_page import UserProfilePage
from php4dvd.pages.add_movie_page import AddMoviePage
from php4dvd.pages.view_movie_page import ViewMoviePage
from model.user import User
from model.movie import Movie
class Application(object):
def __init__(self, driver, base_url):
self.driver = driver
driver.get(base_url)
self.wait = WebDriverWait(driver, 10)
self.login_page = LoginPage(driver, base_url)
self.internal_page = InternalPage(driver, base_url)
self.user_management_page = UserManagementPage(driver, base_url)
self.user_profile_page = UserProfilePage(driver, base_url)
self.add_movie_page = AddMoviePage(driver, base_url)
self.view_movie_page = ViewMoviePage(driver, base_url)
def login(self, user):
lp = self.login_page
lp.is_this_page
#lp.username_field.clear()
lp.username_field.send_keys(user.username)
#lp.password_field.clear()
#print("sdadsadasd", user.password)
lp.password_field.send_keys(user.password)
lp.submit_button.click()
def logout(self):
self.internal_page.logout_button.click()
self.wait.until(EC.alert_is_present()).accept()
def is_logged_in(self):
return self.internal_page.is_this_page
def is_logged_in_as(self,user):
return self.is_logged_in() \
and self.get_logged_user().username == user.username
def get_logged_user(self):
self.internal_page.user_profile_link.click()
upp = self.user_profile_page
upp.is_this_page
return User(username=upp.user_form.username_field.get_attribute("value"),
email=upp.user_form.email_field.get_attribute("value"))
def is_not_logged_in(self):
return self.login_page.is_this_page
def add_user(self,user):
self.internal_page.user_management_link.click()
ump = self.user_management_page
ump.is_this_page
ump.user_form.username_field.send_keys(user.username)
ump.user_form.email_field.send_keys(user.email)
ump.user_form.password_field.send_keys(user.password)
ump.user_form.password1_field.send_keys(user.password)
#ump.user_form.role_select.select_by_visible_text(user.role)
ump.user_form.submit_button.click()
def ensure_logout(self):
element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
if element.tag_name == "nav":
self.logout()
def ensure_login_as(self,user):
element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "nav, #loginform")))
if element.tag_name == "nav":
#we are on internal page
if self.is_logged_in_as(user):
return
else:
self.logout()
self.login(user)
def add_movie(self, movie):
self.internal_page.add_movie_link.click()
amp = self.add_movie_page
amp.is_this_page
amp.movie_form.movietitle_field.send_keys(movie.title)
amp.movie_form.movieyear_field.send_keys(movie.year)
#amp.movie_form.movieformat_field.send_keys(movie.format)
amp.movie_form.submit_button.click()
def is_added_movie(self, movie):
return self.get_added_movie().title == movie.title + " (" + movie.year + ")"
def get_added_movie(self):
vmp = self.view_movie_page
vmp.is_this_page
return Movie(title=vmp.movietitle_field.text)
def delete_movie(self, movie):
vmp = self.view_movie_page
vmp.is_this_page
vmp.movie_delete_link.click()
try:
element = vmp.wait.until(EC.alert_is_present())
alert = vmp.driver.switch_to_alert()
alert.accept()
#alert_text = alert.text
#print("text", alert_text)
print("alert accepted")
return True
except TimeoutException:
print("no alert")
return False
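# Usage sketch (assumes a running php4dvd instance; URL, credentials and movie
# data are illustrative):
#
#     driver = webdriver.Firefox()
#     app = Application(driver, "http://localhost/php4dvd/")
#     app.ensure_login_as(User(username="admin", password="admin"))
#     app.add_movie(Movie(title="Blade Runner", year="1982"))
#     assert app.is_added_movie(Movie(title="Blade Runner", year="1982"))
#     driver.quit()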
| apache-2.0 | -5,599,819,519,281,207,000 | 36.508333 | 103 | 0.647856 | false |
pkilambi/ceilometer | ceilometer/storage/impl_mongodb.py | 1 | 35357 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2014 Red Hat, Inc
#
# Authors: Doug Hellmann <[email protected]>
# Julien Danjou <[email protected]>
# Eoghan Glynn <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
import calendar
import copy
import datetime
import json
import operator
import uuid
import bson.code
import bson.objectid
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import pymongo
import six
import ceilometer
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer.storage import pymongo_base
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'resources': {'query': {'simple': True,
'metadata': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True},
'aggregation': {'standard': True,
'selectable': {'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}}
}
class Connection(pymongo_base.Connection):
"""Put the data into a MongoDB database
Collections::
- meter
- the raw incoming data
- resource
- the metadata for resources
- { _id: uuid of resource,
metadata: metadata dictionaries
user_id: uuid
project_id: uuid
meter: [ array of {counter_name: string, counter_type: string,
counter_unit: string} ]
}
"""
CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
CONNECTION_POOL = pymongo_utils.ConnectionPool()
STANDARD_AGGREGATES = dict(
emit_initial=dict(
sum='',
count='',
avg='',
min='',
max=''
),
emit_body=dict(
sum='sum: this.counter_volume,',
count='count: NumberInt(1),',
avg='acount: NumberInt(1), asum: this.counter_volume,',
min='min: this.counter_volume,',
max='max: this.counter_volume,'
),
reduce_initial=dict(
sum='',
count='',
avg='',
min='',
max=''
),
reduce_body=dict(
sum='sum: values[0].sum,',
count='count: values[0].count,',
avg='acount: values[0].acount, asum: values[0].asum,',
min='min: values[0].min,',
max='max: values[0].max,'
),
reduce_computation=dict(
sum='res.sum += values[i].sum;',
count='res.count = NumberInt(res.count + values[i].count);',
avg=('res.acount = NumberInt(res.acount + values[i].acount);'
'res.asum += values[i].asum;'),
min='if ( values[i].min < res.min ) {res.min = values[i].min;}',
max='if ( values[i].max > res.max ) {res.max = values[i].max;}'
),
finalize=dict(
sum='',
count='',
avg='value.avg = value.asum / value.acount;',
min='',
max=''
),
)
UNPARAMETERIZED_AGGREGATES = dict(
emit_initial=dict(
stddev=(
''
)
),
emit_body=dict(
stddev='sdsum: this.counter_volume,'
'sdcount: 1,'
'weighted_distances: 0,'
'stddev: 0,'
),
reduce_initial=dict(
stddev=''
),
reduce_body=dict(
stddev='sdsum: values[0].sdsum,'
'sdcount: values[0].sdcount,'
'weighted_distances: values[0].weighted_distances,'
'stddev: values[0].stddev,'
),
reduce_computation=dict(
stddev=(
'var deviance = (res.sdsum / res.sdcount) - values[i].sdsum;'
'var weight = res.sdcount / ++res.sdcount;'
'res.weighted_distances += (Math.pow(deviance, 2) * weight);'
'res.sdsum += values[i].sdsum;'
)
),
finalize=dict(
stddev=(
'value.stddev = Math.sqrt(value.weighted_distances /'
' value.sdcount);'
)
),
)
PARAMETERIZED_AGGREGATES = dict(
validate=dict(
cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id',
'source']
),
emit_initial=dict(
cardinality=(
'aggregate["cardinality/%(aggregate_param)s"] = 1;'
'var distinct_%(aggregate_param)s = {};'
'distinct_%(aggregate_param)s[this["%(aggregate_param)s"]]'
' = true;'
)
),
emit_body=dict(
cardinality=(
'distinct_%(aggregate_param)s : distinct_%(aggregate_param)s,'
'%(aggregate_param)s : this["%(aggregate_param)s"],'
)
),
reduce_initial=dict(
cardinality=''
),
reduce_body=dict(
cardinality=(
'aggregate : values[0].aggregate,'
'distinct_%(aggregate_param)s:'
' values[0].distinct_%(aggregate_param)s,'
'%(aggregate_param)s : values[0]["%(aggregate_param)s"],'
)
),
reduce_computation=dict(
cardinality=(
'if (!(values[i]["%(aggregate_param)s"] in'
' res.distinct_%(aggregate_param)s)) {'
' res.distinct_%(aggregate_param)s[values[i]'
' ["%(aggregate_param)s"]] = true;'
' res.aggregate["cardinality/%(aggregate_param)s"] += 1;}'
)
),
finalize=dict(
cardinality=''
),
)
EMIT_STATS_COMMON = """
var aggregate = {};
%(aggregate_initial_placeholder)s
emit(%(key_val)s, { unit: this.counter_unit,
aggregate : aggregate,
%(aggregate_body_placeholder)s
groupby : %(groupby_val)s,
duration_start : this.timestamp,
duration_end : this.timestamp,
period_start : %(period_start_val)s,
period_end : %(period_end_val)s} )
"""
MAP_STATS_PERIOD_VAR = """
var period = %(period)d * 1000;
var period_first = %(period_first)d * 1000;
var period_start = period_first
+ (Math.floor(new Date(this.timestamp.getTime()
- period_first) / period)
* period);
"""
MAP_STATS_GROUPBY_VAR = """
var groupby_fields = %(groupby_fields)s;
var groupby = {};
var groupby_key = {};
for ( var i=0; i<groupby_fields.length; i++ ) {
if (groupby_fields[i].search("resource_metadata") != -1) {
var key = "resource_metadata";
var j = groupby_fields[i].indexOf('.');
var value = groupby_fields[i].slice(j+1, groupby_fields[i].length);
groupby[groupby_fields[i]] = this[key][value];
groupby_key[groupby_fields[i]] = this[key][value];
} else {
groupby[groupby_fields[i]] = this[groupby_fields[i]]
groupby_key[groupby_fields[i]] = this[groupby_fields[i]]
}
}
"""
PARAMS_MAP_STATS = {
'key_val': '\'statistics\'',
'groupby_val': 'null',
'period_start_val': 'this.timestamp',
'period_end_val': 'this.timestamp',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS = bson.code.Code("function () {" +
EMIT_STATS_COMMON % PARAMS_MAP_STATS +
"}")
PARAMS_MAP_STATS_PERIOD = {
'key_val': 'period_start',
'groupby_val': 'null',
'period_start_val': 'new Date(period_start)',
'period_end_val': 'new Date(period_start + period)',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_PERIOD = bson.code.Code(
"function () {" +
MAP_STATS_PERIOD_VAR +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_PERIOD +
"}")
PARAMS_MAP_STATS_GROUPBY = {
'key_val': 'groupby_key',
'groupby_val': 'groupby',
'period_start_val': 'this.timestamp',
'period_end_val': 'this.timestamp',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_GROUPBY = bson.code.Code(
"function () {" +
MAP_STATS_GROUPBY_VAR +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_GROUPBY +
"}")
PARAMS_MAP_STATS_PERIOD_GROUPBY = {
'key_val': 'groupby_key',
'groupby_val': 'groupby',
'period_start_val': 'new Date(period_start)',
'period_end_val': 'new Date(period_start + period)',
'aggregate_initial_placeholder': '%(aggregate_initial_val)s',
'aggregate_body_placeholder': '%(aggregate_body_val)s'
}
MAP_STATS_PERIOD_GROUPBY = bson.code.Code(
"function () {" +
MAP_STATS_PERIOD_VAR +
MAP_STATS_GROUPBY_VAR +
" groupby_key['period_start'] = period_start\n" +
EMIT_STATS_COMMON % PARAMS_MAP_STATS_PERIOD_GROUPBY +
"}")
REDUCE_STATS = bson.code.Code("""
function (key, values) {
%(aggregate_initial_val)s
var res = { unit: values[0].unit,
aggregate: values[0].aggregate,
%(aggregate_body_val)s
groupby: values[0].groupby,
period_start: values[0].period_start,
period_end: values[0].period_end,
duration_start: values[0].duration_start,
duration_end: values[0].duration_end };
for ( var i=1; i<values.length; i++ ) {
%(aggregate_computation_val)s
if ( values[i].duration_start < res.duration_start )
res.duration_start = values[i].duration_start;
if ( values[i].duration_end > res.duration_end )
res.duration_end = values[i].duration_end;
if ( values[i].period_start < res.period_start )
res.period_start = values[i].period_start;
if ( values[i].period_end > res.period_end )
res.period_end = values[i].period_end; }
return res;
}
""")
FINALIZE_STATS = bson.code.Code("""
function (key, value) {
%(aggregate_val)s
value.duration = (value.duration_end - value.duration_start) / 1000;
value.period = NumberInt(%(period)d);
return value;
}""")
SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'),
'asc': (pymongo.ASCENDING, '$gt')}
MAP_RESOURCES = bson.code.Code("""
function () {
emit(this.resource_id,
{user_id: this.user_id,
project_id: this.project_id,
source: this.source,
first_timestamp: this.timestamp,
last_timestamp: this.timestamp,
metadata: this.resource_metadata})
}""")
REDUCE_RESOURCES = bson.code.Code("""
function (key, values) {
var merge = {user_id: values[0].user_id,
project_id: values[0].project_id,
source: values[0].source,
first_timestamp: values[0].first_timestamp,
last_timestamp: values[0].last_timestamp,
metadata: values[0].metadata}
values.forEach(function(value) {
if (merge.first_timestamp - value.first_timestamp > 0) {
merge.first_timestamp = value.first_timestamp;
merge.user_id = value.user_id;
merge.project_id = value.project_id;
merge.source = value.source;
} else if (merge.last_timestamp - value.last_timestamp <= 0) {
merge.last_timestamp = value.last_timestamp;
merge.metadata = value.metadata;
}
});
return merge;
}""")
_GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
_APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31,
hour=23, minute=59, second=59)
def __init__(self, url):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that otherwise we overflow the MongoDB instance with new
# connection since we instantiate a Pymongo client each time someone
# requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating index, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
@staticmethod
def update_ttl(ttl, ttl_index_name, index_field, coll):
"""Update or ensure time_to_live indexes.
:param ttl: time to live in seconds.
:param ttl_index_name: name of the index we want to update or ensure.
:param index_field: field with the index that we need to update.
        :param coll: collection whose indexes need to be updated.
"""
indexes = coll.index_information()
if ttl <= 0:
if ttl_index_name in indexes:
coll.drop_index(ttl_index_name)
return
if ttl_index_name in indexes:
return coll.database.command(
'collMod', coll.name,
index={'keyPattern': {index_field: pymongo.ASCENDING},
'expireAfterSeconds': ttl})
coll.create_index([(index_field, pymongo.ASCENDING)],
expireAfterSeconds=ttl,
name=ttl_index_name)
def upgrade(self):
# Establish indexes
#
# We need variations for user_id vs. project_id because of the
# way the indexes are stored in b-trees. The user_id and
# project_id values are usually mutually exclusive in the
# queries, so the database won't take advantage of an index
# including both.
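        # For example (hedged illustration), the loop below ends up creating
        # indexes roughly equivalent to:
        #
        #     db.resource.create_index([('user_id', 1), ('source', 1)],
        #                              name='resource_idx')
        #     db.resource.create_index([('project_id', 1), ('source', 1)],
        #                              name='resource_project_idx')
        #
        # plus the analogous 'meter_idx' / 'meter_project_idx' indexes on the
        # meter collection.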
# create collection if not present
if 'resource' not in self.db.conn.collection_names():
self.db.conn.create_collection('resource')
if 'meter' not in self.db.conn.collection_names():
self.db.conn.create_collection('meter')
name_qualifier = dict(user_id='', project_id='project_')
background = dict(user_id=False, project_id=True)
for primary in ['user_id', 'project_id']:
name = 'resource_%sidx' % name_qualifier[primary]
self.db.resource.create_index([
(primary, pymongo.ASCENDING),
('source', pymongo.ASCENDING),
], name=name, background=background[primary])
name = 'meter_%sidx' % name_qualifier[primary]
self.db.meter.create_index([
('resource_id', pymongo.ASCENDING),
(primary, pymongo.ASCENDING),
('counter_name', pymongo.ASCENDING),
('timestamp', pymongo.ASCENDING),
('source', pymongo.ASCENDING),
], name=name, background=background[primary])
self.db.resource.create_index([('last_sample_timestamp',
pymongo.DESCENDING)],
name='last_sample_timestamp_idx',
sparse=True)
self.db.meter.create_index([('timestamp', pymongo.DESCENDING)],
name='timestamp_idx')
# update or ensure time_to_live index
ttl = cfg.CONF.database.metering_time_to_live
self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter)
self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp',
self.db.resource)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
def record_metering_data(self, data):
"""Write the data to the backend storage system.
:param data: a dictionary such as returned by
ceilometer.meter.meter_message_from_counter
"""
# Record the updated resource metadata - we use $setOnInsert to
# unconditionally insert sample timestamps and resource metadata
# (in the update case, this must be conditional on the sample not
# being out-of-order)
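        # Hedged sketch of the expected input: `data` is a sample dict such as
        # one produced by the meter pipeline, e.g.
        #
        #     {'resource_id': 'inst-0001', 'project_id': 'p1', 'user_id': 'u1',
        #      'source': 'openstack', 'counter_name': 'cpu_util',
        #      'counter_type': 'gauge', 'counter_unit': '%',
        #      'timestamp': datetime.datetime.utcnow(),
        #      'resource_metadata': {...}}
        #
        # (field values are illustrative only). The resource_metadata keys are
        # rewritten below so that dotted or dollar-prefixed keys can be stored
        # safely in MongoDB.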
data = copy.deepcopy(data)
data['resource_metadata'] = pymongo_utils.improve_keys(
data.pop('resource_metadata'))
resource = self.db.resource.find_one_and_update(
{'_id': data['resource_id']},
{'$set': {'project_id': data['project_id'],
'user_id': data['user_id'],
'source': data['source'],
},
'$setOnInsert': {'metadata': data['resource_metadata'],
'first_sample_timestamp': data['timestamp'],
'last_sample_timestamp': data['timestamp'],
},
'$addToSet': {'meter': {'counter_name': data['counter_name'],
'counter_type': data['counter_type'],
'counter_unit': data['counter_unit'],
},
},
},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER,
)
# only update last sample timestamp if actually later (the usual
# in-order case)
last_sample_timestamp = resource.get('last_sample_timestamp')
if (last_sample_timestamp is None or
last_sample_timestamp <= data['timestamp']):
self.db.resource.update_one(
{'_id': data['resource_id']},
{'$set': {'metadata': data['resource_metadata'],
'last_sample_timestamp': data['timestamp']}}
)
# only update first sample timestamp if actually earlier (the unusual
# out-of-order case)
# NOTE: a null first sample timestamp is not updated as this indicates
# a pre-existing resource document dating from before we started
# recording these timestamps in the resource collection
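        # Worked example (illustrative): if the resource already records
        # first_sample_timestamp=10:00 and last_sample_timestamp=10:05 and a
        # late sample stamped 09:58 arrives, only first_sample_timestamp is
        # rewound to 09:58; the in-order branch above is skipped because
        # 10:05 <= 09:58 is false.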
first_sample_timestamp = resource.get('first_sample_timestamp')
if (first_sample_timestamp is not None and
first_sample_timestamp > data['timestamp']):
self.db.resource.update_one(
{'_id': data['resource_id']},
{'$set': {'first_sample_timestamp': data['timestamp']}}
)
# Record the raw data for the meter. Use a copy so we do not
# modify a data structure owned by our caller (the driver adds
# a new key '_id').
record = copy.copy(data)
record['recorded_at'] = timeutils.utcnow()
self.db.meter.insert_one(record)
def clear_expired_metering_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs with native MongoDB time-to-live feature.
"""
LOG.debug(_("Clearing expired metering data is based on native "
"MongoDB time to live feature and going in background."))
@staticmethod
def _get_marker(db_collection, marker_pairs):
"""Return the mark document according to the attribute-value pairs.
:param db_collection: Database collection that be query.
:param maker_pairs: Attribute-value pairs filter.
"""
if db_collection is None:
return
if not marker_pairs:
return
ret = db_collection.find(marker_pairs, limit=2)
if ret.count() == 0:
raise base.NoResultFound
elif ret.count() > 1:
raise base.MultipleResultsFound
else:
_ret = ret.__getitem__(0)
return _ret
@classmethod
def _recurse_sort_keys(cls, sort_keys, marker, flag):
_first = sort_keys[0]
value = marker[_first]
if len(sort_keys) == 1:
return {_first: {flag: value}}
else:
criteria_equ = {_first: {'eq': value}}
criteria_cmp = cls._recurse_sort_keys(sort_keys[1:], marker, flag)
return dict(criteria_equ, ** criteria_cmp)
@classmethod
def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'):
"""Returns a sort_instruction and paging operator.
Sort instructions are used in the query to determine what attributes
to sort on and what direction to use.
        :param sort_keys: array of attributes by which results will be sorted.
        :param sort_dir: direction in which results will be sorted (asc, desc).
:return: sort instructions and paging operator
"""
sort_keys = sort_keys or []
sort_instructions = []
_sort_dir, operation = cls.SORT_OPERATION_MAPPING.get(
sort_dir, cls.SORT_OPERATION_MAPPING['desc'])
for _sort_key in sort_keys:
_instruction = (_sort_key, _sort_dir)
sort_instructions.append(_instruction)
return sort_instructions, operation
def _get_time_constrained_resources(self, query,
start_timestamp, start_timestamp_op,
end_timestamp, end_timestamp_op,
metaquery, resource):
"""Return an iterable of models.Resource instances
Items are constrained by sample timestamp.
:param query: project/user/source query
:param start_timestamp: modified timestamp start range.
:param start_timestamp_op: start time operator, like gt, ge.
:param end_timestamp: modified timestamp end range.
:param end_timestamp_op: end time operator, like lt, le.
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['resource_id'] = resource
# Add resource_ prefix so it matches the field in the db
query.update(dict(('resource_' + k, v)
for (k, v) in six.iteritems(metaquery)))
# FIXME(dhellmann): This may not perform very well,
# but doing any better will require changing the database
# schema and that will need more thought than I have time
# to put into it today.
# Look for resources matching the above criteria and with
# samples in the time range we care about, then change the
# resource query to return just those resources by id.
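        # Rough flow (illustrative): phase one map-reduces the meter
        # collection into a temporary resource_list_<uuid> collection keyed by
        # resource_id; phase two reads that collection back with the requested
        # sort order and yields models.Resource objects, dropping the
        # temporary collection afterwards.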
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
query['timestamp'] = ts_range
sort_keys = base._handle_sort_key('resource')
sort_instructions = self._build_sort_instructions(sort_keys)[0]
# use a unique collection name for the results collection,
        # as result post-sorting (as opposed to reduce pre-sorting)
# is not possible on an inline M-R
out = 'resource_list_%s' % uuid.uuid4()
self.db.meter.map_reduce(self.MAP_RESOURCES,
self.REDUCE_RESOURCES,
out=out,
sort={'resource_id': 1},
query=query)
try:
for r in self.db[out].find(sort=sort_instructions):
resource = r['value']
yield models.Resource(
resource_id=r['_id'],
user_id=resource['user_id'],
project_id=resource['project_id'],
first_sample_timestamp=resource['first_timestamp'],
last_sample_timestamp=resource['last_timestamp'],
source=resource['source'],
metadata=pymongo_utils.unquote_keys(resource['metadata']))
finally:
self.db[out].drop()
def _get_floating_resources(self, query, metaquery, resource):
"""Return an iterable of models.Resource instances
Items are unconstrained by timestamp.
:param query: project/user/source query
:param metaquery: dict with metadata to match on.
:param resource: resource filter.
"""
if resource is not None:
query['_id'] = resource
query.update(dict((k, v)
for (k, v) in six.iteritems(metaquery)))
keys = base._handle_sort_key('resource')
sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i
for i in keys]
sort_instructions = self._build_sort_instructions(sort_keys)[0]
for r in self.db.resource.find(query, sort=sort_instructions):
yield models.Resource(
resource_id=r['_id'],
user_id=r['user_id'],
project_id=r['project_id'],
first_sample_timestamp=r.get('first_sample_timestamp',
self._GENESIS),
last_sample_timestamp=r.get('last_sample_timestamp',
self._APOCALYPSE),
source=r['source'],
metadata=pymongo_utils.unquote_keys(r['metadata']))
def get_resources(self, user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery=None, resource=None):
"""Return an iterable of models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optional start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
"""
metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
query = {}
if user is not None:
query['user_id'] = user
if project is not None:
query['project_id'] = project
if source is not None:
query['source'] = source
if start_timestamp or end_timestamp:
return self._get_time_constrained_resources(query,
start_timestamp,
start_timestamp_op,
end_timestamp,
end_timestamp_op,
metaquery, resource)
else:
return self._get_floating_resources(query, metaquery, resource)
def _aggregate_param(self, fragment_key, aggregate):
fragment_map = self.STANDARD_AGGREGATES[fragment_key]
if not aggregate:
return ''.join([f for f in fragment_map.values()])
fragments = ''
for a in aggregate:
if a.func in self.STANDARD_AGGREGATES[fragment_key]:
fragment_map = self.STANDARD_AGGREGATES[fragment_key]
fragments += fragment_map[a.func]
elif a.func in self.UNPARAMETERIZED_AGGREGATES[fragment_key]:
fragment_map = self.UNPARAMETERIZED_AGGREGATES[fragment_key]
fragments += fragment_map[a.func]
elif a.func in self.PARAMETERIZED_AGGREGATES[fragment_key]:
fragment_map = self.PARAMETERIZED_AGGREGATES[fragment_key]
v = self.PARAMETERIZED_AGGREGATES['validate'].get(a.func)
if not (v and v(a.param)):
raise storage.StorageBadAggregate('Bad aggregate: %s.%s'
% (a.func, a.param))
params = dict(aggregate_param=a.param)
fragments += (fragment_map[a.func] % params)
else:
raise ceilometer.NotImplementedError(
'Selectable aggregate function %s'
' is not supported' % a.func)
return fragments
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
aggregate=None):
"""Return an iterable of models.Statistics instance.
Items are containing meter statistics described by the query
parameters. The filter must have a meter value set.
"""
if (groupby and set(groupby) -
set(['user_id', 'project_id', 'resource_id', 'source',
'resource_metadata.instance_type'])):
raise ceilometer.NotImplementedError(
"Unable to group by these fields")
q = pymongo_utils.make_query_from_filter(sample_filter)
if period:
if sample_filter.start_timestamp:
period_start = sample_filter.start_timestamp
else:
period_start = self.db.meter.find(
limit=1, sort=[('timestamp',
pymongo.ASCENDING)])[0]['timestamp']
period_start = int(calendar.timegm(period_start.utctimetuple()))
map_params = {'period': period,
'period_first': period_start,
'groupby_fields': json.dumps(groupby)}
if groupby:
map_fragment = self.MAP_STATS_PERIOD_GROUPBY
else:
map_fragment = self.MAP_STATS_PERIOD
else:
if groupby:
map_params = {'groupby_fields': json.dumps(groupby)}
map_fragment = self.MAP_STATS_GROUPBY
else:
map_params = dict()
map_fragment = self.MAP_STATS
sub = self._aggregate_param
map_params['aggregate_initial_val'] = sub('emit_initial', aggregate)
map_params['aggregate_body_val'] = sub('emit_body', aggregate)
map_stats = map_fragment % map_params
reduce_params = dict(
aggregate_initial_val=sub('reduce_initial', aggregate),
aggregate_body_val=sub('reduce_body', aggregate),
aggregate_computation_val=sub('reduce_computation', aggregate)
)
reduce_stats = self.REDUCE_STATS % reduce_params
finalize_params = dict(aggregate_val=sub('finalize', aggregate),
period=(period if period else 0))
finalize_stats = self.FINALIZE_STATS % finalize_params
results = self.db.meter.map_reduce(
map_stats,
reduce_stats,
{'inline': 1},
finalize=finalize_stats,
query=q,
)
# FIXME(terriyu) Fix get_meter_statistics() so we don't use sorted()
# to return the results
return sorted(
(self._stats_result_to_model(r['value'], groupby, aggregate)
for r in results['results']),
key=operator.attrgetter('period_start'))
@staticmethod
def _stats_result_aggregates(result, aggregate):
stats_args = {}
for attr in ['count', 'min', 'max', 'sum', 'avg']:
if attr in result:
stats_args[attr] = result[attr]
if aggregate:
stats_args['aggregate'] = {}
for a in aggregate:
ak = '%s%s' % (a.func, '/%s' % a.param if a.param else '')
if ak in result:
stats_args['aggregate'][ak] = result[ak]
elif 'aggregate' in result:
stats_args['aggregate'][ak] = result['aggregate'].get(ak)
return stats_args
@staticmethod
def _stats_result_to_model(result, groupby, aggregate):
stats_args = Connection._stats_result_aggregates(result, aggregate)
stats_args['unit'] = result['unit']
stats_args['duration'] = result['duration']
stats_args['duration_start'] = result['duration_start']
stats_args['duration_end'] = result['duration_end']
stats_args['period'] = result['period']
stats_args['period_start'] = result['period_start']
stats_args['period_end'] = result['period_end']
stats_args['groupby'] = (dict(
(g, result['groupby'][g]) for g in groupby) if groupby else None)
return models.Statistics(**stats_args)
| apache-2.0 | 8,195,262,560,355,288,000 | 39.454233 | 79 | 0.535735 | false |
ebakan/Python | urler.py | 1 | 1086 | #!/usr/bin/env python
import urllib.request
def genterm(inp):
def foo(x):
if x.isalpha():
return x
else:
return '%{0}'.format(hex(ord(x))[2:])
return ''.join(map(foo,inp))
def genresults(inp):
page=urllib.request.urlopen('http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q='+inp)
query=eval(page.read().decode().replace('null','None'))
page.close()
return int(list(list(query.values())[0].values())[0]['estimatedResultCount'])
def main():
terms={}
counter=3
for i in range(1,counter+1):
for k in range(1,counter+1):
terms['f'*i+'u'*k]=None
for i in terms.keys(): terms[i]=genresults(genterm(i))
return sorter(terms)
def sorter(dictionary):
keys=list(dictionary.keys())
keys.sort()
vals=[dictionary[i] for i in keys]
return dict(zip(keys,vals))
def output(terms):
f=open('fu.csv','w')
for i in terms:
f.write('{0},{1},{2}\n'.format(i[0].count('f'),i[0].count('u'),i[1]))
if __name__=='__main__':
output(main())
| gpl-3.0 | -8,417,570,026,628,726,000 | 25.487805 | 99 | 0.571823 | false |
grakiss888/testapi | update/templates/update_mongodb.py | 1 | 2865 | ##############################################################################
# Copyright (c) 2016 ZTE Corporation
# [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import argparse
from pymongo import MongoClient
from changes_in_mongodb import collections_old2New, \
fields_old2New, docs_old2New
from utils import main, parse_mongodb_url
parser = argparse.ArgumentParser(description='Update MongoDBs')
parser.add_argument('-u', '--url',
type=str,
required=False,
default='mongodb://127.0.0.1:27017/',
help='Mongo DB URL for Backups')
parser.add_argument('-d', '--db',
type=str,
required=False,
default='test_results_collection',
help='database for the update.')
def assert_collections(a_dict):
if a_dict is not None:
collections = eval_db('collection_names')
no_collections = []
for collection in a_dict.keys():
if collection not in collections:
no_collections.append(collection)
assert len(no_collections) == 0, \
'collections {} not exist'.format(no_collections)
def rename_collections(a_dict):
if a_dict is not None:
for collection, new_name in a_dict.iteritems():
eval_collection(collection, 'rename', new_name)
def rename_fields(a_dict):
collection_update(a_dict, '$rename')
def change_docs(a_dict):
collection_update(a_dict, '$set')
def eval_db(method, *args, **kwargs):
exec_db = db.__getattribute__(method)
return exec_db(*args, **kwargs)
def eval_collection(collection, method, *args, **kwargs):
exec_collection = db.__getattr__(collection)
return exec_collection.__getattribute__(method)(*args, **kwargs)
def collection_update(a_dict, operator):
if a_dict is not None:
for collection, updates in a_dict.iteritems():
for (query, doc) in updates:
doc_dict = {operator: doc}
eval_collection(collection, 'update', query,
doc_dict, upsert=False, multi=True)
def update(args):
parse_mongodb_url(args.url)
client = MongoClient(args.url)
global db
db = client[args.db]
assert_collections(docs_old2New)
assert_collections(fields_old2New)
assert_collections(collections_old2New)
change_docs(docs_old2New)
rename_fields(fields_old2New)
rename_collections(collections_old2New)
if __name__ == '__main__':
main(update, parser)
| apache-2.0 | -7,486,893,034,568,809,000 | 30.833333 | 78 | 0.597208 | false |
jonanv/Data-Mining | kNN.py | 1 | 2152 | # Example of kNN implemented from Scratch in Python
import csv
import random
import math
import operator
def loadDataset(filename, split, trainingSet=[] , testSet=[]):
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for x in range(len(dataset)-1):
for y in range(4):
dataset[x][y] = float(dataset[x][y])
if random.random() < split:
trainingSet.append(dataset[x])
else:
testSet.append(dataset[x])
def euclideanDistance(instance1, instance2, length):
distance = 0
for x in range(length):
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance)
def getNeighbors(trainingSet, testInstance, k):
distances = []
length = len(testInstance)-1
for x in range(len(trainingSet)):
dist = euclideanDistance(testInstance, trainingSet[x], length)
distances.append((trainingSet[x], dist))
distances.sort(key=operator.itemgetter(1))
neighbors = []
for x in range(k):
neighbors.append(distances[x][0])
return neighbors
def getResponse(neighbors):
classVotes = {}
for x in range(len(neighbors)):
response = neighbors[x][-1]
if response in classVotes:
classVotes[response] += 1
else:
classVotes[response] = 1
sortedVotes = sorted(classVotes.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedVotes[0][0]
def getAccuracy(testSet, predictions):
correct = 0
for x in range(len(testSet)):
if testSet[x][-1] == predictions[x]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def main():
# prepare data
trainingSet=[]
testSet=[]
split = 0.67
loadDataset('iris.data', split, trainingSet, testSet)
print 'Train set: ' + repr(len(trainingSet))
print 'Test set: ' + repr(len(testSet))
# generate predictions
predictions=[]
k = 3
for x in range(len(testSet)):
neighbors = getNeighbors(trainingSet, testSet[x], k)
result = getResponse(neighbors)
predictions.append(result)
print('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))
accuracy = getAccuracy(testSet, predictions)
print('Accuracy: ' + repr(accuracy) + '%')
main() | gpl-3.0 | 3,967,955,662,638,029,000 | 27.328947 | 87 | 0.681227 | false |
Prev/jikji | tests/test_generator.py | 1 | 1600 | """
tests.generator
---------------
Test generator of application
:author: Prev([email protected])
"""
import pytest
import os
import shutil
from jikji import Jikji
def test_generate1() :
""" Testing for generating of testapp1
"""
jikji = Jikji('tests/testapp1', options=['sclear'])
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
for i in range(1, 5) :
with open('%s/%s.html' % (jikji.settings.OUTPUT_ROOT, i), 'r') as f:
c = f.read()
assert c == '<div>%s</div>' % i
def test_generate2() :
""" Testing for generating of testapp2
"""
jikji = Jikji('tests/testapp2', options=['sclear'])
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
STATIC_ROOT = jikji.settings.STATIC_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
with open('%s/index.html' % OUTPUT_ROOT, 'r') as f : c = f.read()
assert c == '<p>Hello</p><i>home.html</i>'
with open('%s/README.md' % OUTPUT_ROOT, 'r') as f: c = f.read()
with open('%s/README.md' % STATIC_ROOT, 'r') as f: c2 = f.read()
assert c == c2
with open('%s/requirements.txt' % OUTPUT_ROOT, 'r') as f :
c = f.read()
assert c == 'jikji>=2.0\nrequests>=2.11'
def test_generate3() :
""" Testing for generating of testapp3
"""
jikji = Jikji('tests/testapp3')
OUTPUT_ROOT = jikji.settings.OUTPUT_ROOT
if os.path.exists( OUTPUT_ROOT ) :
shutil.rmtree( OUTPUT_ROOT )
jikji.generate()
with open('%s/event/2/index.html' % OUTPUT_ROOT, 'r') as f : c = f.read()
assert c == '<div>Event: 2</div>'
| mit | -1,156,732,441,587,812,000 | 18.277108 | 74 | 0.63375 | false |
fffonion/xeHentai | xeHentai/i18n/__init__.py | 1 | 1102 | #!/usr/bin/env python
# coding:utf-8
# Contributor:
# fffonion <[email protected]>
import importlib
from ..const import *
from . import en_us as lng_fallback
try:
_locale = LOCALE.lower() if LOCALE else 'en_us'
if _locale in ('zh_cn', 'zh_sg'):
_locale = 'zh_hans'
elif _locale in ('zh_tw', 'zh_hk', 'zh_mo'):
_locale = 'zh_hant'
lng = importlib.import_module("%s.i18n.%s" % (SCRIPT_NAME, _locale))
except (ImportError, ValueError):
lng = lng_fallback
class _(object):
def c(cls, code):
_ = code not in lng.err_msg and \
(code not in lng_fallback.err_msg and \
(cls.ERR_NOMSG % code) or \
lng_fallback.err_msg[code] ) or \
lng.err_msg[code]
return _ if PY3K else (
_ if isinstance(_, unicode) else _.decode('utf-8')) # cls.ERR_NOMSG % code is unicode
def __getattr__(cls, idx):
_ = not hasattr(lng, idx) and \
getattr(lng_fallback, idx) or \
getattr(lng, idx)
return _ if PY3K else _.decode('utf-8')
i18n = _()
| gpl-3.0 | -3,419,455,272,191,152,000 | 28.783784 | 97 | 0.549002 | false |
hubert667/AIR | src/scripts/kmeansScipy.py | 1 | 3802 | import random, pickle, os, sys
import numpy as np
from clusterData import *
from scipy.cluster.vq import kmeans,vq,whiten
class KMeans:
def __init__(self, fK, tK, filename, typeDataset):
self.queryRankerList = []
self.bestKClusterGroup = []
self.queryRankerDict = {}
self.fromK = fK
self.toK = tK + 1
self.bestRankersFile = filename
self.bestK = 0
self.dataset = typeDataset
def getClusters(self, thedata):
        # normalize the observations (whitening) before clustering
        data = whiten(thedata)
        # compute K-Means with K = self.fromK clusters
centroids,_ = kmeans(data,self.fromK)
# assign each sample to a cluster
idx,_ = vq(data,centroids)
return idx
def getData(self):
loadedFile = pickle.load( open( self.bestRankersFile, "rb" ) ) #dict-->print i, test.query_ranker[i]
for i in loadedFile.query_ranker.keys():
self.queryRankerDict[i] = loadedFile.query_ranker[i]
print len(self.queryRankerDict)
for i in self.queryRankerDict.keys():
if type(self.queryRankerDict[i]) == list:
for j in self.queryRankerDict[i]:
self.queryRankerList.append(j)
else:
self.queryRankerList.append(self.queryRankerDict[i])
data = np.array(self.queryRankerList)
return data
def runScript(self):#"bestRanker.p" sys.argv[1]
#commented out part is for test purposes
#data = np.vstack((random(150,2) + np.array([.5,.5]),random(150,2), random(150,2) + np.array([2.5,2.5]), rand(150,2) + np.array([10.5,10.5])))
data = self.getData()
dataToClusters = self.getClusters(data) #list > list(cluster#) > np.array,np.array etc...
dataToClusters = list(dataToClusters)
clusterDataObject = clusterData()
data = list(data)
#make object ---> dict[clusterNumber:int] = list of all rankers (where rankers are also lists)
for i in range(len(dataToClusters)):
if not dataToClusters[i] in clusterDataObject.clusterToRanker.keys():
clusterDataObject.clusterToRanker[dataToClusters[i]] = [list(data[i])]
else:
clusterDataObject.clusterToRanker[dataToClusters[i]].append(list(data[i]))
#make object ---> dict[queryID:string] = list of cluster numbers as ints
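        # Illustrative shape of the two mappings built here (hedged example):
        #   clusterToRanker = {0: [[0.1, 0.4, ...], ...], 1: [...], ...}
        #   queryToCluster  = {'query-42': [0, 0, 3], ...}
        # i.e. each cluster maps to the list of ranker weight vectors assigned
        # to it, and each query id maps to the cluster numbers of its rankers.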
for i in clusterDataObject.clusterToRanker:#for each cluster
for j in clusterDataObject.clusterToRanker[i]:#for each ranker in cluster
for q in self.queryRankerDict:#for each query
for r in self.queryRankerDict[q]:#for each ranker in query
if list(r) == j:#if ranker in query is equal to j (current ranker in cluster)
if q in clusterDataObject.queryToCluster:#if query key exists in dictionary
clusterDataObject.queryToCluster[q].append(i)
else:
clusterDataObject.queryToCluster[q] = [i]
for i in clusterDataObject.queryToCluster:
print i, len(clusterDataObject.queryToCluster[i]), clusterDataObject.queryToCluster[i]
for i in clusterDataObject.clusterToRanker:
print i, len(clusterDataObject.clusterToRanker[i])#, clusterDataObject.clusterToRanker[i]
if not os.path.exists("ClusterData"):
os.makedirs("ClusterData")
pickle.dump(clusterDataObject, open("ClusterData/"+self.dataset+".data", "wb"))
return clusterDataObject.queryToCluster, clusterDataObject.clusterToRanker
| gpl-3.0 | -3,934,938,312,269,193,000 | 41.701149 | 150 | 0.593898 | false |
nuagenetworks/tempest | tempest/common/dynamic_creds.py | 1 | 17614 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import six
from tempest import clients
from tempest.common import cred_client
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class DynamicCredentialProvider(cred_provider.CredentialProvider):
def __init__(self, identity_version, name=None, network_resources=None,
credentials_domain=None, admin_role=None, admin_creds=None):
"""Creates credentials dynamically for tests
A credential provider that, based on an initial set of
admin credentials, creates new credentials on the fly for
tests to use and then discard.
:param str identity_version: identity API version to use `v2` or `v3`
:param str admin_role: name of the admin role added to admin users
:param str name: names of dynamic resources include this parameter
when specified
:param str credentials_domain: name of the domain where the users
are created. If not defined, the project
domain from admin_credentials is used
:param dict network_resources: network resources to be created for
the created credentials
:param Credentials admin_creds: initial admin credentials
"""
super(DynamicCredentialProvider, self).__init__(
identity_version=identity_version, admin_role=admin_role,
name=name, credentials_domain=credentials_domain,
network_resources=network_resources)
self.network_resources = network_resources
self._creds = {}
self.ports = []
self.default_admin_creds = admin_creds
(self.identity_admin_client,
self.tenants_admin_client,
self.users_admin_client,
self.roles_admin_client,
self.domains_admin_client,
self.network_admin_client,
self.networks_admin_client,
self.routers_admin_client,
self.subnets_admin_client,
self.ports_admin_client,
self.security_groups_admin_client) = self._get_admin_clients()
# Domain where isolated credentials are provisioned (v3 only).
        # Use that of the admin account if none is configured.
self.creds_domain_name = None
if self.identity_version == 'v3':
self.creds_domain_name = (
self.default_admin_creds.project_domain_name or
self.credentials_domain)
self.creds_client = cred_client.get_creds_client(
self.identity_admin_client,
self.tenants_admin_client,
self.users_admin_client,
self.roles_admin_client,
self.domains_admin_client,
self.creds_domain_name)
def _get_admin_clients(self):
"""Returns a tuple with instances of the following admin clients
(in this order):
identity
network
"""
os = clients.Manager(self.default_admin_creds)
if self.identity_version == 'v2':
return (os.identity_client, os.tenants_client, os.users_client,
os.roles_client, None, os.network_client,
os.networks_client, os.routers_client, os.subnets_client,
os.ports_client, os.security_groups_client)
else:
return (os.identity_v3_client, os.projects_client,
os.users_v3_client, os.roles_v3_client, os.domains_client,
os.network_client, os.networks_client, os.routers_client,
os.subnets_client, os.ports_client,
os.security_groups_client)
def _create_creds(self, suffix="", admin=False, roles=None):
"""Create random credentials under the following schema.
        If the name contains a '.' it is the full class path of something, and
we don't really care. If it isn't, it's probably a meaningful name,
so use it.
For logging purposes, -user and -tenant are long and redundant,
don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
project_name = data_utils.rand_name(root) + suffix
project_desc = project_name + "-desc"
project = self.creds_client.create_project(
name=project_name, description=project_desc)
username = data_utils.rand_name(root) + suffix
user_password = data_utils.rand_password()
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self.creds_client.create_user(
username, user_password, project, email)
if 'user' in user:
user = user['user']
role_assigned = False
if admin:
self.creds_client.assign_user_role(user, project,
self.admin_role)
role_assigned = True
# Add roles specified in config file
for conf_role in CONF.auth.tempest_roles:
self.creds_client.assign_user_role(user, project, conf_role)
role_assigned = True
# Add roles requested by caller
if roles:
for role in roles:
self.creds_client.assign_user_role(user, project, role)
role_assigned = True
# NOTE(mtreinish) For a user to have access to a project with v3 auth
        # it must be assigned a role on the project. So we need to ensure that
# our newly created user has a role on the newly created project.
if self.identity_version == 'v3' and not role_assigned:
self.creds_client.create_user_role('Member')
self.creds_client.assign_user_role(user, project, 'Member')
creds = self.creds_client.get_credentials(user, project, user_password)
return cred_provider.TestResources(creds)
def _create_network_resources(self, tenant_id):
network = None
subnet = None
router = None
        # Make sure the requested network resource settings are consistent
if self.network_resources:
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
raise exceptions.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
raise exceptions.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
raise exceptions.InvalidConfiguration('DHCP requires a subnet')
data_utils.rand_name_root = data_utils.rand_name(self.name)
if not self.network_resources or self.network_resources['network']:
network_name = data_utils.rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
subnet_name = data_utils.rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
router_name = data_utils.rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
try:
if router:
self._clear_isolated_router(router['id'], router['name'])
if subnet:
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if network:
self._clear_isolated_network(network['id'],
network['name'])
except Exception as cleanup_exception:
msg = "There was an exception trying to setup network " \
"resources for tenant %s, and this error happened " \
"trying to clean them up: %s"
LOG.warning(msg % (tenant_id, cleanup_exception))
raise
return network, subnet, router
def _create_network(self, name, tenant_id):
resp_body = self.networks_admin_client.create_network(
name=name, tenant_id=tenant_id)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.network_resources:
resp_body = self.subnets_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp_body = self.subnets_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
break
except lib_exc.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise Exception(message)
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
resp_body = self.routers_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
self.routers_admin_client.add_router_interface(router_id,
subnet_id=subnet_id)
def get_credentials(self, credential_type):
if self._creds.get(str(credential_type)):
credentials = self._creds[str(credential_type)]
else:
if credential_type in ['primary', 'alt', 'admin']:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
else:
credentials = self._create_creds(roles=credential_type)
self._creds[str(credential_type)] = credentials
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n credentials: %s"
% credentials)
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled and
CONF.auth.create_isolated_networks):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
credentials.set_resources(network=network, subnet=subnet,
router=router)
LOG.info("Created isolated network resources for : \n"
+ " credentials: %s" % credentials)
return credentials
def get_primary_creds(self):
return self.get_credentials('primary')
def get_admin_creds(self):
return self.get_credentials('admin')
def get_alt_creds(self):
return self.get_credentials('alt')
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
# The roles list as a str will become the index as the dict key for
# the created credentials set in the dynamic_creds dict.
exist_creds = self._creds.get(str(roles))
        # If the force_new flag is True, two cred sets with the same roles are
        # needed; handle this by re-filing the old one under a separate index
        # so it is stored separately for cleanup
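        # Illustrative example: a second call with force_new=True for
        # roles=['admin', 'member'] re-files the existing entry under a key
        # like "['admin', 'member']-3" so it is still cleaned up later, and a
        # fresh credential set is then created under the original key.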
if exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self._creds))
self._creds[new_index] = exist_creds
del self._creds[str(roles)]
return self.get_credentials(roles)
def _clear_isolated_router(self, router_id, router_name):
client = self.routers_admin_client
try:
client.delete_router(router_id)
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
client = self.subnets_admin_client
try:
client.delete_subnet(subnet_id)
except lib_exc.NotFound:
LOG.warning('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.networks_admin_client
try:
net_client.delete_network(network_id)
except lib_exc.NotFound:
LOG.warning('network with name: %s not found for delete' %
network_name)
def _cleanup_default_secgroup(self, tenant):
nsg_client = self.security_groups_admin_client
resp_body = nsg_client.list_security_groups(tenant_id=tenant,
name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
try:
nsg_client.delete_security_group(secgroup['id'])
except lib_exc.NotFound:
LOG.warning('Security group %s, id %s not found for clean-up' %
(secgroup['name'], secgroup['id']))
def _clear_isolated_net_resources(self):
client = self.routers_admin_client
for cred in self._creds:
creds = self._creds.get(cred)
if (not creds or not any([creds.router, creds.network,
creds.subnet])):
continue
LOG.debug("Clearing network: %(network)s, "
"subnet: %(subnet)s, router: %(router)s",
{'network': creds.network, 'subnet': creds.subnet,
'router': creds.router})
if (not self.network_resources or
(self.network_resources.get('router') and creds.subnet)):
try:
client.remove_router_interface(
creds.router['id'],
subnet_id=creds.subnet['id'])
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
creds.router['name'])
self._clear_isolated_router(creds.router['id'],
creds.router['name'])
if (not self.network_resources or
self.network_resources.get('subnet')):
self._clear_isolated_subnet(creds.subnet['id'],
creds.subnet['name'])
if (not self.network_resources or
self.network_resources.get('network')):
self._clear_isolated_network(creds.network['id'],
creds.network['name'])
def clear_creds(self):
if not self._creds:
return
self._clear_isolated_net_resources()
for creds in six.itervalues(self._creds):
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
LOG.warning("user with name: %s not found for delete" %
creds.username)
try:
if CONF.service_available.neutron:
self._cleanup_default_secgroup(creds.tenant_id)
self.creds_client.delete_project(creds.tenant_id)
except lib_exc.NotFound:
LOG.warning("tenant with name: %s not found for delete" %
creds.tenant_name)
self._creds = {}
def is_multi_user(self):
return True
def is_multi_tenant(self):
return True
def is_role_available(self, role):
return True
| apache-2.0 | -7,163,394,698,792,381,000 | 43.933673 | 79 | 0.56756 | false |
whiteShtef/BlobStore | blobstore/app/app.py | 1 | 2467 | import os
import io
import flask
import flask_restful
from flask_cors import CORS, cross_origin
import flask_cache
import blobstorecore
import tools
import json
app = flask.Flask(__name__)
app.config["CACHE_DEFAULT_TIMEOUT"] = int(os.environ.get("CACHE_DEFAULT_TIMEOUT", 50))
app.config["CACHE_THRESHOLD"] = int(os.environ.get("CACHE_THRESHOLD", 100))
app.config["ADMIN_ACCESS_TOKEN"] = os.environ.get("ADMIN_ACCESS_TOKEN")
blobstorecore = blobstorecore.BlobStoreCore(from_envvars=True)
blobstorecore.connect()
api = flask_restful.Api(app)
CORS(app)
cache = flask_cache.Cache(app,config={
"CACHE_TYPE":"simple",
"CACHE_DEFAULT_TIMEOUT":app.config["CACHE_DEFAULT_TIMEOUT"],
"CACHE_THRESHOLD":app.config["CACHE_THRESHOLD"],
})
if os.environ.get("DEBUG", None) != None:
if os.environ.get("DEBUG").lower() == "true":
app.config["DEBUG"] = True
class GetBlob(flask_restful.Resource):
method_decorators = [cache.cached()]
def get(self, blobstore_id):
        # Try to get the document containing the data.
        # The lookup is wrapped in an exception handler because,
        # if the document doesn't exist in the database, gridfs
        # doesn't return None but raises an exception.
file, mimetype = blobstorecore.get_blob_by_id(blobstore_id)
if file != None:
return flask.send_file(file, mimetype = mimetype)
else:
return {"message":"not found"}, 404
api.add_resource(GetBlob, "/<string:blobstore_id>/")
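# Hedged usage sketch (host, port and token values are illustrative, not part
# of the app):
#
#   GET  /<blob_id>/                                        -> returns the blob (cached)
#   POST /add/?admin_access_token=...&mimetype=image/png    -> file upload in field "file"
#   GET  /delete/?admin_access_token=...&bsid=<blob_id>     -> attempts deletion
#
# e.g. with curl:
#   curl -F "file=@cat.png" "http://localhost:5000/add/?admin_access_token=TOKEN&mimetype=image/png"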
# Admin Endpoints
@app.route("/add/", methods=["POST"])
def AddBlob():
bin_data = flask.request.files["file"]
memory_buffer = io.BytesIO()
memory_buffer.write(bin_data.read())
memory_buffer.seek(0)
if flask.request.args["admin_access_token"] == app.config["ADMIN_ACCESS_TOKEN"]:
saved_id = blobstorecore.insert_blob(
memory_buffer,
flask.request.args["mimetype"]
)
return json.dumps({"bsid":saved_id})
else:
return json.dumps({"message":"not allowed"}), 403
@app.route("/delete/", methods=["GET"])
def DeleteBlob():
if flask.request.args["admin_access_token"] == app.config["ADMIN_ACCESS_TOKEN"]:
result = blobstorecore.delete_blob(flask.request.args["bsid"])
return json.dumps({"message":"attempted deletion, not guaranteed (default)"}), 200
else:
return json.dumps({"message":"not allowed"}), 403
| mit | -2,466,128,191,824,501,000 | 29.8375 | 91 | 0.645318 | false |
denmojo/pygrow | grow/commands/filter.py | 1 | 2992 | from grow.pods import pods
from grow.pods import storage
import click
import os
@click.command()
@click.argument('pod_path', default='.')
@click.option('--include-obsolete/--no-include-obsolete', default=False,
is_flag=True,
help='Whether to include obsolete messages. If false, obsolete'
' messages will be removed from the catalog template. By'
' default, Grow cleans obsolete messages from the catalog'
' template.')
@click.option('--localized/--no-localized', default=False, is_flag=True,
help='Whether to create localized message catalogs. Use this'
' option if content varies by locale.')
@click.option('--locale', type=str, multiple=True,
help='Which locale(s) to analyze when creating template catalogs'
' that contain only untranslated messages. This option is'
' only applicable when using --untranslated.')
@click.option('--path', type=str, multiple=True,
help='Which paths to extract strings from. By default, all paths'
' are extracted. This option is useful if you\'d like to'
' generate a partial messages file representing just a'
' specific set of files.')
@click.option('-o', type=str, default=None,
help='Where to write the extracted translation catalog. The path'
' must be relative to the pod\'s root.')
@click.option('--include-header', default=False, is_flag=True,
help='Whether to preserve headers at the beginning of catalogs.')
@click.option('--out_dir', type=str, default=None,
help='Where to write extracted localized translation catalogs.'
' The path must be relative to the pod\'s root. This option'
' is only applicable when using --localized.')
@click.option('-f', default=False, is_flag=True,
help='Whether to force an update when writing localized message'
' catalogs.')
def filter(pod_path, locale, o, include_obsolete, localized, path,
include_header, out_dir, f):
"""Filters untranslated messages from catalogs into new catalogs."""
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
pod = pods.Pod(root, storage=storage.FileStorage)
catalogs = pod.get_catalogs()
if not locale:
locale = catalogs.list_locales()
if out_dir and pod.file_exists(out_dir) and not f:
raise click.UsageError(
'{} exists. You must specify a directory that does not exist, or '
'use the "-f" flag, which will force update catalogs within the '
'specified directory.'.format(out_dir))
catalogs.filter(out_path=o, out_dir=out_dir,
include_obsolete=include_obsolete,
localized=localized, paths=path,
include_header=include_header, locales=locale)
| mit | 5,153,167,556,879,288,000 | 53.4 | 79 | 0.621658 | false |
alex4108/scLikesDownloader | scLikesDownloader.py | 1 | 12232 | import soundcloud as sc
from soundcloud.resource import Resource
import sys
import os
import urllib2
import re
class downloader:
def __init__(self, UserURL, PATH):
try:
self.client = sc.Client(client_id='',
client_secret='',
)
self.user = self.client.get('/resolve', url=UserURL)
self.path = PATH
self.reports = list()
except:
self.report('Constructor Exception Raised!')
self.report(sys.exc_info()[0])
self.report(sys.exc_info()[1])
return False
# Constructor
def __str__(self):
return 'Downloader Client v1 | Username: ' + self.user.username
def isMp3Valid(self, file_path):
is_valid = False
f = open(file_path, 'r')
block = f.read(1024)
frame_start = block.find(chr(255))
block_count = 0 #abort after 64k
while len(block)>0 and frame_start == -1 and block_count<64:
block = f.read(1024)
frame_start = block.find(chr(255))
block_count+=1
if frame_start > -1:
frame_hdr = block[frame_start:frame_start+4]
is_valid = frame_hdr[0] == chr(255)
mpeg_version = ''
layer_desc = ''
uses_crc = False
bitrate = 0
sample_rate = 0
padding = False
frame_length = 0
if is_valid:
is_valid = ord(frame_hdr[1]) & 0xe0 == 0xe0 #validate the rest of the frame_sync bits exist
if is_valid:
if ord(frame_hdr[1]) & 0x18 == 0:
mpeg_version = '2.5'
elif ord(frame_hdr[1]) & 0x18 == 0x10:
mpeg_version = '2'
elif ord(frame_hdr[1]) & 0x18 == 0x18:
mpeg_version = '1'
else:
is_valid = False
if is_valid:
if ord(frame_hdr[1]) & 6 == 2:
layer_desc = 'Layer III'
elif ord(frame_hdr[1]) & 6 == 4:
layer_desc = 'Layer II'
elif ord(frame_hdr[1]) & 6 == 6:
layer_desc = 'Layer I'
else:
is_valid = False
if is_valid:
uses_crc = ord(frame_hdr[1]) & 1 == 0
bitrate_chart = [
[0,0,0,0,0],
[32,32,32,32,8],
[64,48,40,48,16],
[96,56,48,56,24],
[128,64,56,64,32],
[160,80,64,80,40],
[192,96,80,96,40],
[224,112,96,112,56],
[256,128,112,128,64],
[288,160,128,144,80],
[320,192,160,160,96],
[352,224,192,176,112],
[384,256,224,192,128],
[416,320,256,224,144],
[448,384,320,256,160]]
bitrate_index = ord(frame_hdr[2]) >> 4
if bitrate_index==15:
is_valid=False
else:
bitrate_col = 0
if mpeg_version == '1':
if layer_desc == 'Layer I':
bitrate_col = 0
elif layer_desc == 'Layer II':
bitrate_col = 1
else:
bitrate_col = 2
else:
if layer_desc == 'Layer I':
bitrate_col = 3
else:
bitrate_col = 4
bitrate = bitrate_chart[bitrate_index][bitrate_col]
is_valid = bitrate > 0
if is_valid:
sample_rate_chart = [
[44100, 22050, 11025],
[48000, 24000, 12000],
[32000, 16000, 8000]]
sample_rate_index = (ord(frame_hdr[2]) & 0xc) >> 2
if sample_rate_index != 3:
sample_rate_col = 0
if mpeg_version == '1':
sample_rate_col = 0
elif mpeg_version == '2':
sample_rate_col = 1
else:
sample_rate_col = 2
sample_rate = sample_rate_chart[sample_rate_index][sample_rate_col]
else:
is_valid = False
if is_valid:
padding = ord(frame_hdr[2]) & 1 == 1
padding_length = 0
if layer_desc == 'Layer I':
if padding:
padding_length = 4
frame_length = (12 * bitrate * 1000 / sample_rate + padding_length) * 4
else:
if padding:
padding_length = 1
frame_length = 144 * bitrate * 1000 / sample_rate + padding_length
is_valid = frame_length > 0
# Verify the next frame
if(frame_start + frame_length < len(block)):
is_valid = block[frame_start + frame_length] == chr(255)
else:
offset = (frame_start + frame_length) - len(block)
block = f.read(1024)
if len(block) > offset:
is_valid = block[offset] == chr(255)
else:
is_valid = False
f.close()
return is_valid
def directory(self, path,extension = ''):
list_dir = []
list_dir = os.listdir(path)
count = 0
for file in list_dir:
if file.endswith(extension): # eg: '.txt'
count += 1
return count
'''
Gets list of likes
'''
def trackList(self, downloadable_only = False):
# API: Get favorites count, save data from /users/{id}/favorites
offset = 0
limit = 1
favorites = list()
retry = 0
#self.user.public_favorites_count = 5 # Test data
while offset < self.user.public_favorites_count:
if offset is -1:
break
try:
uri = '/users/' + str(self.user.id) + '/favorites'
favoritesToJoin = self.client.get(uri, offset=offset, limit=limit)
if len(favoritesToJoin) == 0 or not favoritesToJoin:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' is hiding. Trying again.'
if retry != 0 :
retry = retry + 1
else:
retry = 1
print '(Retry ' + str(retry) + ')'
if retry >= 5:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' won\'t retrieve. Aborting...'
self.report('(trackList) Can\'t select track #' + str(offset))
self.report('To download this manually, please visit https://api.soundcloud.com/users/' + str(self.user.id) + '/favorites/' + str(offset) + '.json')
retry = 0
offset += 1
elif hasattr(self.trackData(favoritesToJoin[0].id), 'download_url'):
if len(favoritesToJoin) < limit:
offset = offset + limit - len(favoritesToJoin)
if len(favorites) == 0:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' retrieved from API '
favorites.append(favoritesToJoin[0])
if offset + 1 < self.user.public_favorites_count:
offset += 1
else:
offset = -1
elif len(favorites) != 0 and not favorites[len(favorites)-1] == favoritesToJoin[0]:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' retrieved from API '
favorites.append(favoritesToJoin[0])
if offset + 1 < self.user.public_favorites_count:
offset += 1
else:
offset = -1
else:
print str(offset) + ' of ' + str(self.user.public_favorites_count) + ' isn\'t downloadable. Skipping...'
offset += 1
except:
self.report('(trackList) ' + str(sys.exc_info()[0]))
self.report('(trackList) ' + str(sys.exc_info()[1]))
self.report('(trackList) ' + str(favoritesToJoin[0].download_url))
print 'All tracks have been retrieved'
return favorites
'''
Adds a report for later viewing
:param str msg Message to report
'''
def report(self, msg):
self.reports.append(msg)
'''
Gets data on specific track
:param int trackid The Track's API ID
:return (Resource|Boolean) Track Resource or false on failure
'''
def trackData(self, trackid):
try:
track = self.client.get('/tracks/' + str(trackid))
except:
self.report('(trackData) Failed to select Track ID ' + str(trackid))
return False
return track
'''
Get data on specific user
    :param int userid User's ID in the API
:return Resource User Resource
'''
def getUser(self, userid):
try:
user = self.client.get('/users/' + str(userid))
except:
self.report('(getUser) Failed to select User ID ' + str(userid))
return False
return user
'''
Takes the inputted path and makes it system-safe by stripping characters
:param str path Path to clean
:return str Clean path
'''
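    # Illustrative example (hedged): with self.path == '/tmp/sc/' a call like
    #   validPath('Some Artist_track-name.mp3')
    # first strips /\:*?"<>| characters and then keeps only alphanumerics,
    # yielding roughly '/tmp/sc/SomeArtisttracknamemp3' (note the extension is
    # stripped as well).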
def validPath(self, path):
cleaned_up_filename = re.sub(r'[\/\\\:\*\?\"\<\>\|]', '', path)
return self.path + "".join(c for c in cleaned_up_filename if c.isalnum()).rstrip()
def getReports(self):
return self.reports
'''
Saves a file
    :param (Resource|int) track The Track Resource to download or the track's ID
    :return bool False on failure, True on success
'''
def saveFile(self, track):
if isinstance(track, int):
            track = self.trackData(track)
artist = self.getUser(track.user_id)
filepath = self.validPath(artist.username + '_' + track.permalink + '.' + track.original_format)
url = track.download_url + '?client_id=1fbdfddf1e6711cd0aff00f3b92e7cbf'
try:
req = urllib2.urlopen(urllib2.Request(url=url))
if req.getcode() != 200:
self.report('HTTPError Code: ' + str(req.getcode()) + ' url: ' + req.geturl())
return False
try:
if not os.path.exists(self.path):
os.makedirs(self.path)
if os.path.exists(filepath):
os.remove(filepath)
file = open(filepath, 'wb')
file.write(req.read())
file.close()
except:
raise
except:
self.report('(saveFile) Failed to save file! Manual download required! URL: ' + req.geturl())
self.report('(saveFile)' + str(sys.exc_info()[0]))
self.report('(saveFile)' + str(sys.exc_info()[1]))
return False
return True
| gpl-2.0 | 3,850,365,799,363,366,400 | 36.179331 | 172 | 0.435988 | false |
rbas/simocollector | simocollector/bin/install-simocollection.py | 1 | 8054 | #!/usr/bin/env python
import os
import argparse
import json
import subprocess
import urllib2
import getpass
import socket
import psutil
from simocollector.sender import BaseObjectSender, BaseMultiObjectSender
from simocollector.collectors import system_info_collector
from simocollector.utils import slugify
CRON_JOB_FILENAME = '/etc/cron.d/simo-collector'
CONFIG_FILE_DEFAULT_PATH = '/etc/simo/collector.conf'
class ServerInfoSender(BaseObjectSender):
name = 'server'
def __init__(self, config, ip, name):
if 'server_id' not in config:
config['server_id'] = ''
super(ServerInfoSender, self).__init__(config)
self.ip = ip
self.server_name = slugify(unicode(name))
def get_data(self):
raw_data = system_info_collector.get_system_info()
data = {
'distribution': raw_data['distro'].get('distribution', 'unknown'),
'release': raw_data['distro'].get('release', 'unknown'),
'cpu_model_name': raw_data['processor'].get('model-name', 'unknown'),
'cpu_number_of_cores': raw_data['processor'].get('cpu-cores', 0) or 0,
'name': self.server_name,
'slug': self.server_name,
'ip_address': self.ip,
}
return data
def add_additional_data(self, data):
return data
class DiskRegisterSender(BaseObjectSender):
name = 'disk'
data = {}
def get_data(self):
return self.data
def set_partition(self, partiton):
usage = psutil.disk_usage(partiton.mountpoint)
total = int(usage.total) / (1024 * 1024) # Convert to MB
self.data = {
'partition_name': partiton.device.replace('/dev/', ''),
'path': partiton.mountpoint,
'total': total,
'volume': partiton.device,
}
class NetDeviceRegisterSender(BaseMultiObjectSender):
name = 'netdevice'
def get_data(self):
raw_data = system_info_collector.get_network_traffic()
data = []
for device_name in raw_data.iterkeys():
device_data = {
'name': device_name,
}
data.append(self.add_additional_data(device_data))
return data
def _get_hostname():
return subprocess.Popen(['hostname'], stdout=subprocess.PIPE, close_fds=True).communicate()[0].strip()
def _get_host_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('google.com', 80))
ip = s.getsockname()[0]
s.close()
return ip
def create_cron_jobs():
publisher_path = subprocess.Popen(['which', 'simo-collection-publish.py'],
stdout=subprocess.PIPE, close_fds=True).communicate()[0].strip()
jobs = [
'*/2 * * * * root {0} -t loadavg'.format(publisher_path),
'*/6 * * * * root {0} -t networktraffic'.format(publisher_path),
'*/5 * * * * root sleep 20 && {0} -t cpu'.format(publisher_path),
'*/9 * * * * root {0} -t memory'.format(publisher_path),
'1 */3 * * * root {0} -t diskusage'.format(publisher_path),
'1 3 * * * root {0} -t diskio'.format(publisher_path),
]
if not os.path.exists(os.path.dirname(CRON_JOB_FILENAME)):
os.makedirs(os.path.dirname(CRON_JOB_FILENAME))
with open(CRON_JOB_FILENAME, 'w') as f:
f.write('{0}\n\n'.format('\n'.join(jobs)))
f.close()
def _wrap_with(code):
def inner(text, bold=False):
c = code
if bold:
c = "1;%s" % c
return "\033[%sm%s\033[0m" % (c, text)
return inner
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
def build_partition_list_for_registration():
str_bool_values = ('y', 'yes', 'true', 't', '1')
def str2bool(v):
return v.lower() in str_bool_values
partition_list = psutil.disk_partitions(all=True)
partition_list_to_register = []
for partition in partition_list:
result = raw_input(
            'Register partition {0} (yes values: {1}) [no]: '.format(partition.mountpoint, ', '.join(str_bool_values))) or 'n'
if str2bool(result):
partition_list_to_register.append(partition)
return partition_list_to_register
def _write_error(data):
import tempfile
error_lof_filename = os.path.join(tempfile.gettempdir(), 'simo_install_error.log')
with file(error_lof_filename, mode='w') as f:
f.write(data)
print(data)
print(red('Error log: {0}'.format(error_lof_filename)))
def main():
parser = argparse.ArgumentParser(description='SIMO Collector installer.')
parser.add_argument('path', default=CONFIG_FILE_DEFAULT_PATH, type=str, nargs='?',
help='path to configuration file (default {0}).'.format(CONFIG_FILE_DEFAULT_PATH))
args = parser.parse_args()
actual_user = getpass.getuser()
host_ip_address = _get_host_ip_address()
current_hostname = _get_hostname()
username = raw_input('Username [{0}]: '.format(actual_user)) or actual_user
password = getpass.getpass('Password: ')
server = raw_input('SIMO url: ')
ip_address = raw_input('Write server ip address [{0}]: '.format(host_ip_address)) or host_ip_address
hostname = raw_input('Server name [{0}]: '.format(current_hostname)) or current_hostname
config = {
'username': username,
'password': password,
'server': server
}
print('\n\n')
response_data = {}
try:
sender = ServerInfoSender(config, ip_address, hostname)
response = sender.send()
response_data = json.loads(response)
except urllib2.HTTPError, e:
error_log = e.fp.read()
_write_error(error_log)
exit(1)
if 'url' not in response_data:
print(red('Bad data response:'))
print(response_data)
exit(1)
config['server_id'] = response_data['url']
config['server'] = server
config['username'] = username
config['password'] = password
config_path = args.path
if not os.path.exists(os.path.dirname(config_path)):
os.makedirs(os.path.dirname(config_path))
response_list = []
partition_list_for_registration = build_partition_list_for_registration()
for partition in partition_list_for_registration:
try:
r_sender = DiskRegisterSender(config)
r_sender.set_partition(partition)
response_list.append(r_sender.send())
except urllib2.HTTPError, e:
print(red('Problem in disk registration process'))
error_log = e.fp.read()
_write_error(error_log)
exit(1)
config['disk'] = {}
config['path_list'] = []
for data in response_list:
disk = json.loads(data)
partition_name = disk['partition_name']
print('{0}: {1}'.format(green('Disk partition has been registred'), partition_name))
config['disk'][partition_name] = disk['url']
config['path_list'].append(disk['path'])
response = {}
try:
response = NetDeviceRegisterSender(config).send()
except urllib2.HTTPError, e:
print(red('Problem in disk registration process'))
error_log = e.fp.read()
_write_error(error_log)
exit(1)
config['networkdevices'] = {}
for data in response:
device_data = json.loads(data)
device_name = device_data['name']
print('{0}: {1}'.format(green('Network device has been registred'), device_name))
config['networkdevices'][device_name] = device_data['url']
with open(config_path, 'w') as f:
f.write(json.dumps(config, indent=4, sort_keys=True))
print('{0}: {1}'.format(green('Creating configuration on path'), config_path))
print('{0}: {1}'.format(green('Creating cron jobs'), CRON_JOB_FILENAME))
create_cron_jobs()
print('\n\n{0}'.format(green('Successfully installed SIMO Collector')))
if __name__ == '__main__':
main()
| mit | 6,288,456,403,123,903,000 | 29.976923 | 108 | 0.604048 | false |
ksmit799/Toontown-Source | toontown/safezone/DistributedCheckers.py | 1 | 29924 | from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
from TrolleyConstants import *
from direct.gui.DirectGui import *
from toontown.toonbase import TTLocalizer
from direct.distributed import DistributedNode
from direct.distributed.ClockDelta import globalClockDelta
from CheckersBoard import CheckersBoard
from direct.fsm import ClassicFSM, State
from direct.fsm import StateData
from toontown.toonbase.ToontownTimer import ToontownTimer
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from otp.otpbase import OTPGlobals
from direct.showbase import PythonUtil
class DistributedCheckers(DistributedNode.DistributedNode):
def __init__(self, cr):
NodePath.__init__(self, 'DistributedCheckers')
DistributedNode.DistributedNode.__init__(self, cr)
self.cr = cr
self.reparentTo(render)
self.boardNode = loader.loadModel('phase_6/models/golf/regular_checker_game.bam')
self.boardNode.reparentTo(self)
self.board = CheckersBoard()
self.exitButton = None
self.inGame = False
self.waiting = True
self.startButton = None
self.playerNum = None
self.turnText = None
self.isMyTurn = False
self.wantTimer = True
self.leaveButton = None
self.screenText = None
self.turnText = None
self.exitButton = None
self.numRandomMoves = 0
self.blinker = Sequence()
self.moveList = []
self.mySquares = []
self.myKings = []
self.isRotated = False
self.accept('mouse1', self.mouseClick)
self.traverser = base.cTrav
self.pickerNode = CollisionNode('mouseRay')
self.pickerNP = camera.attachNewNode(self.pickerNode)
self.pickerNode.setFromCollideMask(ToontownGlobals.WallBitmask)
self.pickerRay = CollisionRay()
self.pickerNode.addSolid(self.pickerRay)
self.myHandler = CollisionHandlerQueue()
self.traverser.addCollider(self.pickerNP, self.myHandler)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**//InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
self.clockNode = ToontownTimer()
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.setScale(0.3)
self.clockNode.hide()
self.playerColors = [Vec4(0, 0, 1, 1), Vec4(0, 1, 0, 1)]
self.tintConstant = Vec4(0.25, 0.25, 0.25, 0.5)
self.ghostConstant = Vec4(0, 0, 0, 0.8)
        self.startingPositions = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                                  [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]]
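        # Descriptive note (inferred from the game logic below): board square
        # states are 0 = empty, 1/2 = white/black piece, 3/4 = white/black
        # king; the two lists above are the players' twelve starting squares.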
self.knockSound = base.loadSfx('phase_5/audio/sfx/GUI_knock_1.mp3')
self.clickSound = base.loadSfx('phase_3/audio/sfx/GUI_balloon_popup.mp3')
self.moveSound = base.loadSfx('phase_6/audio/sfx/CC_move.mp3')
self.accept('stoppedAsleep', self.handleSleep)
self.fsm = ClassicFSM.ClassicFSM('ChineseCheckers', [State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, ['playing', 'gameOver']), State.State('playing', self.enterPlaying, self.exitPlaying, ['gameOver']), State.State('gameOver', self.enterGameOver, self.exitGameOver, ['waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
x = self.boardNode.find('**/locator*')
self.locatorList = x.getChildren()
tempList = []
for x in range(0, 32):
self.locatorList[x].setTag('GamePeiceLocator', '%d' % x)
tempList.append(self.locatorList[x].attachNewNode(CollisionNode('picker%d' % x)))
tempList[x].node().addSolid(CollisionSphere(0, 0, 0, 0.39))
for z in self.locatorList:
y = loader.loadModel('phase_6/models/golf/regular_checker_piecewhite.bam')
y.find('**/checker_k*').hide()
zz = loader.loadModel('phase_6/models/golf/regular_checker_pieceblack.bam')
zz.find('**/checker_k*').hide()
y.reparentTo(z)
y.hide()
zz.reparentTo(z)
zz.hide()
return
def setName(self, name):
self.name = name
def announceGenerate(self):
DistributedNode.DistributedNode.announceGenerate(self)
if self.table.fsm.getCurrentState().getName() != 'observing':
if base.localAvatar.doId in self.table.tableState:
self.seatPos = self.table.tableState.index(base.localAvatar.doId)
def handleSleep(self, task = None):
if self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.exitButtonPushed()
if task != None:
task.done
return
def setTableDoId(self, doId):
self.tableDoId = doId
self.table = self.cr.doId2do[doId]
self.table.setTimerFunc(self.startButtonPushed)
self.fsm.enterInitialState()
self.table.setGameDoId(self.doId)
def disable(self):
DistributedNode.DistributedNode.disable(self)
if self.leaveButton:
self.leaveButton.destroy()
            self.leaveButton = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
return
def delete(self):
DistributedNode.DistributedNode.delete(self)
self.table.gameDoId = None
self.table.game = None
if self.exitButton:
self.exitButton.destroy()
if self.startButton:
self.startButton.destroy()
self.clockNode.stop()
self.clockNode.hide()
self.table.startButtonPushed = None
self.ignore('mouse1')
self.ignore('stoppedAsleep')
self.fsm = None
self.table = None
return
def getTimer(self):
self.sendUpdate('requestTimer', [])
def setTimer(self, timerEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'waitingToBegin' and not self.table.fsm.getCurrentState().getName() == 'observing':
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(timerEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0 and timerEnd != 0:
if timeLeft > 60:
timeLeft = 60
self.clockNode.setPos(1.16, 0, -0.83)
self.clockNode.countdown(timeLeft, self.startButtonPushed)
self.clockNode.show()
else:
self.clockNode.stop()
self.clockNode.hide()
return
def setTurnTimer(self, turnEnd):
if self.fsm.getCurrentState() != None and self.fsm.getCurrentState().getName() == 'playing':
self.clockNode.stop()
time = globalClockDelta.networkToLocalTime(turnEnd)
timeLeft = int(time - globalClock.getRealTime())
if timeLeft > 0:
self.clockNode.setPos(-.74, 0, -0.2)
if self.isMyTurn:
self.clockNode.countdown(timeLeft, self.doNothing)
else:
self.clockNode.countdown(timeLeft, self.doNothing)
self.clockNode.show()
return
def gameStart(self, playerNum):
if playerNum != 255:
self.playerNum = playerNum
if self.playerNum == 1:
self.playerColorString = 'white'
else:
self.playerColorString = 'black'
self.playerColor = self.playerColors[playerNum - 1]
self.moveCameraForGame()
self.fsm.request('playing')
def sendTurn(self, playersTurn):
if self.fsm.getCurrentState().getName() == 'playing':
if playersTurn == self.playerNum:
self.isMyTurn = True
self.enableTurnScreenText(playersTurn)
def illegalMove(self):
self.exitButtonPushed()
def moveCameraForGame(self):
if self.table.cameraBoardTrack.isPlaying():
self.table.cameraBoardTrack.finish()
rotation = 0
if self.seatPos > 2:
if self.playerNum == 1:
rotation = 180
elif self.playerNum == 2:
rotation = 0
for x in self.locatorList:
x.setH(180)
self.isRotated = True
elif self.playerNum == 1:
rotation = 0
elif self.playerNum == 2:
rotation = 180
for x in self.locatorList:
x.setH(180)
self.isRotated = True
int = LerpHprInterval(self.boardNode, 4.2, Vec3(rotation, self.boardNode.getP(), self.boardNode.getR()), self.boardNode.getHpr())
int.start()
def enterWaitingToBegin(self):
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableExitButton()
self.enableStartButton()
def exitWaitingToBegin(self):
if self.exitButton:
self.exitButton.destroy()
self.exitButton = None
if self.startButton:
self.startButton.destroy()
self.exitButton = None
self.clockNode.stop()
self.clockNode.hide()
return
def enterPlaying(self):
self.inGame = True
self.enableScreenText()
if self.table.fsm.getCurrentState().getName() != 'observing':
self.enableLeaveButton()
def exitPlaying(self):
self.inGame = False
if self.leaveButton:
self.leaveButton.destroy()
            self.leaveButton = None
self.playerNum = None
if self.screenText:
self.screenText.destroy()
self.screenText = None
if self.turnText:
self.turnText.destroy()
self.turnText = None
self.clockNode.stop()
self.clockNode.hide()
return
def enterGameOver(self):
pass
def exitGameOver(self):
pass
def exitWaitCountdown(self):
self.__disableCollisions()
self.ignore('trolleyExitButton')
self.clockNode.reset()
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersGetUpButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.23), text_scale=0.8, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.4), scale=0.15, command=lambda self = self: self.exitButtonPushed())
return
def enableScreenText(self):
defaultPos = (-.8, -0.4)
if self.playerNum == 1:
message = TTLocalizer.CheckersColorWhite
color = Vec4(1, 1, 1, 1)
elif self.playerNum == 2:
message = TTLocalizer.CheckersColorBlack
color = Vec4(0, 0, 0, 1)
else:
message = TTLocalizer.CheckersObserver
color = Vec4(0, 0, 0, 1)
defaultPos = (-.8, -0.4)
self.screenText = OnscreenText(text=message, pos=defaultPos, scale=0.1, fg=color, align=TextNode.ACenter, mayChange=1)
def enableStartButton(self):
self.startButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersStartButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.23), text_scale=0.6, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.1), scale=0.15, command=lambda self = self: self.startButtonPushed())
return
def enableLeaveButton(self):
self.leaveButton = DirectButton(relief=None, text=TTLocalizer.ChineseCheckersQuitButton, text_fg=(1, 1, 0.65, 1), text_pos=(0, -.13), text_scale=0.5, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0.92, 0, 0.4), scale=0.15, command=lambda self = self: self.exitButtonPushed())
return
def enableTurnScreenText(self, player):
        playerOrder = [1, 4, 2, 5, 3, 6]
message1 = TTLocalizer.CheckersIts
if self.turnText != None:
self.turnText.destroy()
if player == self.playerNum:
message2 = TTLocalizer.ChineseCheckersYourTurn
color = (0, 0, 0, 1)
elif player == 1:
message2 = TTLocalizer.CheckersWhiteTurn
color = (1, 1, 1, 1)
elif player == 2:
message2 = TTLocalizer.CheckersBlackTurn
color = (0, 0, 0, 1)
self.turnText = OnscreenText(text=message1 + message2, pos=(-0.8, -0.5), scale=0.092, fg=color, align=TextNode.ACenter, mayChange=1)
return
def startButtonPushed(self):
self.sendUpdate('requestBegin')
self.startButton.hide()
self.clockNode.stop()
self.clockNode.hide()
def exitButtonPushed(self):
self.fsm.request('gameOver')
self.table.fsm.request('off')
self.clockNode.stop()
self.clockNode.hide()
self.table.sendUpdate('requestExit')
def mouseClick(self):
messenger.send('wakeup')
if self.isMyTurn == True and self.inGame == True:
mpos = base.mouseWatcherNode.getMouse()
self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
self.traverser.traverse(render)
if self.myHandler.getNumEntries() > 0:
self.myHandler.sortEntries()
pickedObj = self.myHandler.getEntry(0).getIntoNodePath()
pickedObj = pickedObj.getNetTag('GamePeiceLocator')
if pickedObj:
self.handleClicked(int(pickedObj))
def handleClicked(self, index):
self.sound = Sequence(SoundInterval(self.clickSound))
if self.moveList == []:
if index not in self.mySquares and index not in self.myKings:
return
self.moveList.append(index)
type = self.board.squareList[index].getState()
if type == 3 or type == 4:
self.moverType = 'king'
else:
self.moverType = 'normal'
self.blinker = Sequence()
col = self.locatorList[index].getColor()
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, self.tintConstant, col))
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, col, self.tintConstant))
self.blinker.loop()
self.sound.start()
elif index in self.mySquares or index in self.myKings:
for x in self.moveList:
self.locatorList[x].setColor(1, 1, 1, 1)
self.locatorList[x].hide()
self.blinker.finish()
self.blinker = Sequence()
col = self.locatorList[index].getColor()
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, self.tintConstant, col))
self.blinker.append(LerpColorInterval(self.locatorList[index], 0.7, col, self.tintConstant))
self.blinker.loop()
self.sound.start()
self.locatorList[self.moveList[0]].show()
self.moveList = []
self.moveList.append(index)
type = self.board.squareList[index].getState()
if type == 3 or type == 4:
self.moverType = 'king'
else:
self.moverType = 'normal'
else:
self.currentMove = index
lastItem = self.board.squareList[self.moveList[len(self.moveList) - 1]]
thisItem = self.board.squareList[index]
if self.mustJump == True:
if lastItem.getNum() == index:
self.blinker.finish()
self.d_requestMove(self.moveList)
self.isMyTurn = False
self.moveList = []
return
if self.checkLegalJump(lastItem, thisItem, self.moverType) == True:
col = self.locatorList[index].getColor()
self.locatorList[index].show()
self.sound.start()
if self.existsLegalJumpsFrom(index, self.moverType) == False:
self.moveList.append(index)
self.blinker.finish()
self.d_requestMove(self.moveList)
self.moveList = []
self.isMyTurn = False
else:
self.moveList.append(index)
if self.playerColorString == 'white':
x = self.locatorList[index].getChildren()[1]
x.show()
else:
x = self.locatorList[index].getChildren()[2]
x.show()
if self.moverType == 'king':
x.find('**/checker_k*').show()
self.locatorList[index].setColor(Vec4(0.5, 0.5, 0.5, 0.5))
elif self.checkLegalMove(lastItem, thisItem, self.moverType) == True:
self.moveList.append(index)
col = self.locatorList[index].getColor()
self.locatorList[index].show()
self.sound.start()
self.blinker.finish()
self.d_requestMove(self.moveList)
self.moveList = []
self.isMyTurn = False
def existsLegalJumpsFrom(self, index, peice):
if peice == 'king':
for x in range(4):
if self.board.squareList[index].getAdjacent()[x] != None and \
self.board.squareList[index].getJumps()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
jump = self.board.squareList[self.board.squareList[index].getJumps()[x]]
if adj.getState() == 0:
pass
elif adj.getState() == self.playerNum or adj.getState() == self.playerNum + 2:
pass
elif jump.getState() == 0:
if index not in self.moveList and jump.getNum() not in self.moveList:
return True
return False
elif peice == 'normal':
if self.playerNum == 1:
moveForward = [1, 2]
elif self.playerNum == 2:
moveForward = [0, 3]
for x in moveForward:
if self.board.squareList[index].getAdjacent()[x] != None and \
self.board.squareList[index].getJumps()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
jump = self.board.squareList[self.board.squareList[index].getJumps()[x]]
if adj.getState() == 0:
pass
elif adj.getState() == self.playerNum or adj.getState() == self.playerNum + 2:
pass
elif jump.getState() == 0:
if index not in self.moveList:
return True
return False
def existsLegalMovesFrom(self, index, peice):
if peice == 'king':
for x in self.board.squareList[index].getAdjacent():
if x != None:
if self.board.squareList[x].getState() == 0:
return True
return False
elif peice == 'normal':
if self.playerNum == 1:
moveForward = [1, 2]
elif self.playerNum == 2:
moveForward = [0, 3]
for x in moveForward:
if self.board.squareList[index].getAdjacent()[x] != None:
adj = self.board.squareList[self.board.squareList[index].getAdjacent()[x]]
if adj.getState() == 0:
return True
return False
return
def checkLegalMove(self, firstSquare, secondSquare, peice):
if firstSquare.getNum() not in self.mySquares and firstSquare.getNum() not in self.myKings:
return False
if self.playerNum == 1:
moveForward = [1, 2]
else:
moveForward = [0, 3]
if peice == 'king':
for x in range(4):
if firstSquare.getAdjacent()[x] != None:
if self.board.squareList[firstSquare.getAdjacent()[x]].getState() == 0 and secondSquare.getNum() in firstSquare.getAdjacent():
return True
return False
elif peice == 'normal':
for x in moveForward:
if firstSquare.getAdjacent()[x] != None and secondSquare.getNum() in firstSquare.getAdjacent():
if self.board.squareList[firstSquare.getAdjacent()[x]].getState() == 0 and firstSquare.getAdjacent().index(secondSquare.getNum()) == x:
return True
return False
return
def checkLegalJump(self, firstSquare, secondSquare, peice):
if firstSquare.getNum() not in self.mySquares and firstSquare.getNum() not in self.myKings and len(self.moveList) == 1:
return False
if self.playerNum == 1:
moveForward = [1, 2]
opposingPeices = [2, 4]
else:
moveForward = [0, 3]
opposingPeices = [1, 3]
if peice == 'king':
if secondSquare.getNum() in firstSquare.getJumps():
index = firstSquare.getJumps().index(secondSquare.getNum())
if self.board.squareList[firstSquare.getAdjacent()[index]].getState() in opposingPeices:
return True
else:
return False
elif peice == 'normal':
if secondSquare.getNum() in firstSquare.getJumps():
index = firstSquare.getJumps().index(secondSquare.getNum())
if index in moveForward:
if self.board.squareList[firstSquare.getAdjacent()[index]].getState() in opposingPeices:
return True
else:
return False
else:
return False
else:
return False
def d_requestMove(self, moveList):
self.sendUpdate('requestMove', [moveList])
def setGameState(self, tableState, moveList):
if moveList != []:
if self.board.squareList[moveList[0]].getState() == 1 or self.board.squareList[moveList[0]].getState() == 3:
playerColor = 'white'
else:
playerColor = 'black'
if self.board.squareList[moveList[0]].getState() <= 2:
self.animatePeice(tableState, moveList, 'normal', playerColor)
else:
self.animatePeice(tableState, moveList, 'king', playerColor)
else:
self.updateGameState(tableState)
def updateGameState(self, squares):
self.board.setStates(squares)
self.mySquares = []
self.myKings = []
messenger.send('wakeup')
isObserve = False
if self.playerNum == None:
self.playerNum = 1
self.playerColorString = 'white'
isObserve = True
for xx in range(32):
for blah in self.locatorList[xx].getChildren():
blah.hide()
if self.locatorList[xx].getChildren().index(blah) != 0:
blah1 = blah.find('**/checker_k*')
owner = self.board.squareList[xx].getState()
if owner == self.playerNum:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').hide()
else:
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').hide()
self.mySquares.append(xx)
elif owner == 0:
self.hideChildren(self.locatorList[xx].getChildren())
elif owner == self.playerNum + 2:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').show()
else:
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').show()
self.myKings.append(xx)
elif owner <= 2:
if self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').hide()
else:
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').hide()
elif self.playerColorString == 'white':
x = self.locatorList[xx].getChildren()[2]
x.show()
x.find('**/checker_k*').show()
else:
x = self.locatorList[xx].getChildren()[1]
x.show()
x.find('**/checker_k*').show()
if isObserve == True:
self.playerNum = None
self.playerColorString = None
return
self.mustJump = False
self.hasNormalMoves = False
for x in self.myKings:
if self.existsLegalJumpsFrom(x, 'king') == True:
self.mustJump = True
break
else:
self.mustJump = False
if self.mustJump == False:
for x in self.mySquares:
if self.existsLegalJumpsFrom(x, 'normal') == True:
self.mustJump = True
break
else:
self.mustJump = False
if self.mustJump != True:
for x in self.mySquares:
if self.existsLegalMovesFrom(x, 'normal') == True:
self.hasNormalMoves = True
break
else:
self.hasNormalMoves = False
if self.hasNormalMoves == False:
for x in self.myKings:
if self.existsLegalMovesFrom(x, 'king') == True:
self.hasNormalMoves = True
break
else:
self.hasNormalMoves = False
if self.mustJump == False and self.hasNormalMoves == False:
pass
return
def hideChildren(self, nodeList):
for x in range(1, 2):
nodeList[x].hide()
def animatePeice(self, tableState, moveList, type, playerColor):
messenger.send('wakeup')
if playerColor == 'white':
gamePeiceForAnimation = loader.loadModel('phase_6/models/golf/regular_checker_piecewhite.bam')
else:
gamePeiceForAnimation = loader.loadModel('phase_6/models/golf/regular_checker_pieceblack.bam')
if type == 'king':
gamePeiceForAnimation.find('**/checker_k*').show()
else:
gamePeiceForAnimation.find('**/checker_k*').hide()
gamePeiceForAnimation.reparentTo(self.boardNode)
gamePeiceForAnimation.setPos(self.locatorList[moveList[0]].getPos())
if self.isRotated == True:
gamePeiceForAnimation.setH(180)
for x in self.locatorList[moveList[0]].getChildren():
x.hide()
checkersPeiceTrack = Sequence()
length = len(moveList)
for x in range(length - 1):
checkersPeiceTrack.append(Parallel(SoundInterval(self.moveSound), ProjectileInterval(gamePeiceForAnimation, endPos=self.locatorList[moveList[x + 1]].getPos(), duration=0.5)))
checkersPeiceTrack.append(Func(gamePeiceForAnimation.removeNode))
checkersPeiceTrack.append(Func(self.updateGameState, tableState))
checkersPeiceTrack.append(Func(self.unAlpha, moveList))
checkersPeiceTrack.start()
def announceWin(self, avId):
self.fsm.request('gameOver')
def unAlpha(self, moveList):
for x in moveList:
self.locatorList[x].setColorOff()
def doRandomMove(self):
import random
move = []
foundLegal = False
self.blinker.pause()
self.numRandomMoves += 1
while not foundLegal:
x = random.randint(0, 9)
for y in self.board.getAdjacent(self.mySquares[x]):
if y != None and self.board.getState(y) == 0:
move.append(self.mySquares[x])
move.append(y)
foundLegal = True
break
if move == []:
pass
playSound = Sequence(SoundInterval(self.knockSound))
playSound.start()
self.d_requestMove(move)
self.moveList = []
self.isMyTurn = False
if self.numRandomMoves >= 5:
self.exitButtonPushed()
return
def doNothing(self):
pass
| mit | 4,444,623,332,312,362,000 | 39.547425 | 363 | 0.554471 | false |
ilona-asa/LDSAproject | email_counter.py | 1 | 2018 | #!/usr/bin/env python
import os
rootdir ='enron_mail_20110402/maildir'
for user in os.listdir(rootdir):
sent_items = 0
sent = 0
_sent_mail = 0
inbox = 0
total = 0
for folder in os.listdir(rootdir+'/'+user):
# print '%s\t%s' % ((folder, os.path.isdir(folder)), 1)
#if os.path.isdir(folder) == True:
# for mail in os.listdir(rootdir+'/'+user+'/'+folder):
# if os.path.isdir(mail) == False:
# print '%s\t%s' % ('total', 1)
# print folder
if folder == 'sent_items':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
sent_items += 1
total += 1
# print '%s,%s,%s' % (user, folder, sent_items)
elif folder == 'sent':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
sent += 1
total += 1
# print '%s,%s,%s' % (user, folder, sent)
elif folder == '_sent_mail':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
_sent_mail += 1
total += 1
# print '%s,%s,%s' % (user, folder, _sent_mail)
elif folder == 'inbox':
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
inbox += 1
total += 1
else:
for mail in os.listdir(rootdir+'/'+user+'/'+folder):
total += 1
print '%s,%s,%s' % (user, 'sent_items', sent_items)
print '%s,%s,%s' % (user, 'sent', sent)
print '%s,%s,%s' % (user, '_sent_mail', _sent_mail)
print '%s,%s,%s' % (user, 'inbox', inbox)
print '%s,%s,%s' % (user, 'all', total) | mit | 7,093,846,238,697,374,000 | 41.957447 | 76 | 0.387017 | false |
graik/biskit | biskit/core/pdbparsePickle.py | 1 | 4284 | ## numpy-oldnumeric calls replaced by custom script; 09/06/2016
## Automatically adapted for numpy-oldnumeric Mar 26, 2007 by alter_code1.py
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Parse a pickled PDBModel from disc into a new PDBModel instance
"""
import biskit.tools as T
import biskit as B
from biskit.core.pdbparser import PDBParserError
from biskit.core.pdbparseModel import PDBParseModel
from biskit.core.localpath import LocalPath
class PDBParsePickle( PDBParseModel ):
"""
Parse a pickled PDBModel from disc into a new PDBModel instance
"""
@staticmethod
def supports( source ):
"""
The method is static and can thus be called directly with the parser
class rather than with an instance::
>>> if ParsePDBModel.supports( model ):
>>> ...
:return: True if the given source is supported by this parser
implementation (equivalent to isinstance( source, PDBModel) )
:rtype: bool
"""
return (type(source) is str) or isinstance(source, LocalPath)
@staticmethod
def description():
"""
The method is static and can thus be called directly with the parser
class rather than with an instance::
>>> if ParsePDBModel.description():
>>> ...
:return: short free text description of the supported format
:rtype: str
"""
return 'pickled PDBModel (file)'
def update( self, model, source, skipRes=None, updateMissing=0, force=0,
headPatterns=[] ):
"""
Update empty or missing fields of model from the source. The
model will be connected to the source via model.source.
Profiles that are taken from the source are labeled 'changed'=0.
The same holds for coordinates (xyzChanged=0).
However, existing profiles or coordinates or fields remain untouched.
:param model: existing model
:type model: PDBModel
:param source: source PDB file or pickled PDBModel or PDBModel object
:type source: str || file || PDBModel
:param skipRes: list residue names that should not be parsed
:type skipRes: [ str ]
:param updateMissing: check source for additional profiles [0]
:type updateMissing: 1|0
"""
try:
if force or updateMissing or self.needsUpdate( model ):
s = T.load( source )
super( PDBParsePickle, self ).update(
model, s, skipRes=skipRes, updateMissing=updateMissing,
force=force )
except Exception as why:
raise PDBParserError("Cannot unpickle source model from %s, "\
% str(source) + "Reason:\n" + str(why))
model.setSource( source )
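# Note (descriptive, not from the original module): PDBParsePickle is normally
# driven through the parse2new()/update() interface inherited from
# PDBParseModel, as exercised by the test case below.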
#############
## TESTING
#############
import biskit.test as BT
class Test(BT.BiskitTest):
"""Test case"""
def test_PDBParsePickle( self ):
"""PDBParsePickle test"""
import biskit.core.oldnumeric as N0
## loading output file from X-plor
if self.local:
print('Loading pickled model ..')
self.p = PDBParsePickle()
self.m = self.p.parse2new( T.testRoot('rec/1A2P_dry.model'))
self.assertAlmostEqual( N0.sum( self.m.centerOfMass() ),
114.18037, 5)
if __name__ == '__main__':
BT.localTest()
| gpl-3.0 | 1,253,569,983,927,946,800 | 32.732283 | 78 | 0.628385 | false |
jolyonb/edx-platform | lms/djangoapps/lti_provider/tests/test_signature_validator.py | 1 | 3845 | """
Tests for the SignatureValidator class.
"""
from __future__ import absolute_import
import ddt
from django.test import TestCase
from django.test.client import RequestFactory
from mock import patch
from lti_provider.models import LtiConsumer
from lti_provider.signature_validator import SignatureValidator
def get_lti_consumer():
"""
Helper method for all Signature Validator tests to get an LtiConsumer object.
"""
return LtiConsumer(
consumer_name='Consumer Name',
consumer_key='Consumer Key',
consumer_secret='Consumer Secret'
)
@ddt.ddt
class ClientKeyValidatorTest(TestCase):
"""
Tests for the check_client_key method in the SignatureValidator class.
"""
def setUp(self):
super(ClientKeyValidatorTest, self).setUp()
self.lti_consumer = get_lti_consumer()
def test_valid_client_key(self):
"""
Verify that check_client_key succeeds with a valid key
"""
key = self.lti_consumer.consumer_key
self.assertTrue(SignatureValidator(self.lti_consumer).check_client_key(key))
@ddt.data(
('0123456789012345678901234567890123456789',),
('',),
(None,),
)
@ddt.unpack
def test_invalid_client_key(self, key):
"""
Verify that check_client_key fails with a disallowed key
"""
self.assertFalse(SignatureValidator(self.lti_consumer).check_client_key(key))
@ddt.ddt
class NonceValidatorTest(TestCase):
"""
Tests for the check_nonce method in the SignatureValidator class.
"""
def setUp(self):
super(NonceValidatorTest, self).setUp()
self.lti_consumer = get_lti_consumer()
def test_valid_nonce(self):
"""
Verify that check_nonce succeeds with a key of maximum length
"""
nonce = '0123456789012345678901234567890123456789012345678901234567890123'
self.assertTrue(SignatureValidator(self.lti_consumer).check_nonce(nonce))
@ddt.data(
('01234567890123456789012345678901234567890123456789012345678901234',),
('',),
(None,),
)
@ddt.unpack
def test_invalid_nonce(self, nonce):
"""
Verify that check_nonce fails with badly formatted nonce
"""
self.assertFalse(SignatureValidator(self.lti_consumer).check_nonce(nonce))
class SignatureValidatorTest(TestCase):
"""
Tests for the custom SignatureValidator class that uses the oauthlib library
to check message signatures. Note that these tests mock out the library
itself, since we assume it to be correct.
"""
def setUp(self):
super(SignatureValidatorTest, self).setUp()
self.lti_consumer = get_lti_consumer()
def test_get_existing_client_secret(self):
"""
Verify that get_client_secret returns the right value for the correct
key
"""
key = self.lti_consumer.consumer_key
secret = SignatureValidator(self.lti_consumer).get_client_secret(key, None)
self.assertEqual(secret, self.lti_consumer.consumer_secret)
@patch('oauthlib.oauth1.SignatureOnlyEndpoint.validate_request',
return_value=(True, None))
def test_verification_parameters(self, verify_mock):
"""
Verify that the signature validaton library method is called using the
correct parameters derived from the HttpRequest.
"""
body = 'oauth_signature_method=HMAC-SHA1&oauth_version=1.0'
content_type = 'application/x-www-form-urlencoded'
request = RequestFactory().post('/url', body, content_type=content_type)
headers = {'Content-Type': content_type}
SignatureValidator(self.lti_consumer).verify(request)
verify_mock.assert_called_once_with(
request.build_absolute_uri(), 'POST', body, headers)
| agpl-3.0 | -1,902,899,896,321,596,400 | 31.041667 | 85 | 0.66788 | false |
tdickers/mitmproxy | test/netlib/http/test_cookies.py | 1 | 6365 | from netlib.http import cookies
from netlib.tutils import raises
def test_read_token():
tokens = [
[("foo", 0), ("foo", 3)],
[("foo", 1), ("oo", 3)],
[(" foo", 1), ("foo", 4)],
[(" foo;", 1), ("foo", 4)],
[(" foo=", 1), ("foo", 4)],
[(" foo=bar", 1), ("foo", 4)],
]
for q, a in tokens:
assert cookies._read_token(*q) == a
def test_read_quoted_string():
tokens = [
[('"foo" x', 0), ("foo", 5)],
[('"f\oo" x', 0), ("foo", 6)],
[(r'"f\\o" x', 0), (r"f\o", 6)],
[(r'"f\\" x', 0), (r"f" + '\\', 5)],
[('"fo\\\"" x', 0), ("fo\"", 6)],
[('"foo" x', 7), ("", 8)],
]
for q, a in tokens:
assert cookies._read_quoted_string(*q) == a
def test_read_pairs():
vals = [
[
"one",
[["one", None]]
],
[
"one=two",
[["one", "two"]]
],
[
"one=",
[["one", ""]]
],
[
'one="two"',
[["one", "two"]]
],
[
'one="two"; three=four',
[["one", "two"], ["three", "four"]]
],
[
'one="two"; three=four; five',
[["one", "two"], ["three", "four"], ["five", None]]
],
[
'one="\\"two"; three=four',
[["one", '"two'], ["three", "four"]]
],
]
for s, lst in vals:
ret, off = cookies._read_pairs(s)
assert ret == lst
def test_pairs_roundtrips():
pairs = [
[
"",
[]
],
[
"one=uno",
[["one", "uno"]]
],
[
"one",
[["one", None]]
],
[
"one=uno; two=due",
[["one", "uno"], ["two", "due"]]
],
[
'one="uno"; two="\due"',
[["one", "uno"], ["two", "due"]]
],
[
'one="un\\"o"',
[["one", 'un"o']]
],
[
'one="uno,due"',
[["one", 'uno,due']]
],
[
"one=uno; two; three=tre",
[["one", "uno"], ["two", None], ["three", "tre"]]
],
[
"_lvs2=zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g=; "
"_rcc2=53VdltWl+Ov6ordflA==;",
[
["_lvs2", "zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g="],
["_rcc2", "53VdltWl+Ov6ordflA=="]
]
]
]
for s, lst in pairs:
ret, off = cookies._read_pairs(s)
assert ret == lst
s2 = cookies._format_pairs(lst)
ret, off = cookies._read_pairs(s2)
assert ret == lst
def test_cookie_roundtrips():
pairs = [
[
"one=uno",
[["one", "uno"]]
],
[
"one=uno; two=due",
[["one", "uno"], ["two", "due"]]
],
]
for s, lst in pairs:
ret = cookies.parse_cookie_header(s)
assert ret == lst
s2 = cookies.format_cookie_header(ret)
ret = cookies.parse_cookie_header(s2)
assert ret == lst
def test_parse_set_cookie_pairs():
pairs = [
[
"one=uno",
[
["one", "uno"]
]
],
[
"one=un\x20",
[
["one", "un\x20"]
]
],
[
"one=uno; foo",
[
["one", "uno"],
["foo", None]
]
],
[
"mun=1.390.f60; "
"expires=sun, 11-oct-2015 12:38:31 gmt; path=/; "
"domain=b.aol.com",
[
["mun", "1.390.f60"],
["expires", "sun, 11-oct-2015 12:38:31 gmt"],
["path", "/"],
["domain", "b.aol.com"]
]
],
[
r'rpb=190%3d1%2616726%3d1%2634832%3d1%2634874%3d1; '
'domain=.rubiconproject.com; '
'expires=mon, 11-may-2015 21:54:57 gmt; '
'path=/',
[
['rpb', r'190%3d1%2616726%3d1%2634832%3d1%2634874%3d1'],
['domain', '.rubiconproject.com'],
['expires', 'mon, 11-may-2015 21:54:57 gmt'],
['path', '/']
]
],
]
for s, lst in pairs:
ret = cookies._parse_set_cookie_pairs(s)
assert ret == lst
s2 = cookies._format_set_cookie_pairs(ret)
ret2 = cookies._parse_set_cookie_pairs(s2)
assert ret2 == lst
def test_parse_set_cookie_header():
vals = [
[
"", None
],
[
";", None
],
[
"one=uno",
("one", "uno", ())
],
[
"one=uno; foo=bar",
("one", "uno", (("foo", "bar"),))
],
[
"one=uno; foo=bar; foo=baz",
("one", "uno", (("foo", "bar"), ("foo", "baz")))
],
]
for s, expected in vals:
ret = cookies.parse_set_cookie_header(s)
if expected:
assert ret[0] == expected[0]
assert ret[1] == expected[1]
assert ret[2].items(multi=True) == expected[2]
s2 = cookies.format_set_cookie_header(*ret)
ret2 = cookies.parse_set_cookie_header(s2)
assert ret2[0] == expected[0]
assert ret2[1] == expected[1]
assert ret2[2].items(multi=True) == expected[2]
else:
assert ret is None
def test_refresh_cookie():
# Invalid expires format, sent to us by Reddit.
c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2037 23:59:59 GMT; Path=/"
assert cookies.refresh_set_cookie_header(c, 60)
c = "MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"
assert "00:21:38" in cookies.refresh_set_cookie_header(c, 60)
c = "foo,bar"
with raises(ValueError):
cookies.refresh_set_cookie_header(c, 60)
# https://github.com/mitmproxy/mitmproxy/issues/773
c = ">=A"
assert cookies.refresh_set_cookie_header(c, 60)
# https://github.com/mitmproxy/mitmproxy/issues/1118
c = "foo:bar=bla"
assert cookies.refresh_set_cookie_header(c, 0)
c = "foo/bar=bla"
assert cookies.refresh_set_cookie_header(c, 0)
| mit | -1,637,221,477,777,298,200 | 24.769231 | 84 | 0.394973 | false |
jevinw/rec_utilities | babel_util/parsers/tree.py | 1 | 4791 | #!/usr/bin/env python
import logging
class TreeFile(object):
"""Handling functions for tree files, as produced by Infomap.
The file should be a plain text file with the following format:
<cluster_id> <score> <paper_id>
1:1:1:1 0.000021 "123456"
1:1:1:2 0.023122 "8675309"
"""
def __init__(self, stream, delimiter=' ', comment='#'):
"""Initializes a TreeFile for reading.
Args:
source: An iterable providing a line of input for each iteration.
delimiter: Character tree file is delimited by.
comment: Lines starting with this character should be skipped
"""
self.delimiter = delimiter
self.stream = stream
self.comment = comment
def to_dict(self, on_collide="error", transform=None):
"""Converts a TreeFile to a dictionary. Consumes all of stream.
This might consume all available memory if the input stream is large.
Args:
on_collide: If a value already exists in the dictionary what should
happen. Options are:
error - raise an exception
warn - log a warning
info - log an info
transform: If provided a function that will be applied to the
values prior to storing them. This function should accept
a tuple of (cluster_id, score, paper_id):
("1:2:3:4", 0.12345, "A paper title"). If this function returns
None the paper will not be stored.
Returns:
Returns a dictionary using paper_id as the key and
(cluster_id, score, paper_id) as the value.
Raises:
KeyError: If on_collide="error" this signals a duplicate paper_id
in the tree file.
"""
results = dict()
for cid, score, pid in self:
if pid in results:
if on_collide == "error":
raise KeyError("Duplicate paper_id: {0}".format(pid))
elif on_collide == "warn":
logging.warning("Duplicate paper_id: {0}".format(pid))
elif on_collide == "info":
logging.info("Duplicate paper_id: {0}".format(pid))
if transform:
value = transform((cid, score, pid))
if value is not None:
results[pid] = value
else:
results[pid] = (cid, score)
return results
def __iter__(self):
self._iter = iter(self.stream)
return self
def __next__(self):
line = next(self._iter)
while self.comment and line.startswith(self.comment):
line = next(self._iter)
return self.parse_line(line)
def parse_line(self, line):
try:
v = line.split(self.delimiter)
v[2] = v[2].strip().strip('"')
return TreeRecord(v[0], v[2], v[1])
except ValueError:
print(line)
raise
except AttributeError:
print(line)
raise
except IndexError:
print(line)
raise
class TreeRecord(object):
__slots__ = ("pid", "local", "score", "parent")
def __init__(self, cluster, pid, score, delimiter=':'):
if not pid or pid == "":
raise ValueError("Invalid pid")
if score is None:
raise ValueError("Invalid score")
if cluster is None:
raise ValueError("Invalid cluster")
cluster = cluster.split(delimiter)
try:
cluster.pop() # Remove local order
self.local = delimiter.join(cluster)
if not self.local:
raise ValueError("Invalid cluster")
except IndexError:
self.local = None
try:
cluster.pop() # Remove local-cluster id
if len(cluster):
self.parent = delimiter.join(cluster)
else:
self.parent = None
except IndexError:
self.parent = None
score = float(score)
if score == 0:
score = -1.0 #Dynamo doesn't understand inf
# Strip whitespace and any quotes
self.pid = pid.strip().strip('"')
self.score = score
def __eq__(self, other):
if not isinstance(other, TreeRecord):
return False
return self.pid == other.pid and self.local == other.local and self.parent == other.parent
def __ne__(self, other):
return not self == other
def __str__(self):
return "<TreeRecord: %s %s %s>" % (self.local, self.pid, self.score)
def __repr__(self):
return "<TreeRecord: %s %s %s>" % (self.local, self.pid, self.score)
| agpl-3.0 | 51,877,899,420,969,890 | 31.591837 | 98 | 0.540388 | false |
evilchili/shiptrak | mmsn/settings/__init__.py | 1 | 4072 | """
Django settings for mmsn project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import socket
import dj_database_url
from django.utils.crypto import get_random_string
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd%ehat=&bb5pr+=unsxmpxq(57@1nx+okkyni3n9lk!a#pduq&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'shiptrak', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#"django.core.context_processors.request",
#"django.contrib.auth.context_processors.auth",
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shiptrak',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mmsn.urls'
WSGI_APPLICATION = 'mmsn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
import os
STATIC_ROOT = 'staticfiles'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'default': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
}
GOOGLE_MAPS_API_KEY = ''
CACHE_DIR = os.path.abspath(os.path.join(BASE_DIR, 'callsign_data'))
WINLINK_API_URL = "http://cms.winlink.org/"
h = socket.gethostname()
try:
(h, domain) = h.split('.', 2)
print("from mmsn.settings.{0} import *".format(h))
exec(("from mmsn.settings.{0} import *".format(h)), locals())
print("Overriding production configuration with local settings for host {}".format(h))
except Exception as e:
SECRET_KEY = get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
ALLOWED_HOSTS = ['*']
CSRF_TRUSTED_ORIGINS = ['*']
SESSION_COOKIE_SECURE = False
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
DATABASES = {
'default': dj_database_url.config()
}
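    # dj_database_url.config() builds the database settings from the
    # DATABASE_URL environment variable, e.g. (illustrative)
    # postgres://user:password@localhost:5432/mmsn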
DEBUG = False
GOOGLE_MAPS_API_KEY = 'AIzaSyDHRIu1CdX0O95_bTdyyiom4Z84uzKG0bw'
GOOGLE_ANALYTICS_ID = 'UA-52163451-1'
| mit | -5,726,247,871,517,285,000 | 26.513514 | 100 | 0.658644 | false |
igemsoftware/HFUT-China_2015 | design/search_part.py | 1 | 5122 | """
search_part.py realizes the part search
@author: Bowen
"""
from elasticsearch import Elasticsearch
from design.models import parts, teams, team_parts, part_papers, paper
import traceback
def getPart(partName):
"""
find the part with part name
@param partName: name of a part
@type partName: str
@return : part information
@rtype: dict
"""
try:
partObj = parts.objects.get(part_name=partName)
papers = part_papers.objects.filter(part=partObj)
result = {
'isSuccessful': True,
'isEmpty': False,
'part_id': partObj.part_id,
'ok': partObj.ok,
'part_name': partObj.part_name,
'nickname' : partObj.nickname,
'short_desc': partObj.short_desc,
'description': partObj.description,
'part_type': partObj.part_type,
'author': partObj.author,
'status': partObj.status,
'dominant': partObj.dominant,
'discontinued': partObj.discontinued,
'part_status': partObj.part_status,
'sample_status': partObj.sample_status,
'p_status_cache': partObj.p_status_cache,
's_status_cache': partObj.s_status_cache,
'in_stock': partObj.in_stock,
'results': partObj.results,
'favorite': partObj.favorite,
'ps_string': partObj.ps_string,
'scars' : partObj.scars,
'barcode' : partObj.barcode,
'notes' : partObj.notes,
'source' : partObj.source,
'premium' : partObj.premium,
'categories' : partObj.categories,
'sequence' : partObj.sequence,
'sequence_length' : partObj.sequence_length,
'part_url' : partObj.part_url,
'score' : str(partObj.score)
}
paper_list = list()
for paper in papers:
paper_info = {
'name': paper.paper.paper_name,
'url' : paper.paper.paper_url
}
paper_list.append(paper_info)
result['paper'] = paper_list
except:
traceback.print_exc()
result = {
'isSuccessful': False
}
return result
def ambiguousSearch(keyword, funcs):
"""
    fuzzy-search parts by the keyword and adjust the result ranking with the given functions
@param keyword: search keyword
@type keyword: str
@param funcs: functions
@type: str
@return: search result
@rtype: list
"""
es = Elasticsearch()
result = format_fuzzy_result(sort_result(fuzzy_search_parts(es, keyword), funcs))
return result
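# --- Usage sketch (illustrative; 'BBa_ExamplePart' and the function ids are
# hypothetical, and a local Elasticsearch index plus the Django models above
# are assumed to be available) ---
def _example_search_calls():
    detail = getPart('BBa_ExamplePart')        # exact lookup by part name
    hits = ambiguousSearch('promoter', '1_2')  # fuzzy search, boosted by functions 1 and 2
    return detail, hits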
def fuzzy_search_parts(es, keyword):
"""
fuzzy search part with elasticsearch
@param es: elasticsearch object
@type es: Elasticsearch
@param keyword: search keyword
@type keyword: str
@return: elasticsearch search result
@rtype: dict
"""
query_body = {
"from" : 0,
"size" : 80,
"query" : {
"fuzzy_like_this" : {
"fields" : ["part_name", "part_type", "short_desc"],
"like_text" : keyword,
"max_query_terms" : 80
}
}
}
result = es.search(index="biodesigners", doc_type="parts", body=query_body)
return result
def get_func_parts(func_list):
"""
get parts related to functions
@param func_list: functions
@type func_list: list
@return : parts related to functions
@rtype: list
"""
part_list = list()
for func_id in func_list:
team_list = teams.objects.filter(function_id=func_id)
for team_obj in team_list:
part_list.extend(team_parts.objects.filter(team=team_obj))
result = list()
for part_obj in part_list:
result.append(part_obj.part_id)
return result
def sort_result(es_result, funcs):
"""
sort result according to the functions
@param funcs: functions
@type funcs : list
@return : sorted result
@rtype: list
"""
if funcs == None:
func_parts = list()
else:
if funcs.endswith('_'):
funcs = funcs[:-1]
if funcs.startswith('_'):
funcs = funcs[1:]
func_parts = get_func_parts(funcs.split('_'))
hits = es_result['hits']['hits']
for item in hits:
if item['_source']['part_id'] in func_parts:
item['_score'] += 1.5
hits = sorted(hits, key = lambda x:x['_score'], reverse = True)
return hits[:40]
def format_fuzzy_result(hits):
"""
format search result
@param hits: searched parts
@type hists: list
    @return: part information
@rtype: list
"""
part_list = list()
for item in hits:
info = item['_source']
part_info = {
'part_name' : info['part_name'],
'part_id' : info['part_id'],
'part_type' : info['part_type'],
}
part_list.append(part_info)
return part_list
| apache-2.0 | 1,825,477,763,711,557,600 | 28.606936 | 85 | 0.545881 | false |
ruipgil/TrackToTrip | scripts/test/test_t_mode_changepoint.py | 1 | 2236 | from tracktotrip import Track
import tracktotrip.transportation_mode as tm
from changepy import pelt
from changepy.costs import normal_mean
import numpy as np
import matplotlib.pyplot as plt
temp_trk = [
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_1.gpx')[0],
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_2.gpx')[0],
Track.from_gpx('/Users/ruipgil/tracks/backup/2015-07-23_3.gpx')[0]
]
segs = []
for trke in temp_trk:
segs.extend(trke.segments)
trk = Track("", segs)
trk.compute_metrics()
trk.to_trip('', 0, 5.0, 0.15, 80, 0.3, '%Y-%m-%d')
def raw_vel(seg):
return [p.vel for p in seg.points]
def raw_acc(seg):
return [p.acc for p in seg.points]
def abs_vel(seg):
return [abs(p.vel) for p in seg.points]
def square_vel(seg):
return [p.vel**2 for p in seg.points]
def diff_vel(seg):
result = []
last = None
for p in seg.points:
if last is None:
result.append(0)
else:
result.append(last.vel-p.vel)
last = p
return result
def abs_diff_vel(seg):
return [abs(v) for v in diff_vel(seg)]
def square_diff_vel(seg):
return [v**3 for v in diff_vel(seg)]
def compute_metric(metric):
return [metric(seg) for seg in trk.segments]
colors = 'rgby'
def plot(ax, data, changepoints):
index = 0
for i, seg_data in enumerate(data):
ax.plot(range(index, len(seg_data) + index), seg_data, '-')
for changepoint in changepoints[i]:
ax.axvline(changepoint + index, color='k', linestyle='--')
index = index + len(seg_data)
def pelt_(data):
return pelt(normal_mean(data, np.std(data)), len(data))
plot_n = 1
plot_cols = 2
plot_rows = 3
def changepoint_for(metric):
global plot_n
ax = fig.add_subplot(plot_rows, plot_cols, plot_n)
data = compute_metric(metric)
changepoints = [pelt_(d) for d in data]
ax.set_title("%s (%d changepoints)" % (metric.__name__, sum([len(c) for c in changepoints])))
plot(ax, data, changepoints)
plot_n = plot_n + 1
fig = plt.figure()
changepoint_for(raw_vel)
changepoint_for(abs_vel)
changepoint_for(square_vel)
changepoint_for(diff_vel)
changepoint_for(square_diff_vel)
changepoint_for(raw_acc)
plt.show()
| mit | -9,132,506,782,880,904,000 | 24.123596 | 97 | 0.647138 | false |
pybel/pybel-tools | src/pybel_tools/analysis/neurommsig/export.py | 1 | 8861 | # -*- coding: utf-8 -*-
"""This module contains the functions needed to process the NeuroMMSig excel sheets as well as export as BEL.
To run, type :code:`python3 -m pybel_tools.analysis.neurommsig` in the command line
"""
import itertools as itt
import logging
import os
import re
import time
from functools import partial
from typing import Mapping, TextIO
import pandas as pd
import pybel
from bel_resources import get_bel_resource
from pybel import BELGraph
from pybel.dsl import Abundance, Gene
from pybel.utils import ensure_quotes
logger = logging.getLogger(__name__)
hgnc_symbol_pattern = re.compile(r"^[A-Z0-9-]+$|^C[0-9XY]+orf[0-9]+$")
snp_pattern = re.compile(r"^rs[0-9]+$")
snps_pattern_space = re.compile(r"^(rs[0-9]+)\s((rs[0-9]+)\s)*(rs[0-9]+)$")
snps_pattern_comma = re.compile(r"^(rs[0-9]+),((rs[0-9]+),)*(rs[0-9]+)$")
snps_pattern_space_comma = re.compile(r"^(rs[0-9]+), ((rs[0-9]+), )*(rs[0-9]+)$")
checked_by_anandhi = re.compile(r"No")
mirna_pattern = re.compile(r"^MIR.*$")
mirnas_pattern = re.compile(r"^(MIR.*),((MIR.*$),)*(MIR.*$)$")
def preprocessing_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel sheet.
:param path: filepath of the excel data
:return: df: pandas dataframe with excel data
"""
if not os.path.exists(path):
raise ValueError("Error: %s file not found" % path)
# Import Models from Excel sheet, independent for AD and PD
df = pd.read_excel(path, sheet_name=0, header=0)
# Indexes and column name
# [log.info(str(x)+': '+str((df.columns.values[x]))) for x in range (0,len(df.columns.values))]
# Starting from 4: Pathway Name
# Fill Pathway cells that are merged and are 'NaN' after deleting rows where there is no genes
for column_idx in (0, 1): # identifiers column then names columns
df.iloc[:, column_idx] = pd.Series(df.iloc[:, column_idx]).fillna(method='ffill')
# Number of gaps
# log.info(df.ix[:,6].isnull().sum())
df = df[df.iloc[:, 1].notnull()]
df = df.reset_index(drop=True)
# Fill NaN to zeros in PubMed identifier column
df.iloc[:, 2].fillna(0, inplace=True)
# Number of gaps in the gene column should be already zero
if (df.iloc[:, 1].isnull().sum()) != 0:
raise ValueError("Error: Empty cells in the gene column")
# Check current state
# df.to_csv('out.csv')
return df
def munge_cell(cell, line=None, validators=None):
"""Process a cell from the NeuroMMSig excel sheet."""
if pd.isnull(cell) or isinstance(cell, int):
return None
c = ' '.join(cell.split())
if validators is not None and all(re.match(validator, c) is None for validator in validators):
if line:
logger.info("Munge cell error: aprox in line: %s: %s", line, c)
return None
return [x.strip() for x in str(c).strip().split(',')]
def preprocessing_br_projection_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel file."""
if not os.path.exists(path):
raise ValueError(f"Error: {path} file not found")
return pd.read_excel(path, sheetname=0, header=0)
munge_snp = partial(munge_cell, validators=[snp_pattern, snps_pattern_space_comma])
mesh_alzheimer = "Alzheimer Disease" # Death to the eponym!
mesh_parkinson = "Parkinson Disease"
CANNED_EVIDENCE = 'Serialized from NeuroMMSigDB'
CANNED_CITATION = '28651363'
PATHWAY_ID_COLUMN_NAME = 'NeuroMMSig identifier'
PATHWAY_COLUMN_NAME = 'Subgraph Name'
GENE_COLUMN_NAME = 'Genes'
pmids_column = 'PMIDs'
snp_from_literature_column = 'SNPs from Literature (Aybuge)'
snp_from_gwas_column = 'Genome wide associated SNPs (Mufassra)'
snp_from_ld_block_column = 'LD block analysis (Mufassra)'
clinical_features_column = 'Imaging Features (Anandhi)'
snp_from_imaging_column = 'SNP_Image Feature (Mufassra & Anandhi)'
columns = [
GENE_COLUMN_NAME,
pmids_column,
snp_from_literature_column,
snp_from_gwas_column,
snp_from_ld_block_column,
clinical_features_column,
snp_from_imaging_column,
]
def preprocess(path: str) -> pd.DataFrame:
"""Preprocess a NeuroMMSig excel sheet, specified by a file path."""
df = preprocessing_excel(path)
df[snp_from_literature_column] = df[snp_from_literature_column].map(munge_snp)
df[snp_from_gwas_column] = df[snp_from_gwas_column].map(munge_snp)
df[snp_from_ld_block_column] = df[snp_from_ld_block_column].map(munge_snp)
df[clinical_features_column] = df[clinical_features_column].map(munge_cell)
df[clinical_features_column] = df[clinical_features_column].map(
lambda c: None
if c is not None and c[0] == 'No' else
c
)
df[snp_from_imaging_column] = df[snp_from_imaging_column].map(munge_snp)
return df
def get_nift_values() -> Mapping[str, str]:
"""Map NIFT names that have been normalized to the original names."""
r = get_bel_resource('https://arty.scai.fraunhofer.de/artifactory/bel/namespace/nift/NIFT.belns')
return {
name.lower(): name
for name in r['Values']
}
def write_neurommsig_bel(
file: TextIO,
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> None:
"""Write the NeuroMMSigDB excel sheet to BEL.
:param file: a file or file-like that can be writen to
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
graph = get_neurommsig_bel(df, disease, nift_values)
pybel.to_bel_script(graph, file)
def get_neurommsig_bel(
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> BELGraph:
"""Generate the NeuroMMSig BEL graph.
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
missing_features = set()
fixed_caps = set()
nift_value_originals = set(nift_values.values())
graph = BELGraph(
name=f'NeuroMMSigDB for {disease}',
description=f'SNP and Clinical Features for Subgraphs in {disease}',
authors='Daniel Domingo-Fernández, Charles Tapley Hoyt, Mufassra Naz, Aybuge Altay, Anandhi Iyappan',
contact='[email protected]',
version=time.strftime('%Y%m%d'),
)
for pathway, pathway_df in df.groupby(PATHWAY_COLUMN_NAME):
sorted_pathway_df = pathway_df.sort_values(GENE_COLUMN_NAME)
sliced_df = sorted_pathway_df[columns].itertuples()
for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:
gene = ensure_quotes(gene)
for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):
if not snp.strip():
continue
graph.add_association(
Gene('HGNC', gene),
Gene('DBSNP', snp),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
for clinical_feature in clinical_features or []:
if not clinical_feature.strip():
continue
if clinical_feature.lower() not in nift_values:
missing_features.add(clinical_feature)
continue
if clinical_feature not in nift_value_originals:
fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))
clinical_feature = nift_values[clinical_feature.lower()] # fix capitalization
graph.add_association(
Gene('HGNC', gene),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if clinical_snps:
for clinical_snp in clinical_snps:
graph.add_association(
Gene('DBSNP', clinical_snp),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if missing_features:
logger.warning('Missing Features in %s', disease)
for feature in missing_features:
logger.warning(feature)
if fixed_caps:
logger.warning('Fixed capitalization')
for broken, fixed in fixed_caps:
logger.warning('%s -> %s', broken, fixed)
return graph
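# Illustrative end-to-end usage (the file names here are hypothetical):
#   df = preprocess('neurommsig_alzheimer.xlsx')
#   with open('neurommsig_alzheimer.bel', 'w') as file:
#       write_neurommsig_bel(file, df, mesh_alzheimer, get_nift_values())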
| mit | 6,785,736,960,564,687,000 | 33.341085 | 112 | 0.611174 | false |
bloomberg/bqplot | tests/marks_test.py | 1 | 1593 | import bqplot
import numpy as np
import pytest
def test_scatter(figure):
x = np.arange(10, dtype=np.float64)
y = (x**2).astype(np.int32)
scatter = bqplot.Scatter(x=x, y=y)
assert scatter.x.dtype == np.float64
assert scatter.y.dtype == np.int32
assert scatter.x.shape == (10,)
assert scatter.y.shape == (10,)
def test_lines(scales):
# since lines can have 2d data, with irregularly shaped data, binary serialization
# doesn't work or is trickier
with pytest.raises(ValueError, match='.*Unsupported dtype object*'):
lines = bqplot.Lines(x=[[0, 1], [0, 1, 2]], y=[[0, 1], [1, 0, -1]], scales=scales)
lines = bqplot.Lines(x=[[0, 1], [0, 1]], y=[[0, 1], [1, 0]], scales=scales)
state = lines.get_state()
lines2 = bqplot.Lines(scales=scales)
lines2.set_state(state)
assert lines.x[0][0] == 0
assert lines.x[0][1] == 1
assert lines.x[1][1] == 1
def test_lines_ordinal(scale_ordinal, scale_y):
scales = {'x': scale_ordinal, 'y': scale_y}
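    # constructing with ordinal (string) x values should simply not raise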
lines = bqplot.Lines(x=list('ABC'), y=[1, 2, 3], scales=scales)
def test_bars(scales):
with pytest.raises(ValueError, match='.*Unsupported dtype object*'):
bars = bqplot.Bars(x=[0, 1], y=[[0, 1], [1, 0, -1]], scales=scales)
bars = bqplot.Bars(x=[0, 1], y=[[1, 2], [3, 4]], scales=scales)
state = bars.get_state()
bars2 = bqplot.Bars(scales=scales)
bars2.set_state(state)
assert bars.x[0] == 0
assert bars.x[1] == 1
assert bars.y[0][0] == 1
assert bars.y[0][1] == 2
assert bars.y[1][0] == 3
assert bars.y[1][1] == 4
| apache-2.0 | -216,256,783,966,949,120 | 30.235294 | 90 | 0.599498 | false |
ilya-epifanov/ansible | lib/ansible/cli/doc.py | 1 | 11138 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-doc is a script that shows documentation for Ansible modules. See
# http://docs.ansible.com/ for more details.
import fcntl
import datetime
import os
import struct
import termios
import traceback
import textwrap
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
class DocCLI(CLI):
    """ ansible-doc command line class """
BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
def __init__(self, args, display=None):
super(DocCLI, self).__init__(args, display)
self.module_list = []
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [module...]',
epilog='Show Ansible module documentation',
module_opts=True,
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available modules')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
self.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
try:
filename = module_loader.find_plugin(module)
if filename is None:
self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
except:
self.display.vvv(traceback.print_exc())
                    self.display.error("module %s has a documentation formatting error or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in doc['options'].iteritems():
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += DocCLI.get_snippet_text(doc)
else:
text += DocCLI.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception, e:
self.display.vvv(traceback.print_exc())
raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
self.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in self.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
tty_size = 0
if os.isatty(0):
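            # TIOCGWINSZ yields (rows, cols, xpixels, ypixels); index 1 is the terminal width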
tty_size = struct.unpack('HHHH',
fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
columns = max(60, tty_size)
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
                raise AnsibleError("module %s has a documentation formatting error or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
@staticmethod
def get_snippet_text(doc):
text = []
desc = CLI.tty_ify(" ".join(doc['short_description']))
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
if opt.get('required', False):
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, desc))
text.append('')
return "\n".join(text)
@staticmethod
def get_man_text(doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
desc = " ".join(doc['description'])
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
if opt.get('required', False):
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
desc = " ".join(opt['description'])
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt:
default = str(opt['default'])
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=opt_indent,
subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), initial_indent=" ",
subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), initial_indent=" ",
subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], basestring):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], basestring):
                maintainers.add(doc['maintainers'])
            else:
                maintainers.update(doc['maintainers'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| gpl-3.0 | -8,255,860,300,840,190,000 | 35.638158 | 158 | 0.532412 | false |
linktlh/Toontown-journey | otp/uberdog/GlobalOtpObjectUD.py | 1 | 1956 | from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
MANAGER_CLASS = ''
class GlobalOtpObjectUD(DistributedObjectGlobalUD):
notify = directNotify.newCategory('GlobalOtpObjectUD')
def announceGenerate(self):
DistributedObjectGlobalUD.announceGenerate(self)
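        # maps sender avatar IDs to the channels of the AI managers that have said hello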
self.senders2Mgrs = {}
def __makeAvMsg(self, field, values, recipient):
return self.air.dclassesByName['DistributedToonUD'].getFieldByName(field).aiFormatUpdate(
recipient, recipient, simbase.air.ourChannel, values)
def sendToAvatar(self, avId, field, values):
dg = self.__makeAvMsg(field, values, avId)
self.air.send(dg)
def __makeAIMsg(self, field, values, recipient):
return self.air.dclassesByName[MANAGER_CLASS].getFieldByName(field).aiFormatUpdate(
recipient, recipient, simbase.air.ourChannel, values)
def sendToAI(self, field, values, sender=None):
if not MANAGER_CLASS:
            self.notify.warning('An AI manager class is not implemented!')
return
if not sender:
sender = self.air.getAvatarIdFromSender()
dg = self.__makeAIMsg(field, values, self.senders2Mgrs.get(sender, sender + 8))
self.air.send(dg)
def hello(self, channel):
if not MANAGER_CLASS:
            self.notify.warning('An AI manager class is not implemented!')
return
self.senders2Mgrs[simbase.air.getAvatarIdFromSender()] = channel
        # Manager classes must implement their own response to hellos
self.sendToAI('UDResponse', [])
self.air.addPostRemove(self.__makeAIMsg('UDLost', [], channel))
def heartbeat(self, channel):
if simbase.air.getAvatarIdFromSender() not in self.senders2Mgrs:
self.senders2Mgrs[simbase.air.getAvatarIdFromSender()] = channel
self.sendUpdateToChannel(simbase.air.getAvatarIdFromSender(), 'heartbeatResponse', [])
| apache-2.0 | -1,745,958,492,094,661,600 | 37.352941 | 97 | 0.685072 | false |
xiangke/pycopia | mibs/pycopia/mibs/UCD_SNMP_MIB.py | 1 | 28770 | # python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import OBJECT_TYPE, NOTIFICATION_TYPE, MODULE_IDENTITY, Integer32, Opaque, enterprises, Counter32
from SNMPv2_TC import TEXTUAL_CONVENTION, DisplayString, TruthValue
class UCD_SNMP_MIB(ModuleObject):
path = '/usr/share/snmp/mibs/site/UCD-SNMP-MIB'
conformance = 3
name = 'UCD-SNMP-MIB'
language = 2
description = 'Deprecate the non-raw objects.'
# nodes
class ucdavis(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021])
name = 'ucdavis'
class memory(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4])
name = 'memory'
class systemStats(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11])
name = 'systemStats'
class ucdInternal(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 12])
name = 'ucdInternal'
class ucdExperimental(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 13])
name = 'ucdExperimental'
class logMatch(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16])
name = 'logMatch'
class version(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100])
name = 'version'
class snmperrs(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101])
name = 'snmperrs'
class ucdSnmpAgent(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250])
name = 'ucdSnmpAgent'
class hpux9(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 1])
name = 'hpux9'
class sunos4(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 2])
name = 'sunos4'
class solaris(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 3])
name = 'solaris'
class osf(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 4])
name = 'osf'
class ultrix(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 5])
name = 'ultrix'
class hpux10(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 6])
name = 'hpux10'
class netbsd1(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 7])
name = 'netbsd1'
class freebsd(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 8])
name = 'freebsd'
class irix(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 9])
name = 'irix'
class linux(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 10])
name = 'linux'
class bsdi(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 11])
name = 'bsdi'
class openbsd(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 12])
name = 'openbsd'
class win32(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 13])
name = 'win32'
class hpux11(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 14])
name = 'hpux11'
class unknown(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 250, 255])
name = 'unknown'
class ucdTraps(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251])
name = 'ucdTraps'
# macros
# types
class Float(pycopia.SMI.Basetypes.Opaque):
status = 1
ranges = Ranges(Range(7, 7))
# scalars
class memIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memErrorName(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class memTotalSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalReal(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailReal(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalSwapTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailSwapTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalRealTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memAvailRealTXT(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memTotalFree(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memMinimumSwap(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 12])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memShared(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 13])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memBuffer(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 14])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memCached(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 15])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memSwapError(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class memSwapErrorMsg(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 4, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class ssIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssErrorName(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class ssSwapIn(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSwapOut(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssIOSent(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssIOReceive(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSysInterrupts(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssSysContext(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuUser(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuSystem(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuIdle(ScalarObject):
access = 4
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class ssCpuRawUser(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 50])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawNice(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 51])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawSystem(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 52])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawIdle(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 53])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawWait(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 54])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawKernel(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 55])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssCpuRawInterrupt(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 56])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssIORawSent(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 57])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssIORawReceived(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 58])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssRawInterrupts(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 59])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class ssRawContexts(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 11, 60])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchMaxEntries(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionTag(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionDate(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionCDate(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionIdent(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 5])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionConfigureOptions(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 6])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class versionClearCache(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionUpdateConfig(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionRestartAgent(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 12])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionSavePersistentData(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 13])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class versionDoDebugging(ScalarObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 100, 20])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrIndex(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrNames(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class snmperrErrorFlag(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class snmperrErrMessage(ScalarObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 101, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
# columns
class prIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class prMin(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prMax(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrMessage(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class prErrFix(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 102])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class prErrFixCmd(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1, 103])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extCommand(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extResult(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extOutput(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class extErrFix(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 102])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class extErrFixCmd(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1, 103])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPath(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskDevice(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class dskMinimum(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskMinPercent(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskTotal(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskAvail(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskUsed(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPercent(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskPercentNode(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class dskErrorMsg(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laNames(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laLoad(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laConfig(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class laLoadInt(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laLoadFloat(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 6])
syntaxobject = Float
class laErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class laErrMessage(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class fileIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class fileName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class fileSize(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 4
units = 'kB'
class fileMax(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 4
units = 'kB'
class fileErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class fileErrorMsg(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchFilename(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchRegEx(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class logMatchGlobalCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchGlobalCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCurrentCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchCurrentCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCounter(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.Counter32
class logMatchCount(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchCycle(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
class logMatchErrorFlag(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 100])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class logMatchRegExCompilation(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1, 101])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class mrIndex(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.ObjectIdentifier
class mrModuleName(ColumnObject):
access = 4
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
# rows
class prEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([prIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 2, 1])
access = 2
columns = {'prIndex': prIndex, 'prNames': prNames, 'prMin': prMin, 'prMax': prMax, 'prCount': prCount, 'prErrorFlag': prErrorFlag, 'prErrMessage': prErrMessage, 'prErrFix': prErrFix, 'prErrFixCmd': prErrFixCmd}
class extEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([extIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 8, 1])
access = 2
columns = {'extIndex': extIndex, 'extNames': extNames, 'extCommand': extCommand, 'extResult': extResult, 'extOutput': extOutput, 'extErrFix': extErrFix, 'extErrFixCmd': extErrFixCmd}
class dskEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([dskIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 9, 1])
access = 2
columns = {'dskIndex': dskIndex, 'dskPath': dskPath, 'dskDevice': dskDevice, 'dskMinimum': dskMinimum, 'dskMinPercent': dskMinPercent, 'dskTotal': dskTotal, 'dskAvail': dskAvail, 'dskUsed': dskUsed, 'dskPercent': dskPercent, 'dskPercentNode': dskPercentNode, 'dskErrorFlag': dskErrorFlag, 'dskErrorMsg': dskErrorMsg}
class laEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([laIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 10, 1])
access = 2
columns = {'laIndex': laIndex, 'laNames': laNames, 'laLoad': laLoad, 'laConfig': laConfig, 'laLoadInt': laLoadInt, 'laLoadFloat': laLoadFloat, 'laErrorFlag': laErrorFlag, 'laErrMessage': laErrMessage}
class fileEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([fileIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 15, 1])
access = 2
columns = {'fileIndex': fileIndex, 'fileName': fileName, 'fileSize': fileSize, 'fileMax': fileMax, 'fileErrorFlag': fileErrorFlag, 'fileErrorMsg': fileErrorMsg}
class logMatchEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([logMatchIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 16, 2, 1])
access = 2
columns = {'logMatchIndex': logMatchIndex, 'logMatchName': logMatchName, 'logMatchFilename': logMatchFilename, 'logMatchRegEx': logMatchRegEx, 'logMatchGlobalCounter': logMatchGlobalCounter, 'logMatchGlobalCount': logMatchGlobalCount, 'logMatchCurrentCounter': logMatchCurrentCounter, 'logMatchCurrentCount': logMatchCurrentCount, 'logMatchCounter': logMatchCounter, 'logMatchCount': logMatchCount, 'logMatchCycle': logMatchCycle, 'logMatchErrorFlag': logMatchErrorFlag, 'logMatchRegExCompilation': logMatchRegExCompilation}
class mrEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([mrIndex], True)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 102, 1])
access = 2
columns = {'mrIndex': mrIndex, 'mrModuleName': mrModuleName}
# notifications (traps)
class ucdStart(NotificationObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251, 1])
class ucdShutdown(NotificationObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 2021, 251, 2])
# groups
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
| lgpl-2.1 | 7,706,995,773,191,167,000 | 27.97281 | 525 | 0.714599 | false |
CFDEMproject/LAMMPS | tools/moltemplate/src/ltemplify.py | 1 | 94070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""
ltemplify.py
The "ltemplify.py" script can be used to convert existing LAMMPS
input script and data files into a single .ttree file
(which includes both topology and force-field information
for a single molecule in your system).
Example:
ltemplify.py -name Mol file.in file.data > mol.ttree
This creates a template for a new type of molecule (named "Mol"),
consisting of all the atoms in the lammps files you included,
and saves this data in a single ttree file ("mol.ttree").
This file can be used with moltemplate/ttree to
define large systems containing this molecule.
"""
import sys
from ttree_lex import *
from lttree_styles import *
def Intify(s):
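    # e.g. Intify('42') -> 42, Intify('id7') -> 7, Intify('type3') -> 3;
    # anything else is returned unchanged (as a string).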
if s.isdigit():
return int(s)
elif s[0:2] == 'id':
return int(s[2:])
elif s[0:4] == 'type':
return int(s[4:])
else:
return s
def StringToInterval(sel_str, slice_delim='*'):
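    # e.g. StringToInterval('2*5') -> (2, 5), '7' -> (7, 7), '3*' -> (3, None), '*4' -> (None, 4)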
i_slice = sel_str.find(slice_delim)
if i_slice == -1:
if sel_str.isdigit():
a = int(sel_str)
b = int(sel_str)
else:
a = sel_str
b = sel_str
else:
a = sel_str[:i_slice]
b = sel_str[i_slice+len(slice_delim):]
if (((len(a)>0) and (not a.isdigit())) or
((len(b)>0) and (not b.isdigit()))):
raise InputError('Error: invalid selection string \"'+
sel_str+'\"\n')
if (len(a) > 0):
a = int(a)
else:
a = None
if (len(b) > 0):
b = int(b)
else:
b = None
return a,b
# Selections are simply lists of 2-tuples (pairs)
def LammpsSelectToIntervals(sel_str, slice_delim='*', or_delim=', '):
"""
This function converts a string such as "1*4 6 9*12" into
a list of tuples, for example: [(1,4), (6,6), (9,12)]
    In general, the list of intervals has the form:
[(a1,b1), (a2,b2), (a3,b3), ... ]
An atom is considered to belong to this selection
if it happens to lie within the closed interval [a,b]
for any pair of a,b values in the list of intervals.
If for a given pair a,b, either a or b is "None", then that a or b
value is not used to disqualify membership in the interval.
(Similar to -infinity or +infinity. In other words if a is set to None,
then to belong to the interval it is enough to be less than b.)
"""
selection_list = []
#tokens = sel_str.split(or_delim) <-- Not what we want when len(or_delim)>1
tokens = LineLex.TextBlock2Lines(sel_str, or_delim, keep_delim=False)
for token in tokens:
token = token.strip()
(a,b) = StringToInterval(token, slice_delim)
selection_list.append((a, b))
return selection_list
def IntervalListToMinMax(interval_list):
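    # e.g. IntervalListToMinMax([(1,4), (6,6), (9,12)]) -> (1, 12)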
min_a = None
max_b = None
for (a,b) in interval_list:
if ((not (type(a) is int)) or (not (type(b) is int))):
return None,None #only integer min/max makes sense. otherwise skip
if (min_a == None) or (a < min_a):
min_a = a
if (max_b == None) or (b > max_b):
max_b = b
return min_a, max_b
def BelongsToSel(i, sel):
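    # An empty (or None) selection accepts everything; non-numeric string ids are always accepted.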
if (i == None) or (sel == None) or (len(sel) == 0):
# If the user has not specified a selection for this category,
# then by default all objects are accepted
return True
elif (type(i) is str):
if i.isdigit():
i = int(i)
else:
return True
belongs = False
for interval in sel:
assert(len(interval) == 2)
if interval[0]:
if i >= interval[0]:
if (interval[1] == None) or (i <= interval[1]):
belongs = True
break
elif interval[1]:
if i <= interval[1]:
belongs = True
break
else:
# In that case, the user entered something like "*"
# which covers all possible numbers
belongs = True
break
return belongs
try:
    g_program_name = __file__.split('/')[-1] # = 'ltemplify.py'
g_version_str = '0.3'
g_date_str = '2012-12-11'
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
non_empty_output = False
no_warnings = True
indent = 2
cindent = 0
atomid_selection = []
atomtype_selection = []
molid_selection = []
mol_name = ''
min_sel_atomid = None
min_sel_atomtype = None
min_sel_bondid = None
min_sel_bondtype = None
min_sel_angleid = None
min_sel_angletype = None
min_sel_dihedralid = None
min_sel_dihedraltype = None
min_sel_improperid = None
min_sel_impropertype = None
max_sel_atomid = None
max_sel_atomtype = None
max_sel_bondid = None
max_sel_bondtype = None
max_sel_angleid = None
max_sel_angletype = None
max_sel_dihedralid = None
max_sel_dihedraltype = None
max_sel_improperid = None
max_sel_impropertype = None
needed_atomids = set([])
needed_atomtypes = set([])
needed_bondids = set([])
needed_bondtypes = set([])
needed_angleids = set([])
needed_angletypes = set([])
needed_dihedralids = set([])
needed_dihedraltypes = set([])
needed_improperids = set([])
needed_impropertypes = set([])
min_needed_atomtype = None
max_needed_atomtype = None
min_needed_bondtype = None
max_needed_bondtype = None
min_needed_angletype = None
max_needed_angletype = None
min_needed_dihedraltype = None
max_needed_dihedraltype = None
# To process the selections, we need to know the atom style:
atom_style_undefined = True
i_atomid = None
i_atomtype = None
i_molid = None
l_in_init = []
l_in_settings = []
l_in_masses = []
l_in_pair_coeffs = []
l_in_bond_coeffs = []
l_in_angle_coeffs = []
l_in_dihedral_coeffs = []
l_in_improper_coeffs = []
l_data_masses = []
l_data_bond_coeffs = []
l_data_angle_coeffs = []
l_data_dihedral_coeffs = []
l_data_improper_coeffs = []
l_data_pair_coeffs = []
l_data_atoms = []
l_data_velocities = []
l_data_bonds = []
l_data_angles = []
l_data_dihedrals = []
l_data_impropers = []
# class2 force fields
l_data_bondbond_coeffs = []
l_data_bondangle_coeffs = []
l_data_middlebondtorsion_coeffs = []
l_data_endbondtorsion_coeffs = []
l_data_angletorsion_coeffs = []
l_data_angleangletorsion_coeffs = []
l_data_bondbond13_coeffs = []
l_data_angleangle_coeffs = []
# non-point-like particles:
l_data_ellipsoids = []
l_data_lines = []
l_data_triangles = []
# automatic generation of bonded interactions by type:
l_data_angles_by_type = []
l_data_dihedrals_by_type = []
l_data_impropers_by_type = []
atoms_already_read = False
some_pair_coeffs_read = False
complained_atom_style_mismatch = False
argv = sys.argv
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-columns':
if i+1 >= len(argv):
raise InputError('Error: the \"'+argv[i]+'\" argument should be followed by a quoted\n'
' string which contains a space-delimited list of the names of\n'
                                 ' columns in the \"Atoms\" section of the LAMMPS data file.\n'
' If the list contains the symbols:\n'
' \"atom-ID\" or \"atomid\", they are interpreted\n'
' as unique atom ID numbers, and columns named\n'
' \"atom-type\" or \"atomtype\" are interpreted\n'
' as atom types. Finally, columns named\n'
' \"molecule-ID\", \"molecule\", or \"mol-ID\", or \"mol\"\n'
' are interpreted as unique molecule id numbers.\n'
'Example:\n'
' '+argv[i]+' \'atom-ID atom-type q polarizability molecule-ID x y z\'\n'
' defines a custom atom_style containing the properties\n'
' atom-ID atom-type q polarizability molecule-ID x y z\n'
' Make sure you enclose the entire list in quotes.\n');
column_names = argv[i+1].strip('\"\'').strip().split()
del(argv[i:i+2])
elif ((argv[i] == '-name') or
(argv[i] == '-molname') or
(argv[i] == '-molecule-name') or
(argv[i] == '-molecule_name')):
if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a molecule type name.\n')
cindent = 2
indent += cindent
mol_name = argv[i+1]
del(argv[i:i+2])
elif ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a an atom_style name.\n'
' (or single quoted string which includes a space-separated\n'
' list of column names).\n')
atom_style_undefined = False
column_names = AtomStyle2ColNames(argv[i+1])
if (argv[i+1].strip().split()[0] in g_style_map):
l_in_init.append((' '*indent) + 'atom_style ' + argv[i+1] + '\n')
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n')
del(argv[i:i+2])
elif ((argv[i].lower() == '-id') or
#(argv[i].lower() == '-a') or
#(argv[i].lower() == '-atoms') or
(argv[i].lower() == '-atomid') or
#(argv[i].lower() == '-atomids') or
(argv[i].lower() == '-atom-id')
#(argv[i].lower() == '-atom-ids') or
#(argv[i].lower() == '-$atom') or
#(argv[i].lower() == '-$atoms')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers\n'
' (or strings). These identify the group of atoms you want to\n'
                                 ' include in the template you are creating.\n')
atomid_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomid, max_sel_atomid = IntervalListToMinMax(atomid_selection)
del(argv[i:i+2])
elif ((argv[i].lower() == '-type') or
#(argv[i].lower() == '-t') or
(argv[i].lower() == '-atomtype') or
(argv[i].lower() == '-atom-type')
#(argv[i].lower() == '-atomtypes') or
#(argv[i].lower() == '-atom-types') or
#(argv[i].lower() == '-@atom') or
#(argv[i].lower() == '-@atoms') or
#(argv[i].lower() == '-@atomtype') or
#(argv[i].lower() == '-@atomtypes')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
' (or strings). These identify the group of atom types you want to\n'
                                 ' include in the template you are creating.\n')
atomtype_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomtype, max_sel_atomtype = IntervalListToMinMax(atomtype_selection)
del(argv[i:i+2])
elif ((argv[i].lower() == '-mol') or
#(argv[i].lower() == '-m') or
(argv[i].lower() == '-molid') or
#(argv[i].lower() == '-molids') or
(argv[i].lower() == '-mol-id') or
#(argv[i].lower() == '-mol-ids') or
#(argv[i].lower() == '-molecule') or
(argv[i].lower() == '-moleculeid') or
(argv[i].lower() == '-molecule-id')
#(argv[i].lower() == '-molecules') or
#(argv[i].lower() == '-molecule-ids') or
#(argv[i].lower() == '-$mol') or
#(argv[i].lower() == '-$molecule')
):
if i+1 >= len(argv):
                raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
                                 ' (or strings). These identify the group of molecules you want to\n'
                                 ' include in the template you are creating.\n')
molid_selection += LammpsSelectToIntervals(argv[i+1])
del(argv[i:i+2])
else:
i += 1
if atom_style_undefined:
# The default atom_style is "full"
column_names = AtomStyle2ColNames('full')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
#---------------------------------------------------------
#-- The remaining arguments are files that the user wants
#-- us to read and convert. It is typical to have
#-- multiple input files, because LAMMPS users often
#-- store their force field parameters in either the LAMMPS
#-- data files and input script files, or both.
#-- We want to search all of the LAMMPS input files in
#-- order to make sure we extracted all the force field
#-- parameters (coeff commands).
#---------------------------------------------------------
for i_arg in range(1,len(argv)):
fname = argv[i_arg]
try:
lammps_file = open(fname, 'r')
except IOError:
raise InputError('Error: unrecognized argument (\"'+fname+'\"),\n'
' OR unable to open file:\n'
'\n'
' \"'+fname+'\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name,\n'
' then there is a problem in your argument list.)\n')
sys.stderr.write('reading file \"'+fname+'\"\n')
atomid2type = {}
atomid2mol = {}
data_file_header_names = set(['LAMMPS Description',
'Atoms', 'Masses', 'Velocities', 'Bonds',
'Angles', 'Dihedrals', 'Impropers',
'Pair Coeffs',
'Bond Coeffs', 'Angle Coeffs',
'Dihedral Coeffs', 'Improper Coeffs',
#class2 force fields:
'BondBond Coeffs', 'BondAngle Coeffs',
'MiddleBondTorsion Coeffs', 'EndBondTorsion Coeffs',
'AngleTorsion Coeffs', 'AngleAngleTorsion Coeffs',
'BondBond13 Coeffs',
'AngleAngle Coeffs',
# non-point-like particles:
'Ellipsoids', 'Triangles', 'Lines',
#specifying bonded interactions by type:
'Angles By Type', 'Dihedrals By Type', 'Impropers By Type'
])
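        # Section headers of a LAMMPS data file: while reading one section, hitting any of
        # these headers pushes the line back onto the lexer and returns to the outer loop.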
lex=LineLex(lammps_file, fname)
lex.source_triggers = set(['include','import'])
# set up lex to accept most characters in file names:
lex.wordterminators = '(){}' + lex.whitespace
# set up lex to understand the "include" statement:
lex.source = 'include'
lex.escape = '\\'
while lex:
infile = lex.infile
lineno = lex.lineno
line = lex.ReadLine()
if (lex.infile != infile):
infile = lex.infile
lineno = lex.lineno
#sys.stderr.write(' processing \"'+line.strip()+'\", (\"'+infile+'\":'+str(lineno)+')\n')
if line == '':
break
tokens = line.strip().split()
if (len(tokens) > 0):
if ((tokens[0] == 'atom_style') and
atom_style_undefined):
sys.stderr.write(' Atom Style found. Processing: \"'+line.strip()+'\"\n')
if atoms_already_read:
raise InputError('Error: The file containing the \"atom_style\" command must\n'
' come before the data file in the argument list.\n'
                                         ' (The ltemplify program needs to know the atom style before reading\n'
' the data file. Either change the order of arguments so that the\n'
' LAMMPS input script file is processed before the data file, or use\n'
' the \"-atom_style\" command line argument to specify the atom_style.)\n')
column_names = AtomStyle2ColNames(line.split()[1])
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n\n')
l_in_init.append((' '*indent)+line.lstrip())
elif (tokens[0] in set(['units',
'angle_style',
'bond_style',
'dihedral_style',
                                        'improper_style',
'min_style',
'pair_style',
'pair_modify',
'special_bonds',
'kspace_style',
'kspace_modify'])):
l_in_init.append((' '*indent)+line.lstrip())
#if (line.strip() == 'LAMMPS Description'):
# sys.stderr.write(' processing \"'+line.strip()+'\"\n')
# # skip over this section
# while lex:
# line = lex.ReadLine()
# if line.strip() in data_file_header_names:
# lex.push_raw_text(line) # <- Save line for later
# break
elif (line.strip() == 'Atoms'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
atoms_already_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if ((len(tokens) <= i_atomid) or
(len(tokens) <= i_atomtype) or
((i_molid != None) and
(len(tokens) <= i_molid))):
raise InputError('Error: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
elif ((len(tokens) != len(column_names)) and
(not complained_atom_style_mismatch)):
complained_atom_style_mismatch = True
sys.stderr.write('Warning: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
# this is not a very serious warning.
#no_warnings = False <--no need. commenting out
atomid = Intify(tokens[i_atomid])
atomtype = Intify(tokens[i_atomtype])
molid = None
if i_molid:
molid = Intify(tokens[i_molid])
atomid2type[atomid] = atomtype
if i_molid:
atomid2mol[atomid] = molid
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[i_atomid] = '$atom:id'+tokens[i_atomid]
tokens[i_atomtype] = '@atom:type'+tokens[i_atomtype]
if i_molid:
tokens[i_molid] = '$mol:id'+tokens[i_molid]
l_data_atoms.append((' '*indent)+(' '.join(tokens)+'\n'))
needed_atomids.add(atomid)
needed_atomtypes.add(int(atomtype))
for atomtype in needed_atomtypes:
if type(atomtype) is int:
if ((min_needed_atomtype == None) or
(min_needed_atomtype > atomtype)):
min_needed_atomtype = atomtype
if ((max_needed_atomtype == None) or
(max_needed_atomtype < atomtype)):
max_needed_atomtype = atomtype
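                # The min/max needed atom types recorded here are used later
                # when expanding "*" wildcards in pair_coeff and mass commands
                # into explicit type ranges.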
elif (line.strip() == 'Masses'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomtype = Intify(tokens[0])
if BelongsToSel(atomtype, atomtype_selection):
#tokens[0] = '@atom:type'+tokens[0]
l_data_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Velocities'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_velocities.append((' '*indent)+(' '.join(tokens)+'\n'))
# non-point-like-particles:
elif (line.strip() == 'Ellipsoids'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_ellipsoids.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Lines'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_lines.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Triangles'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
                        molid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
l_data_triangles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Bonds'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 4):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Bonds section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$bond:id'+tokens[0]
#tokens[1] = '@bond:type'+tokens[1]
atomids = [None, None]
atomtypes = [None, None]
molids = [None, None]
in_selections = True
some_in_selection = False
for n in range(0,2):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_bonds.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS BONDS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,2):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected are bonded\n'
' to other atoms you didn\'t select.\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Angles'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line == '':
break
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 5):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Angles section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$angle:id'+tokens[0]
#tokens[1] = '@angle:type'+tokens[1]
atomids = [None, None, None]
atomtypes = [None, None, None]
molids = [None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,3):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_angles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS ANGLES\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,3):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 3-body \"Angle\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Dihedrals'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Dihedrals section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$dihedral:id'+tokens[0]
#tokens[1] = '@dihedral:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_dihedrals.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS DIHEDRALS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Dihedral\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Impropers'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Impropers section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$improper:id'+tokens[0]
#tokens[1] = '@improper:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_impropers.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS IMPROPERS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Improper\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Bond Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@bond:type'+tokens[0]
l_data_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedral Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Improper Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Pair Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Pair Coeffs section:\n'
' \"'+line.strip()+'\"\n')
atomtype_i_str = tokens[0]
if '*' in atomtype_i_str:
raise InputError('PROBLEM near or before '+ErrorLeader(infile, lineno)+'\n'
                                             ' As of 2012-7, moltemplate forbids use of the "*" wildcard\n'
' character in the \"Pair Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
if ((not i) or
BelongsToSel(i, atomtype_selection)):
i_str = '@atom:type'+str(i)
tokens[0] = i_str
l_data_pair_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'pair_coeff'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical pair_coeff command:\n'
' \"'+line.strip()+'\"\n')
l_in_pair_coeffs.append(' '*indent+line.strip())
elif (tokens[0] == 'mass'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical \"mass\" command:\n'
' \"'+line.strip()+'\"\n')
l_in_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'bond_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical bond_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@bond:type'+tokens[1]
l_in_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'angle_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical angle_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@angle:type'+tokens[1]
l_in_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'dihedral_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical dihedral_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@dihedral:type'+tokens[1]
l_in_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'improper_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical improper_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@improper:type'+tokens[1]
l_in_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
# -- class2 force fields --
elif (line.strip() == 'BondBond Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_bondbond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondAngle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_bondangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'MiddleBondTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_middlebondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'EndBondTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_endbondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_angletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngleTorsion Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_angleangletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondBond13 Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_bondbond13_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngle Coeffs'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type'+tokens[0]
l_data_angleangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angles By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_angles_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedrals By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedrals_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Impropers By Type'):
sys.stderr.write(' processing \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type'+tokens[0]
l_data_impropers_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
else:
sys.stderr.write(' Ignoring line \"'+line.strip()+'\"\n')
sys.stderr.write('\n\n')
# --- Now delete items that were not selected from the other lists ---
# --- MASSES ---
# delete masses for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_masses):
line = l_data_masses[i_line]
tokens = line.strip().split()
atomtype = Intify(tokens[0])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del(l_data_masses[i_line])
else:
tokens[0] = '@atom:type'+str(atomtype)
l_data_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- PAIR COEFFS ---
# delete data_pair_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pair_coeffs):
line = l_data_pair_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon = tokens[0].split(':')
assert(len(split_colon) == 2)
atomtype = Intify(split_colon[1])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del(l_data_pair_coeffs[i_line])
else:
i_line += 1
# delete in_pair_coeffs for atom we don't care about anymore:
i_line = 0
while i_line < len(l_in_pair_coeffs):
line = l_in_pair_coeffs[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
atomtype_j_str = tokens[2]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if ('*' in atomtype_j_str):
atomtype_j_tokens = atomtype_j_str.split('*')
if atomtype_j_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
j_a = min_sel_atomtype
else:
j_a = min_needed_atomtype
else:
j_a = Intify(atomtype_j_tokens[0])
if atomtype_j_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
j_b = max_sel_atomtype
else:
j_b = max_needed_atomtype
else:
j_b = Intify(atomtype_j_tokens[1])
else:
j_a = j_b = Intify(atomtype_j_str)
j_a_final = None
j_b_final = None
for j in range(j_a, j_b+1):
if ((j in needed_atomtypes) or (min_sel_atomtype <= j)):
j_a_final = j
break
for j in reversed(range(j_a, j_b+1)):
if ((j in needed_atomtypes) or (max_sel_atomtype >= j)):
j_b_final = j
break
#if j_a_final and j_b_final:
# if j_a_final == j_b_final:
# j_str = '@atom:type'+str(j_a_final)
# tokens[1] = j_str
# else:
# j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del(l_in_pair_coeffs[i_line])
elif (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
del(l_in_pair_coeffs[i_line])
for i in range(i_a_final, i_b_final+1):
for j in range(j_a_final, j_b_final+1):
if j >= i:
tokens[1] = '@atom:type'+str(i)
tokens[2] = '@atom:type'+str(j)
l_in_pair_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
tokens[1] = '@atom:type'+tokens[1]
tokens[2] = '@atom:type'+tokens[2]
l_in_pair_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
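    # Illustrative example of the wildcard expansion above: if atom types 1-3
    # are needed, a line such as
    #   pair_coeff 1*3 1*3 0.15 3.6
    # is replaced by one explicit line per (i,j) pair with j >= i, e.g.
    #   pair_coeff @atom:type1 @atom:type2 0.15 3.6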
# delete mass commands for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_in_masses):
line = l_in_masses[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
        if not (i_a_final and i_b_final):
del(l_in_masses[i_line])
elif ('*' in atomtype_i_str):
del(l_in_masses[i_line])
for i in range(i_a_final, i_b_final+1):
tokens[1] = '@atom:type'+str(i)
l_in_masses.insert(i_line, (' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
tokens[1] = '@atom:type'+str(i_a)
l_in_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- BONDS AND BOND COEFFS ---
# delete lines from data_bonds if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_bonds):
line = l_data_bonds[i_line]
tokens = line.strip().split()
assert(len(tokens) == 4)
bondid = Intify(tokens[0])
bondtype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$bond:id'+str(bondid)
tokens[1] = '@bond:type'+str(bondtype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
needed_bondids.add(bondid)
needed_bondtypes.add(bondtype)
l_data_bonds[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_bonds[i_line])
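    # At this point every "Bonds" line has been rewritten with moltemplate
    # variables, e.g. (illustrative) "1 5 12 13" becomes
    # "$bond:id1 @bond:type5 $atom:id12 $atom:id13".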
# delete data_bond_coeffs for bondtypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bond_coeffs):
line = l_data_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype = Intify(tokens[0])
if (not (bondtype in needed_bondtypes)):
del(l_data_bond_coeffs[i_line])
else:
tokens[0] = '@bond:type'+str(bondtype)
l_data_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_bond_coeffs for bondtypes we don't care about anymore:
for bondtype in needed_bondtypes:
if type(bondtype) is int:
if ((min_needed_bondtype == None) or
(min_needed_bondtype > bondtype)):
min_needed_bondtype = bondtype
if ((max_needed_bondtype == None) or
(max_needed_bondtype < bondtype)):
max_needed_bondtype = bondtype
i_line = 0
while i_line < len(l_in_bond_coeffs):
line = l_in_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype_str = tokens[1]
if ('*' in bondtype_str):
bondtype_tokens = bondtype_str.split('*')
if bondtype_tokens[0] == '':
i_a = min_needed_bondtype
else:
i_a = Intify(bondtype_tokens[0])
if bondtype_tokens[1] == '':
i_b = max_needed_bondtype
else:
i_b = Intify(bondtype_tokens[1])
else:
i_a = i_b = Intify(bondtype_str)
if i_a < min_needed_bondtype:
i_a = min_needed_bondtype
if i_b > max_needed_bondtype:
i_b = max_needed_bondtype
#if i_a == i_b:
# i_str = '@bond:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{bond:type'+str(j_a)+'}*@{bond:type'+str(j_b)+'}'
if ('*' in bondtype_str):
del(l_in_bond_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_bondtypes):
tokens[1] = '@bond:type'+str(i)
l_in_bond_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_bondtypes):
tokens[1] = '@bond:type'+str(i_a)
l_in_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_bond_coeffs[i_line])
# --- ANGLES AND ANGLE COEFFS ---
# delete lines from data_angles if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_angles):
line = l_data_angles[i_line]
tokens = line.strip().split()
assert(len(tokens) == 5)
angleid = Intify(tokens[0])
angletype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$angle:id'+str(angleid)
tokens[1] = '@angle:type'+str(angletype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
needed_angleids.add(angleid)
needed_angletypes.add(angletype)
l_data_angles[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_angles[i_line])
# delete data_angle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angle_coeffs):
line = l_data_angle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del(l_data_angle_coeffs[i_line])
else:
tokens[0] = '@angle:type'+str(angletype)
l_data_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_angle_coeffs for angletypes we don't care about anymore:
for angletype in needed_angletypes:
if type(angletype) is int:
if ((min_needed_angletype == None) or
(min_needed_angletype > angletype)):
min_needed_angletype = angletype
if ((max_needed_angletype == None) or
(max_needed_angletype < angletype)):
max_needed_angletype = angletype
i_line = 0
while i_line < len(l_in_angle_coeffs):
line = l_in_angle_coeffs[i_line]
tokens = line.strip().split()
angletype_str = tokens[1]
if ('*' in angletype_str):
angletype_tokens = angletype_str.split('*')
if angletype_tokens[0] == '':
i_a = min_needed_angletype
else:
i_a = Intify(angletype_tokens[0])
if angletype_tokens[1] == '':
i_b = max_needed_angletype
else:
i_b = Intify(angletype_tokens[1])
else:
i_a = i_b = Intify(angletype_str)
if i_a < min_needed_angletype:
i_a = min_needed_angletype
if i_b > max_needed_angletype:
i_b = max_needed_angletype
#if i_a == i_b:
# i_str = '@angle:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{angle:type'+str(j_a)+'}*@{angle:type'+str(j_b)+'}'
if ('*' in angletype_str):
del(l_in_angle_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_angletypes):
tokens[1] = '@angle:type'+str(i)
l_in_angle_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_angletypes):
tokens[1] = '@angle:type'+str(i_a)
l_in_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_angle_coeffs[i_line])
# --- DIHEDRALS AND DIHEDRAL COEFFS ---
# delete lines from data_dihedrals if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_dihedrals):
line = l_data_dihedrals[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
dihedralid = Intify(tokens[0])
dihedraltype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$dihedral:id'+str(dihedralid)
tokens[1] = '@dihedral:type'+str(dihedraltype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
tokens[5] = '$atom:id'+str(atomid4)
needed_dihedralids.add(dihedralid)
needed_dihedraltypes.add(dihedraltype)
l_data_dihedrals[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_dihedrals[i_line])
# delete data_dihedral_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_dihedral_coeffs):
line = l_data_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del(l_data_dihedral_coeffs[i_line])
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_dihedral_coeffs for dihedraltypes we don't care about anymore:
for dihedraltype in needed_dihedraltypes:
if type(dihedraltype) is int:
if ((min_needed_dihedraltype == None) or
(min_needed_dihedraltype > dihedraltype)):
min_needed_dihedraltype = dihedraltype
if ((max_needed_dihedraltype == None) or
(max_needed_dihedraltype < dihedraltype)):
max_needed_dihedraltype = dihedraltype
i_line = 0
while i_line < len(l_in_dihedral_coeffs):
line = l_in_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype_str = tokens[1]
if ('*' in dihedraltype_str):
dihedraltype_tokens = dihedraltype_str.split('*')
if dihedraltype_tokens[0] == '':
i_a = min_needed_dihedraltype
else:
i_a = Intify(dihedraltype_tokens[0])
if dihedraltype_tokens[1] == '':
i_b = max_needed_dihedraltype
else:
i_b = Intify(dihedraltype_tokens[1])
else:
i_a = i_b = Intify(dihedraltype_str)
if i_a < min_needed_dihedraltype:
i_a = min_needed_dihedraltype
if i_b > max_needed_dihedraltype:
i_b = max_needed_dihedraltype
#if i_a == i_b:
# i_str = '@dihedral:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{dihedral:type'+str(j_a)+'}*@{dihedral:type'+str(j_b)+'}'
if ('*' in dihedraltype_str):
del(l_in_dihedral_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i)
l_in_dihedral_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_dihedraltypes):
tokens[1] = '@dihedral:type'+str(i_a)
l_in_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_dihedral_coeffs[i_line])
# --- IMPROPERS AND IMPROPER COEFFS ---
# delete lines from data_impropers if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_impropers):
line = l_data_impropers[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
improperid = Intify(tokens[0])
impropertype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$improper:id'+str(improperid)
tokens[1] = '@improper:type'+str(impropertype)
tokens[2] = '$atom:id'+str(atomid1)
tokens[3] = '$atom:id'+str(atomid2)
tokens[4] = '$atom:id'+str(atomid3)
tokens[5] = '$atom:id'+str(atomid4)
needed_improperids.add(improperid)
needed_impropertypes.add(impropertype)
l_data_impropers[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del(l_data_impropers[i_line])
# delete data_improper_coeffs for impropertypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_improper_coeffs):
line = l_data_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del(l_data_improper_coeffs[i_line])
else:
tokens[0] = '@improper:type'+str(impropertype)
l_data_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_improper_coeffs for impropertypes we don't care about anymore:
for impropertype in needed_impropertypes:
if type(impropertype) is int:
if ((min_needed_impropertype == None) or
(min_needed_impropertype > impropertype)):
min_needed_impropertype = impropertype
if ((max_needed_impropertype == None) or
(max_needed_impropertype < impropertype)):
max_needed_impropertype = impropertype
i_line = 0
while i_line < len(l_in_improper_coeffs):
line = l_in_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype_str = tokens[1]
if ('*' in impropertype_str):
impropertype_tokens = impropertype_str.split('*')
if impropertype_tokens[0] == '':
i_a = min_needed_impropertype
else:
i_a = Intify(impropertype_tokens[0])
if impropertype_tokens[1] == '':
i_b = max_needed_impropertype
else:
i_b = Intify(impropertype_tokens[1])
else:
i_a = i_b = Intify(impropertype_str)
if i_a < min_needed_impropertype:
i_a = min_needed_impropertype
if i_b > max_needed_impropertype:
i_b = max_needed_impropertype
#if i_a == i_b:
# i_str = '@improper:type'+str(i_a)
# tokens[1] = i_str
#else:
# i_str = '@{improper:type'+str(j_a)+'}*@{improper:type'+str(j_b)+'}'
if ('*' in impropertype_str):
del(l_in_improper_coeffs[i_line])
for i in range(i_a, i_b+1):
if (i in needed_impropertypes):
tokens[1] = '@improper:type'+str(i)
l_in_improper_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
if (i_a in needed_impropertypes):
tokens[1] = '@improper:type'+str(i_a)
l_in_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
else:
del(l_in_improper_coeffs[i_line])
if not some_pair_coeffs_read:
sys.stderr.write('Warning: No \"pair coeffs\" set.\n'
' (No interactions between non-bonded atoms defined.)\n')
no_warnings = False
#sys.stderr.write('Writing ttree data to standard out.\n'
# ' You can redirect this to a file using:\n'+
# ' '+' '.join(sys.argv)+' > filename.ttree\n'
# ' ----------------------\n')
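    # The remainder prints the collected lines to stdout as moltemplate
    # write()/write_once() blocks wrapped in the molecule definition, roughly
    # (illustrative sketch; the actual block names come from the in_*/data_*
    # variables):
    #   MolName {
    #     write_once("In Settings") {
    #       pair_coeff @atom:type1 @atom:type1 0.15 3.6
    #     }
    #   }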
if mol_name != '':
sys.stdout.write(mol_name + ' {\n')
if len(l_in_init) > 0:
sys.stdout.write('\n### LAMMPS commands for initialization\n'
'### (These can be overridden later.)\n\n')
l_in_init.insert(0, (' '*cindent)+'write_once(\"'+in_init+'\") {\n')
l_in_init.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_init))
if len(l_in_settings) > 0:
sys.stdout.write('\n### LAMMPS commands for settings\n'
'### (These can be overridden later.)\n\n')
l_in_settings.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_settings.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_settings))
non_empty_output = True
if len(l_in_masses) > 0:
l_in_masses.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_masses))
non_empty_output = True
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
non_empty_output = True
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
non_empty_output = True
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
non_empty_output = True
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
non_empty_output = True
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
non_empty_output = True
if non_empty_output:
sys.stdout.write('\n### DATA sections\n\n')
if len(l_data_masses) > 0:
l_data_masses.insert(0, (' '*cindent)+'write_once(\"'+data_masses+'\") {\n')
l_data_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_masses))
non_empty_output = True
if len(l_data_bond_coeffs) > 0:
l_data_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bond_coeffs+'\") {\n')
l_data_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bond_coeffs))
non_empty_output = True
if len(l_data_angle_coeffs) > 0:
l_data_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angle_coeffs+'\") {\n')
l_data_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angle_coeffs))
non_empty_output = True
if len(l_data_dihedral_coeffs) > 0:
l_data_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_dihedral_coeffs+'\") {\n')
l_data_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedral_coeffs))
non_empty_output = True
if len(l_data_improper_coeffs) > 0:
l_data_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_improper_coeffs+'\") {\n')
l_data_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_improper_coeffs))
non_empty_output = True
if len(l_data_pair_coeffs) > 0:
l_data_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_pair_coeffs+'\") {\n')
l_data_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pair_coeffs))
non_empty_output = True
# class2 force fields:
if len(l_data_bondbond_coeffs) > 0:
l_data_bondbond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond_coeffs+'\") {\n')
l_data_bondbond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond_coeffs))
non_empty_output = True
if len(l_data_bondangle_coeffs) > 0:
l_data_bondangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondangle_coeffs+'\") {\n')
l_data_bondangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondangle_coeffs))
non_empty_output = True
if len(l_data_middlebondtorsion_coeffs) > 0:
l_data_middlebondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_middlebondtorsion_coeffs+'\") {\n')
l_data_middlebondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_middlebondtorsion_coeffs))
non_empty_output = True
if len(l_data_endbondtorsion_coeffs) > 0:
l_data_endbondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_endbondtorsion_coeffs+'\") {\n')
l_data_endbondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_endbondtorsion_coeffs))
non_empty_output = True
if len(l_data_angletorsion_coeffs) > 0:
l_data_angletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angletorsion_coeffs+'\") {\n')
l_data_angletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angletorsion_coeffs))
non_empty_output = True
if len(l_data_angleangletorsion_coeffs) > 0:
l_data_angleangletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangletorsion_coeffs+'\") {\n')
l_data_angleangletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangletorsion_coeffs))
non_empty_output = True
if len(l_data_bondbond13_coeffs) > 0:
l_data_bondbond13_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond13_coeffs+'\") {\n')
l_data_bondbond13_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond13_coeffs))
non_empty_output = True
if len(l_data_angleangle_coeffs) > 0:
l_data_angleangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangle_coeffs+'\") {\n')
l_data_angleangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangle_coeffs))
non_empty_output = True
# automatic generation of bonded interactions by type:
if len(l_data_angles_by_type) > 0:
l_data_angles_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_angles_by_type+'\") {\n')
l_data_angles_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles_by_type))
non_empty_output = True
if len(l_data_dihedrals_by_type) > 0:
l_data_dihedrals_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_dihedrals_by_type+'\") {\n')
l_data_dihedrals_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals_by_type))
non_empty_output = True
if len(l_data_impropers_by_type) > 0:
l_data_impropers_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_impropers_by_type+'\") {\n')
l_data_impropers_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers_by_type))
non_empty_output = True
if len(l_data_atoms) > 0:
l_data_atoms.insert(0, (' '*cindent)+'write(\"'+data_atoms+'\") {\n')
l_data_atoms.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_atoms))
non_empty_output = True
else:
sys.stderr.write('Warning: missing \"Atoms\" section.\n'
' (Did you include a LAMMPS data file in your argument list?)\n')
no_warnings = False
# non-point-like particles
if len(l_data_ellipsoids) > 0:
l_data_ellipsoids.insert(0, (' '*cindent)+'write(\"'+data_ellipsoids+'\") {\n')
l_data_ellipsoids.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_ellipsoids))
if len(l_data_lines) > 0:
l_data_lines.insert(0, (' '*cindent)+'write(\"'+data_lines+'\") {\n')
l_data_lines.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_lines))
if len(l_data_triangles) > 0:
l_data_triangles.insert(0, (' '*cindent)+'write(\"'+data_triangles+'\") {\n')
l_data_triangles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_triangles))
if len(l_data_velocities) > 0:
l_data_velocities.insert(0, (' '*cindent)+'write(\"'+data_velocities+'\") {\n')
l_data_velocities.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_velocities))
if len(l_data_bonds) > 0:
l_data_bonds.insert(0, (' '*cindent)+'write(\"'+data_bonds+'\") {\n')
l_data_bonds.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bonds))
non_empty_output = True
if len(l_data_angles) > 0:
l_data_angles.insert(0, (' '*cindent)+'write(\"'+data_angles+'\") {\n')
l_data_angles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles))
non_empty_output = True
if len(l_data_dihedrals) > 0:
l_data_dihedrals.insert(0, (' '*cindent)+'write(\"'+data_dihedrals+'\") {\n')
l_data_dihedrals.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals))
non_empty_output = True
if len(l_data_impropers) > 0:
l_data_impropers.insert(0, (' '*cindent)+'write(\"'+data_impropers+'\") {\n')
l_data_impropers.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers))
non_empty_output = True
if mol_name != '':
sys.stdout.write('\n} # end of \"'+mol_name+'\" type definition\n')
if non_empty_output and no_warnings:
sys.stderr.write('WARNING: The '+g_program_name+' script has not been rigorously tested.\n'
' Exotic (manybody) pair styles (and other force-field styles\n'
' with unusual syntax) are not understood by '+g_program_name+'\n'
' (although they are supported by moltemplate). Please look over\n'
' the resulting LT file and check for errors. Convert any remaining\n'
' atom, bond, angle, dihedral, or improper id or type numbers to the\n'
' corresponding variables. Feel free to report any bugs you find.\n'
' (-Andrew Jewett 2012-12-11)\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)
| gpl-2.0 | -8,459,022,808,084,960,000 | 45.248771 | 148 | 0.452418 | false |
axptwig/CSCI-2963--Intro-to-Open-Source | files/Lab7/words5unordered.py | 1 | 2890 | """
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile words_dat.txt.gz. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book [1]_,[2]_.
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Aric Hagberg ([email protected]),
# Brendt Wohlberg,
# [email protected]
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from itertools import permutations
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
from string import ascii_lowercase as lowercase
G = nx.Graph(name="words")
lookup = dict((c,lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
perms = [''.join(p) for p in permutations(word)]
for p in perms:
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i+1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j+1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
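# Rough usage sketch (not part of the original example; the word set is made up):
#   G = generate_graph(set(['chaos', 'choas', 'chaps']))
#   G.number_of_nodes()   # -> 3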
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
import gzip
fh=gzip.open('words_dat.txt.gz','r')
words=set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w=str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
from networkx import *
G=words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter, order doesn't matter.")
print("Graph has %d nodes with %d edges"
%(number_of_nodes(G),number_of_edges(G)))
print("%d connected components" % number_connected_components(G))
    for (source,target) in [('chaos','order'),('nodes','graph'),('moron','smart'),('pound','marks')]:
print("Shortest path between %s and %s is"%(source,target))
try:
sp=shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
| mit | 7,894,361,437,417,402,000 | 35.125 | 100 | 0.582699 | false |
ph1l/ocemr | ocemr/modelviz.7.py | 1 | 6810 | #!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <[email protected]>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-d] [-i <model_names>] [-e <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-d, --disable_fields
don't show the class member fields.
-i, --include_models=User,Person,Car
only include selected models in graph.
-e, --exclude_models=User,Person,Car
exclude selected models from graph.
"""
__version__ = "0.8"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <[email protected]>",
"Justin Findlay <[email protected]>",
"Alexander Houben <[email protected]>",
"Christopher Schmidt <[email protected]>",
]
import getopt, sys
from django.core.management import setup_environ
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% for model in models %}
{% for relation in model.relations %}
{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{{ model.name }} -> {{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
{% for model in models %}
{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = kwargs.get('include_models', [])
exclude_models = kwargs.get('exclude_models', [])
dot = head_template
for app_label in app_labels:
app = models.get_app(app_label)
graph = Context({
'name': '"%s"' % app.__name__,
'disable_fields': disable_fields,
'models': []
})
for appmodel in get_models(app):
# consider given model name ?
def consider(model_name):
return (not include_models or model_name in include_models) and (not model_name in exclude_models)
if not consider(appmodel._meta.object_name):
continue
model = {
'name': appmodel.__name__,
'fields': [],
'relations': []
}
# model attributes
def add_attributes():
model['fields'].append({
'name': field.name,
'type': type(field).__name__,
'blank': field.blank
})
for field in appmodel._meta.fields:
add_attributes()
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
add_attributes()
# relations
def add_relation(extras=""):
_rel = {
'target': field.rel.to.__name__,
'type': type(field).__name__,
'name': field.name,
'arrows': extras
}
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
for field in appmodel._meta.fields:
if isinstance(field, ForeignKey):
add_relation()
elif isinstance(field, OneToOneField):
add_relation("[arrowhead=none arrowtail=none]")
if appmodel._meta.many_to_many:
for field in appmodel._meta.many_to_many:
if isinstance(field, ManyToManyField):
add_relation("[arrowhead=normal arrowtail=normal]")
elif isinstance(field, GenericRelation):
add_relation(
'[style="dotted"] [arrowhead=normal arrowtail=normal]')
graph['models'].append(model)
t = Template(body_template)
dot += '\n' + t.render(graph)
dot += '\n' + tail_template
return dot
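# Example of calling generate_dot() directly instead of via the command line
# (hypothetical app label and output path):
#   dot_source = generate_dot(['myapp'], disable_fields=True)
#   open('myapp.dot', 'w').write(dot_source)
# and then render it with:  dot myapp.dot -Tpng -o myapp.png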
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hdi:e:",
["help", "disable_fields", "include_models=", "exclude_models="])
except getopt.GetoptError, error:
print __doc__
sys.exit(error)
else:
if not args:
print __doc__
sys.exit()
kwargs = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
print __doc__
sys.exit()
if opt in ("-d", "--disable_fields"):
kwargs['disable_fields'] = True
if opt in ("-i", "--include_models"):
kwargs['include_models'] = arg.split(',')
if opt in ("-e", "--exclude_models"):
kwargs['exclude_models'] = arg.split(',')
print generate_dot(args, **kwargs)
if __name__ == "__main__":
main()
| gpl-3.0 | -5,578,806,056,656,196,000 | 29.538117 | 114 | 0.5442 | false |
dimara/synnefo | snf-pithos-app/pithos/api/test/__init__.py | 1 | 26757 | #!/usr/bin/env python
#coding=utf8
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlunsplit, urlsplit, urlparse
from xml.dom import minidom
from urllib import quote, unquote
from mock import patch, PropertyMock
from django_nose import NoseTestSuiteRunner
from snf_django.utils.testing import with_settings, astakos_user
from pithos.api import settings as pithos_settings
from pithos.api.test.util import is_date, get_random_data, get_random_name
from pithos.backends.migrate import initialize_db
from synnefo.lib.services import get_service_path
from synnefo.lib import join_urls
from django.test import TestCase
from django.test.client import Client, MULTIPART_CONTENT, FakePayload
from django.conf import settings
from django.utils.http import urlencode
from django.utils.encoding import smart_unicode
from django.db.backends.creation import TEST_DATABASE_PREFIX
import django.utils.simplejson as json
import sys
import random
import functools
import time
pithos_test_settings = functools.partial(with_settings, pithos_settings)
DATE_FORMATS = ["%a %b %d %H:%M:%S %Y",
"%A, %d-%b-%y %H:%M:%S GMT",
"%a, %d %b %Y %H:%M:%S GMT"]
o_names = ['kate.jpg',
'kate_beckinsale.jpg',
'How To Win Friends And Influence People.pdf',
'moms_birthday.jpg',
'poodle_strut.mov',
'Disturbed - Down With The Sickness.mp3',
'army_of_darkness.avi',
'the_mad.avi',
'photos/animals/dogs/poodle.jpg',
'photos/animals/dogs/terrier.jpg',
'photos/animals/cats/persian.jpg',
'photos/animals/cats/siamese.jpg',
'photos/plants/fern.jpg',
'photos/plants/rose.jpg',
'photos/me.jpg']
details = {'container': ('name', 'count', 'bytes', 'last_modified',
'x_container_policy'),
'object': ('name', 'hash', 'bytes', 'content_type',
'content_encoding', 'last_modified',)}
TEST_BLOCK_SIZE = 1024
TEST_HASH_ALGORITHM = 'sha256'
print 'backend module:', pithos_settings.BACKEND_DB_MODULE
print 'backend database engine:', settings.DATABASES['default']['ENGINE']
print 'update md5:', pithos_settings.UPDATE_MD5
django_sqlalchemy_engines = {
'django.db.backends.postgresql_psycopg2': 'postgresql+psycopg2',
'django.db.backends.postgresql': 'postgresql',
'django.db.backends.mysql': '',
    'django.db.backends.sqlite3': 'sqlite',  # value unused: sqlite gets a special case below
'django.db.backends.oracle': 'oracle'}
def prepare_db_connection():
"""Build pithos backend connection string from django default database"""
db = settings.DATABASES['default']
name = db.get('TEST_NAME', TEST_DATABASE_PREFIX + db['NAME'])
if (pithos_settings.BACKEND_DB_MODULE == 'pithos.backends.lib.sqlalchemy'):
if db['ENGINE'] == 'django.db.backends.sqlite3':
db_connection = 'sqlite:///%s' % name
else:
d = dict(scheme=django_sqlalchemy_engines.get(db['ENGINE']),
user=db['USER'],
pwd=db['PASSWORD'],
host=db['HOST'].lower(),
port=int(db['PORT']) if db['PORT'] != '' else '',
name=name)
db_connection = (
'%(scheme)s://%(user)s:%(pwd)s@%(host)s:%(port)s/%(name)s' % d)
# initialize pithos database
initialize_db(db_connection)
else:
db_connection = name
pithos_settings.BACKEND_DB_CONNECTION = db_connection
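# Illustrative example (assumed settings, not part of the original module): with
#   DATABASES['default'] = {'ENGINE': 'django.db.backends.postgresql_psycopg2',
#                           'NAME': 'pithos', 'USER': 'pithos', 'PASSWORD': 'secret',
#                           'HOST': 'localhost', 'PORT': '5432'}
# prepare_db_connection() builds the SQLAlchemy URL
#   postgresql+psycopg2://pithos:secret@localhost:5432/test_pithos
# (the "test_" prefix comes from TEST_DATABASE_PREFIX), runs initialize_db() on it
# and stores it in pithos_settings.BACKEND_DB_CONNECTION.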
def filter_headers(headers, prefix):
meta = {}
for k, v in headers.iteritems():
if not k.startswith(prefix):
continue
meta[unquote(k[len(prefix):])] = unquote(v)
return meta
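# Example (illustrative values): filter_headers({'X-Account-Meta-Color': 'blue',
#                                                'Content-Type': 'text/plain'},
#                                               'X-Account-Meta-')
# returns {'Color': 'blue'} -- only headers carrying the prefix are kept, with the
# prefix stripped and both key and value unquoted.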
class PithosTestSuiteRunner(NoseTestSuiteRunner):
def setup_test_environment(self, **kwargs):
pithos_settings.BACKEND_MAPFILE_PREFIX = \
'snf_test_pithos_app_%s_' % time.time()
super(PithosTestSuiteRunner, self).setup_test_environment(**kwargs)
def setup_databases(self, **kwargs):
old_names, mirrors = super(PithosTestSuiteRunner,
self).setup_databases(**kwargs)
prepare_db_connection()
return old_names, mirrors
def teardown_databases(self, old_config, **kwargs):
from pithos.api.util import _pithos_backend_pool
_pithos_backend_pool.shutdown()
try:
super(PithosTestSuiteRunner, self).teardown_databases(old_config,
**kwargs)
except Exception as e:
sys.stderr.write("FAILED to teardown databases: %s\n" % str(e))
class PithosTestClient(Client):
def _get_path(self, parsed):
# If there are parameters, add them
if parsed[3]:
return unquote(parsed[2] + ";" + parsed[3])
else:
return unquote(parsed[2])
def copy(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using COPY.
"""
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'COPY',
'wsgi.input': FakePayload('')
}
r.update(extra)
response = self.request(**r)
if follow:
response = self._handle_redirects(response, **extra)
return response
def move(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using MOVE.
"""
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'MOVE',
'wsgi.input': FakePayload('')
}
r.update(extra)
response = self.request(**r)
if follow:
response = self._handle_redirects(response, **extra)
return response
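# Minimal usage sketch (path and token are assumptions, not part of the suite):
# the extra COPY and MOVE verbs are used like the built-in test client methods,
# with any additional keyword arguments passed through to the WSGI environ.
#
#   client = PithosTestClient()
#   client.copy('/v1/user/pithos/src.txt',
#               HTTP_X_AUTH_TOKEN='token',
#               HTTP_DESTINATION='/pithos/dst.txt')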
class PithosAPITest(TestCase):
def create_patch(self, name, new_callable=None):
patcher = patch(name, new_callable=new_callable)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def setUp(self):
self.client = PithosTestClient()
# Override default block size to spead up tests
pithos_settings.BACKEND_BLOCK_SIZE = TEST_BLOCK_SIZE
pithos_settings.BACKEND_HASH_ALGORITHM = TEST_HASH_ALGORITHM
self.user = 'user'
self.pithos_path = join_urls(get_service_path(
pithos_settings.pithos_services, 'object-store'))
# patch astakosclient.AstakosClient.validate_token
mock_validate_token = self.create_patch(
'astakosclient.AstakosClient.validate_token')
mock_validate_token.return_value = {
'access': {
'user': {'id': smart_unicode(self.user, encoding='utf-8')}}}
# patch astakosclient.AstakosClient.get_token
mock_get_token = self.create_patch(
'astakosclient.AstakosClient.get_token')
mock_get_token.return_value = {'access_token': 'valid_token'}
# patch astakosclient.AstakosClient.api_oa2_auth
mock_api_oauth2_auth = self.create_patch(
'astakosclient.AstakosClient.oauth2_url',
new_callable=PropertyMock)
mock_api_oauth2_auth.return_value = '/astakos/oauth2/'
mock_service_get_quotas = self.create_patch(
'astakosclient.AstakosClient.service_get_quotas')
mock_service_get_quotas.return_value = {
self.user: {
"system": {
"pithos.diskspace": {
"usage": 0,
"limit": 1073741824, # 1GB
"pending": 0}}}}
def tearDown(self):
#delete additionally created metadata
meta = self.get_account_meta()
self.delete_account_meta(meta)
#delete additionally created groups
groups = self.get_account_groups()
self.delete_account_groups(groups)
self._clean_account()
def _clean_account(self):
for c in self.list_containers():
self.delete_container_content(c['name'])
self.delete_container(c['name'])
def head(self, url, user='user', token='DummyToken', data={}, follow=False,
**extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.head(url, data, follow, **extra)
return response
def get(self, url, user='user', token='DummyToken', data={}, follow=False,
**extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.get(url, data, follow, **extra)
return response
def delete(self, url, user='user', token='DummyToken', data={},
follow=False, **extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.delete(url, data, follow, **extra)
return response
def post(self, url, user='user', token='DummyToken', data={},
content_type='application/octet-stream', follow=False, **extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.post(url, data, content_type, follow,
**extra)
return response
def put(self, url, user='user', token='DummyToken', data={},
content_type='application/octet-stream', follow=False,
quote_extra=True, **extra):
with astakos_user(user):
if quote_extra:
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.put(url, data, content_type, follow,
**extra)
return response
def copy(self, url, user='user', token='DummyToken', data={},
content_type='application/octet-stream', follow=False, **extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.copy(url, data, content_type, follow,
**extra)
return response
def move(self, url, user='user', token='DummyToken', data={},
content_type='application/octet-stream', follow=False, **extra):
with astakos_user(user):
extra = dict((quote(k), quote(v)) for k, v in extra.items())
if token:
extra['HTTP_X_AUTH_TOKEN'] = token
response = self.client.move(url, data, content_type, follow,
**extra)
return response
def update_account_meta(self, meta, user=None, verify_status=True):
user = user or self.user
kwargs = dict(
('HTTP_X_ACCOUNT_META_%s' % k, str(v)) for k, v in meta.items())
url = join_urls(self.pithos_path, user)
r = self.post('%s?update=' % url, user=user, **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
account_meta = self.get_account_meta(user=user)
(self.assertTrue('X-Account-Meta-%s' % k in account_meta) for
k in meta.keys())
(self.assertEqual(account_meta['X-Account-Meta-%s' % k], v) for
k, v in meta.items())
def reset_account_meta(self, meta, user=None, verify_status=True):
user = user or self.user
kwargs = dict(
('HTTP_X_ACCOUNT_META_%s' % k, str(v)) for k, v in meta.items())
url = join_urls(self.pithos_path, user)
r = self.post(url, user=user, **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
account_meta = self.get_account_meta(user=user)
(self.assertTrue('X-Account-Meta-%s' % k in account_meta) for
k in meta.keys())
(self.assertEqual(account_meta['X-Account-Meta-%s' % k], v) for
k, v in meta.items())
def delete_account_meta(self, meta, user=None, verify_status=True):
user = user or self.user
transform = lambda k: 'HTTP_X_ACCOUNT_META_%s' %\
k.replace('-', '_').upper()
kwargs = dict((transform(k), '') for k in meta)
url = join_urls(self.pithos_path, user)
r = self.post('%s?update=' % url, user=user, **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
account_meta = self.get_account_meta(user=user)
(self.assertTrue('X-Account-Meta-%s' % k not in account_meta) for
k in meta.keys())
return r
def delete_account_groups(self, groups, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user)
transform = lambda k: 'HTTP_X_ACCOUNT_GROUP_%s' %\
k.replace('-', '_').upper()
kwargs = dict((transform(k), '') for k in groups)
r = self.post('%s?update=' % url, user=user, **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
account_groups = self.get_account_groups()
(self.assertTrue(k not in account_groups) for k in groups.keys())
return r
def get_account_info(self, until=None, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user)
if until is not None:
parts = list(urlsplit(url))
parts[3] = urlencode({
'until': until
})
url = urlunsplit(parts)
r = self.head(url, user=user)
if verify_status:
self.assertEqual(r.status_code, 204)
return r
def get_account_meta(self, until=None, user=None):
prefix = 'X-Account-Meta-'
r = self.get_account_info(until=until, user=user)
headers = dict(r._headers.values())
return filter_headers(headers, prefix)
def get_account_groups(self, until=None, user=None):
prefix = 'X-Account-Group-'
r = self.get_account_info(until=until, user=user)
headers = dict(r._headers.values())
return filter_headers(headers, prefix)
def get_container_info(self, container, until=None, user=None,
verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, container)
if until is not None:
parts = list(urlsplit(url))
parts[3] = urlencode({
'until': until
})
url = urlunsplit(parts)
r = self.head(url, user=user)
if verify_status:
self.assertEqual(r.status_code, 204)
return r
def get_container_meta(self, container, until=None, user=None):
prefix = 'X-Container-Meta-'
r = self.get_container_info(container, until=until, user=user)
headers = dict(r._headers.values())
return filter_headers(headers, prefix)
def update_container_meta(self, container, meta=None, user=None,
verify_status=True):
user = user or self.user
meta = meta or {get_random_name(): get_random_name()}
kwargs = dict(
('HTTP_X_CONTAINER_META_%s' % k, str(v)) for k, v in meta.items())
url = join_urls(self.pithos_path, user, container)
r = self.post('%s?update=' % url, user=user, **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
container_meta = self.get_container_meta(container, user=user)
(self.assertTrue('X-Container-Meta-%s' % k in container_meta) for
k in meta.keys())
(self.assertEqual(container_meta['X-Container-Meta-%s' % k], v) for
k, v in meta.items())
return r
def list_containers(self, format='json', headers={}, user=None, **params):
user = user or self.user
_url = join_urls(self.pithos_path, user)
parts = list(urlsplit(_url))
params['format'] = format
parts[3] = urlencode(params)
url = urlunsplit(parts)
_headers = dict(('HTTP_%s' % k.upper(), str(v))
for k, v in headers.items())
r = self.get(url, user=user, **_headers)
if format is None:
containers = r.content.split('\n')
if '' in containers:
containers.remove('')
return containers
elif format == 'json':
try:
containers = json.loads(r.content)
except:
self.fail('json format expected')
return containers
elif format == 'xml':
return minidom.parseString(r.content)
def delete_container_content(self, cname, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, cname)
r = self.delete('%s?delimiter=/' % url, user=user)
if verify_status:
self.assertEqual(r.status_code, 204)
return r
def delete_container(self, cname, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, cname)
r = self.delete(url, user=user)
if verify_status:
self.assertEqual(r.status_code, 204)
return r
def delete_object(self, cname, oname, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, cname, oname)
r = self.delete(url, user=user)
if verify_status:
self.assertEqual(r.status_code, 204)
return r
def create_container(self, cname=None, user=None, verify_status=True,
meta=None):
meta = meta or {}
cname = cname or get_random_name()
user = user or self.user
url = join_urls(self.pithos_path, user, cname)
kwargs = dict(
('HTTP_X_CONTAINER_META_%s' % k, str(v)) for k, v in meta.items())
r = self.put(url, user=user, data='', **kwargs)
if verify_status:
self.assertTrue(r.status_code in (202, 201))
return cname, r
def upload_object(self, cname, oname=None, length=None, verify_status=True,
user=None, **meta):
oname = oname or get_random_name()
length = length or random.randint(TEST_BLOCK_SIZE, 2 * TEST_BLOCK_SIZE)
user = user or self.user
data = get_random_data(length=length)
headers = dict(('HTTP_X_OBJECT_META_%s' % k.upper(), v)
for k, v in meta.iteritems())
url = join_urls(self.pithos_path, user, cname, oname)
r = self.put(url, user=user, data=data, **headers)
if verify_status:
self.assertEqual(r.status_code, 201)
return oname, data, r
def update_object_data(self, cname, oname=None, length=None,
content_type=None, content_range=None,
verify_status=True, user=None, **meta):
oname = oname or get_random_name()
length = length or random.randint(TEST_BLOCK_SIZE, 2 * TEST_BLOCK_SIZE)
content_type = content_type or 'application/octet-stream'
user = user or self.user
data = get_random_data(length=length)
headers = dict(('HTTP_X_OBJECT_META_%s' % k.upper(), v)
for k, v in meta.iteritems())
if content_range:
headers['HTTP_CONTENT_RANGE'] = content_range
url = join_urls(self.pithos_path, user, cname, oname)
r = self.post(url, user=user, data=data, content_type=content_type,
**headers)
if verify_status:
self.assertEqual(r.status_code, 204)
return oname, data, r
def append_object_data(self, cname, oname=None, length=None,
content_type=None, user=None):
return self.update_object_data(cname, oname=oname,
length=length,
content_type=content_type,
content_range='bytes */*',
user=user)
def create_folder(self, cname, oname=None, user=None, verify_status=True,
**headers):
user = user or self.user
oname = oname or get_random_name()
url = join_urls(self.pithos_path, user, cname, oname)
r = self.put(url, user=user, data='',
content_type='application/directory', **headers)
if verify_status:
self.assertEqual(r.status_code, 201)
return oname, r
def list_objects(self, cname, prefix=None, user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, cname)
path = '%s?format=json' % url
if prefix is not None:
path = '%s&prefix=%s' % (path, prefix)
r = self.get(path, user=user)
if verify_status:
self.assertTrue(r.status_code in (200, 204))
try:
objects = json.loads(r.content)
except:
self.fail('json format expected')
return objects
def get_object_info(self, container, object, version=None, until=None,
user=None, verify_status=True):
user = user or self.user
url = join_urls(self.pithos_path, user, container, object)
if until is not None:
parts = list(urlsplit(url))
parts[3] = urlencode({
'until': until
})
url = urlunsplit(parts)
if version:
url = '%s?version=%s' % (url, version)
r = self.head(url, user=user)
if verify_status:
self.assertEqual(r.status_code, 200)
return r
def get_object_meta(self, container, object, version=None, until=None,
user=None):
prefix = 'X-Object-Meta-'
user = user or self.user
r = self.get_object_info(container, object, version, until=until,
user=user)
headers = dict(r._headers.values())
return filter_headers(headers, prefix)
def update_object_meta(self, container, object, meta=None, user=None,
verify_status=True):
user = user or self.user
meta = meta or {get_random_name(): get_random_name()}
kwargs = dict(
('HTTP_X_OBJECT_META_%s' % k, str(v)) for k, v in meta.items())
url = join_urls(self.pithos_path, user, container, object)
r = self.post('%s?update=' % url, user=user, content_type='', **kwargs)
if verify_status:
self.assertEqual(r.status_code, 202)
object_meta = self.get_object_meta(container, object, user=user)
        (self.assertTrue('X-Object-Meta-%s' % k in object_meta) for
k in meta.keys())
(self.assertEqual(object_meta['X-Object-Meta-%s' % k], v) for
k, v in meta.items())
return r
def assert_extended(self, data, format, type, size=10000):
if format == 'xml':
self._assert_xml(data, type, size)
elif format == 'json':
self._assert_json(data, type, size)
def _assert_json(self, data, type, size):
convert = lambda s: s.lower()
info = [convert(elem) for elem in details[type]]
self.assertTrue(len(data) <= size)
for item in info:
for i in data:
if 'subdir' in i.keys():
continue
self.assertTrue(item in i.keys())
def _assert_xml(self, data, type, size):
convert = lambda s: s.lower()
info = [convert(elem) for elem in details[type]]
try:
info.remove('content_encoding')
except ValueError:
pass
xml = data
entities = xml.getElementsByTagName(type)
self.assertTrue(len(entities) <= size)
for e in entities:
for item in info:
self.assertTrue(e.getElementsByTagName(item))
class AssertMappingInvariant(object):
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __enter__(self):
self.map = self.callable(*self.args, **self.kwargs)
return self.map
def __exit__(self, type, value, tb):
map = self.callable(*self.args, **self.kwargs)
for k, v in self.map.items():
if is_date(v):
continue
assert(k in map), '%s not in map' % k
assert v == map[k]
class AssertUUidInvariant(object):
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __enter__(self):
self.map = self.callable(*self.args, **self.kwargs)
assert('x-object-uuid' in self.map)
self.uuid = self.map['x-object-uuid']
return self.map
def __exit__(self, type, value, tb):
map = self.callable(*self.args, **self.kwargs)
assert('x-object-uuid' in self.map)
uuid = map['x-object-uuid']
assert(uuid == self.uuid)
| gpl-3.0 | -1,280,235,108,467,557,400 | 37.947598 | 79 | 0.569982 | false |
mkrapp/pystable | src/pystable.py | 1 | 11946 | from ConfigParser import SafeConfigParser
import sys, os
from string import Template
import markdown2
import shutil
import glob
from dateutil.parser import parse
import calendar
def parse_config(posts_directory):
global site_title, site_subtitle, site_author, \
site_aboutme, site_info, site_syntax, \
site_url, site_output, site_theme, style_file, \
contact
parser = SafeConfigParser()
config_file = posts_directory+'/site.config'
parser.read(config_file)
site_title = parser.get("info","title")
site_subtitle = parser.get("info","subtitle")
site_author = parser.get("info","author")
site_aboutme = parser.get("info","aboutme")
site_info = parser.get("info","info")
site_syntax = parser.get("config","syntax")
site_url = parser.get("config","url")
site_output = parser.get("config","output")
site_theme = parser.get("config","theme")
style_file = site_url+'/'+site_output+'/styles.css'
contact = {}
contact["twitter"] = parser.get("contact","twitter")
contact["facebook"] = parser.get("contact","facebook")
contact["email"] = parser.get("contact","email")
contact["google+"] = parser.get("contact","google+")
contact["github"] = parser.get("contact","github")
contact["linkedin"] = parser.get("contact","linkedin")
print contact
def parse_posts(posts_directory):
files = glob.glob(posts_directory+'/*.txt')
posts = []
for p in files:
post = {}
meta = []
content = ""
is_content = False
f = open(p,'r')
lines = f.readlines()
for l in lines:
if is_content == False and l.strip():
meta.append(l.strip())
else:
is_content = True
if is_content:
content += l
f.close()
meta_dict = { k.lower().strip():v.strip() for k, v in dict(s.split(':',1) for s in meta).iteritems()}
post["meta"] = meta_dict
post["content"] = content[1:]
# parse tags from meta
post["tags"] = [tag.strip() for tag in meta_dict["tags"].split(',')]
# parse date into year, month from meta
date = parse(post['meta']['date'])
post["year"] = date.year
post["month"] = date.month
post["date"] = date
posts.append(post)
return posts
def parse_dates(posts):
# parse dates of posts according to years and months
date_list = [parse(p['meta']['date']) for p in posts]
years = [parse(p['meta']['date']).year for p in posts]
years = list(set(years))
dates = {}
for year in years:
dates[year] = []
for date in date_list:
if date.year == year: dates[year].append(date.month)
return dates
def parse_tags(posts):
# parse tags
tags = [p["tags"] for p in posts]
all_tags = [item for sublist in tags for item in sublist]
tags = list(set(all_tags))
tags = [(tag,all_tags.count(tag)) for tag in tags]
return tags
def generate_footer(theme):
footer_tmpl = open(theme+'/footer.tmpl','r')
lines = footer_tmpl.readlines()
footer = ""
disclaimer = 'Created by <a href="https://github.com/mkrapp/pystable" target="_blank">Pystable</a> \
(© 2014-2016 Mario Krapp. All rights reserved.)'
for l in lines:
s = Template(l)
footer += s.safe_substitute(disclaimer=disclaimer)
footer_tmpl.close()
return footer
def generate_header(theme):
header_tmpl = open(theme+'/header.tmpl','r')
lines = header_tmpl.readlines()
header = ""
title = '<a href="'+site_url+'/'+site_output+'/'+'index.html">'+site_title+'</a>'
for l in lines:
s = Template(l)
header += s.safe_substitute(title=title, subtitle=site_subtitle)
header_tmpl.close()
return header
def generate_sidebar(dates,tags,theme):
sidebar_tmpl = open(theme+'/sidebar.tmpl','r')
lines = sidebar_tmpl.readlines()
# contacts
twitter = ""
if contact["twitter"] != "":
twitter = '<a href="https://twitter.com/'+contact["twitter"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/twitter-32-black.png"></img></a>'
email = ""
if contact["email"] != "":
email = '<a href="mailto:'+contact["email"]+'?subject=Mail from '+site_url+'/'+site_output+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/email-32-black.png"></img></a>'
facebook = ""
if contact["facebook"] != "":
facebook = '<a href="https://www.facebook.com/'+contact["facebook"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/facebook-32-black.png"></img></a>'
google = ""
if contact["google+"] != "":
google = '<a href="https://www.plus.google.com/'+contact["google+"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/googleplus-32-black.png"></img></a>'
linkedin = ""
if contact["linkedin"] != "":
linkedin = '<a href="'+contact["linkedin"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/linkedin-32-black.png"></img></a>'
github = ""
if contact["github"] != "":
github = '<a href="https://www.github.com/'+contact["github"]+'" target="_blank"><img src="'+site_url+'/'+site_output+'/icons/github-32-black.png"></img></a>'
# archive
archive = "<ul>"
for year in sorted(dates)[::-1]:
archive += "<li>%.4d</li>" % year
archive += "<ul>"
for month in sorted(set(dates[year]))[::-1]:
k = dates[year].count(month)
n = ""
if k>1: n = ' ('+str(k)+')'
link = site_url+'/'+site_output+'/archive/%.4d/%.2d/index.html' % (year,month)
archive += '<li><a href="'+link+'">' + calendar.month_name[month] + n + '</a></li>'
archive += "</ul>"
archive += "</ul>"
# tagcloud
tagcloud = ""
for tag in sorted(tags):
tag_dir = site_output+'/tag/'+tag[0]
tag_dir = tag_dir.replace(' ','%20')
size = 40-25/tag[1]
tagcloud += '<span style="font-size: %.2dpx"><a href=' % size +site_url+'/'+tag_dir+'/index.html>'+tag[0]+'</a></span> '
sidebar = ""
for l in lines:
s = Template(l)
sidebar += s.safe_substitute(author=site_author, aboutme=site_aboutme, info=site_info,
archive=archive, tagcloud=tagcloud[:-2],
twitter=twitter,email=email,facebook=facebook,github=github,google=google,linkedin=linkedin)
sidebar_tmpl.close()
return sidebar
def process_content(content,syntax):
if syntax == 'markdown':
processed_content = markdown2.markdown(content,extras=["tables","fenced-code-blocks"])
if syntax == 'text':
processed_content = content
return processed_content.encode('utf-8')
def generate_main_page(posts,theme):
# create the full main web page
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/index.html','w')
main = ""
for p in posts:
post = generate_post_page(p,theme)
main += post["html_content"]
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, title=site_title, header=header,
sidebar=sidebar, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def generate_tags_page(posts,tags,theme):
# create a full web page for each tag
for tag in tags:
print 'create index.html for tag ' +tag[0]
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/tag/'+tag[0]+'/index.html','w')
main = ""
for p in posts:
if tag[0] in p["tags"]:
main += p["html_content"]
shutil.copy2(site_output+'/'+p["html_file"],site_output+'/tag/'+tag[0]+'/'+p["html_file"])
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, header=header,title=site_title,
sidebar=sidebar, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def generate_post(post,theme):
# read meta and content of post
meta = post["meta"]
content = process_content(post["content"],site_syntax)
# open post template
post_tmpl = open(theme+'/post.tmpl','r')
lines = post_tmpl.readlines()
post_content = ""
post_file = meta["title"].replace(" ","_")+'.html'
tags = ""
# create tags directory according to tag
for t in post["tags"]:
tag_dir = site_output+'/tag/'+t
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
tags += '<a href='+site_url+'/'+tag_dir+'/index.html>'+t+'</a>, '
# create archive directory according to year and month
date_dir = site_output+'/archive/%.4d/%.2d' % (post["year"],post["month"])
if not os.path.exists(date_dir):
os.makedirs(date_dir)
# create post content
for l in lines:
s = Template(l)
post_content += s.safe_substitute(title=meta["title"], date=meta["date"],
content=content, url='./'+post_file,
tags=tags[:-2])
post_tmpl.close()
# append html file name and html-processed content to post
post["html_content"] = post_content
post["html_file"] = post_file
return post
def generate_post_page(post,theme):
# create a full web page for a single post
post_tmpl = open(theme+'/main.html.tmpl','r')
lines = post_tmpl.readlines()
new_post = generate_post(post,theme)
post_file = new_post["html_file"]
html_content = new_post["html_content"]
post_html = open(site_output+'/'+post_file,'w')
for l in lines:
s = Template(l)
post_html.write(s.safe_substitute(style_file=style_file, title=site_title,
header=header, sidebar=sidebar,
footer=footer, main=html_content))
post_html.close()
post_tmpl.close()
return new_post
def generate_archives_page(posts,dates,theme):
# create a full web page for each month of each year (if posts are available for that date)
for year in sorted(dates):
months = list(set(dates[year]))
for month in months:
print 'create index.html for '+calendar.month_name[month]+' %.4d' % year
index_tmpl = open(theme+'/main.html.tmpl','r')
lines = index_tmpl.readlines()
index_html = open(site_output+'/archive/%.4d/%.2d/index.html' % (year,month),'w')
main = ""
for p in posts:
if month == p["month"]:
main += p["html_content"]
shutil.copy2(site_output+'/'+p["html_file"],
site_output+'/archive/%.4d/%.2d/' % (year,month)+p["html_file"])
for l in lines:
s = Template(l)
index_html.write(s.safe_substitute(style_file=style_file, header=header, sidebar=sidebar,
title=site_title, footer=footer, main=main))
index_html.close()
index_tmpl.close()
def create_blog(posts_dir):
global sidebar, footer, header
parse_config(posts_dir)
theme = 'themes/'+site_theme
if not os.path.exists(site_output):
os.makedirs(site_output)
# copy style.css
shutil.copy2(theme+'/styles.css',site_output+'/styles.css')
if not os.path.exists(site_output+'/icons'):
shutil.copytree('themes/icons',site_output+'/icons')
posts = parse_posts(posts_dir)
# sort list of posts in descending order of their date
decorated_posts = [(dict_["date"], dict_) for dict_ in posts]
decorated_posts.sort(reverse=True)
posts = [dict_ for (key, dict_) in decorated_posts]
tags = parse_tags(posts)
dates = parse_dates(posts)
header = generate_header(theme)
sidebar = generate_sidebar(dates,tags,theme)
footer = generate_footer(theme)
generate_main_page(posts,theme)
generate_tags_page(posts,tags,theme)
generate_archives_page(posts,dates,theme)
create_blog(sys.argv[1])
| gpl-2.0 | 7,100,139,526,058,622,000 | 38.556291 | 194 | 0.597438 | false |
Jackson-Y/Machine-Learning | text/classification_logistic_regression.py | 1 | 3574 | #-*- coding: utf-8 -*-
'''
Description:
Text Classification Based on Logistic Regression.
Version:
python3
'''
import scipy as sp
import numpy as np
from matplotlib import pyplot
from matplotlib import pylab
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
import time
start_time = time.time()
# Plot the precision/recall (P/R) curve
def plot_pr(auc_score, precision, recall, label=None):
pylab.figure(num=None, figsize=(6, 5))
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
pylab.fill_between(recall, precision, alpha=0.5)
pylab.grid(True, linestyle='-', color='0.75')
pylab.plot(recall, precision, lw=1)
pylab.show()
pylab.savefig('classification_pr.png')
# Load and save the data.
# The data lives in the endata/ directory:
# endata/neg holds all negative review texts, endata/pos holds all positive review texts.
# Uncomment the following three lines on the first run; afterwards just read the .npy files.
# movie_reviews = load_files('endata')
# sp.save('my_data.npy', movie_reviews.data)
# sp.save('my_target.npy', movie_reviews.target)
# Read the previously saved data
movie_data = sp.load("my_data.npy")
movie_target = sp.load("my_target.npy")
x = movie_data
y = movie_target
count_vec = TfidfVectorizer(binary=False, decode_error='ignore', stop_words='english')
average = 0
testNum = 10
for i in range(testNum):
    # Split the data: 80% for training, 20% for testing.
x_train, x_test, y_train, y_test \
= train_test_split(movie_data, movie_target, test_size=0.2)
    # Feature extraction & vectorization & TF-IDF weighting
x_train = count_vec.fit_transform(x_train)
x_test = count_vec.transform(x_test)
    # Create the logistic regression model
clf = LogisticRegression()
    # Train the model on the training data
clf.fit(x_train, y_train)
    # Predict on the test data
y_pred = clf.predict(x_test)
    # Measure prediction accuracy
p = np.mean(y_pred == y_test)
print(p)
average += p
# Predict probabilities for x_test (the feature matrix of the test data)
answer = clf.predict_proba(x_test)[:,1]
precision, recall, thresholds = precision_recall_curve(y_test, answer)
report = answer > 0.5
print(classification_report(y_test, report, target_names=['neg', 'pos']))
print("average precision: ", average/testNum)
print("time spent: ", time.time() - start_time)
# Plot the P/R curve for the positive class here.
# When run in a shell or terminal this can only save the image file, not display the figure directly.
plot_pr(0.5, precision, recall, "pos")
# Below, the trained model is applied to decide which class (neg/pos) other texts belong to
test = [b'nb movie!\n']
test1 = count_vec.transform(test)
result = clf.predict_proba(test1)[:,1]
print("result: ", result)
if result > 0.5:
print(test, "\nThis is a positive comment!")
else:
print(test, "\nThis is a negative comment!")
test2 = [b'waste life!\n']
test3 = count_vec.transform(test2)
result1 = clf.predict_proba(test3)[:,1]
print("result: ", result1)
if result1 > 0.5:
print(test2, "\nThis is a positive comment!")
else:
print(test2, "\nThis is a negative comment!")
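# Optional sketch (not part of the original script): the same transform-then-predict
# pairing used above can be bundled into a scikit-learn Pipeline so that new texts
# only need a single fitted object. Step names below are illustrative.
#
#   from sklearn.pipeline import Pipeline
#   pipe = Pipeline([
#       ("tfidf", TfidfVectorizer(binary=False, decode_error='ignore',
#                                 stop_words='english')),
#       ("logreg", LogisticRegression()),
#   ])
#   pipe.fit(movie_data, movie_target)
#   print(pipe.predict_proba([b'nb movie!\n'])[:, 1])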
| mit | -1,780,801,201,349,490,200 | 28.158879 | 86 | 0.696474 | false |
pmeier82/django-spikeval | djspikeval/views/algorithm.py | 1 | 2234 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.shortcuts import redirect
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from djspikeval.forms import AlgorithmForm
__all__ = [
"AlgorithmBaseView", "AlgorithmList", "AlgorithmCreate", "AlgorithmDetail", "AlgorithmUpdate", "AlgorithmDelete"]
__author__ = "pmeier82"
Algorithm = apps.get_model("djspikeval", "algorithm")
class AlgorithmBaseView(object):
model = Algorithm
class AlgorithmList(AlgorithmBaseView, ListView):
template_name = "djspikeval/algorithm/list.html"
paginate_by = 10
def get_context_data(self, **kwargs):
cntx = super(AlgorithmList, self).get_context_data(**kwargs)
cntx.update(scope=self.request.GET.get("scope"))
return cntx
def get_queryset(self):
if self.request.GET.get("scope"):
scope = self.request.GET.get("scope")
return Algorithm.objects.filter(
Q(name__icontains=scope) |
Q(kind__name__icontains=scope))
return Algorithm.objects.all()
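# Illustrative request (URL prefix assumed from the "algorithm" namespace):
#   GET /algorithm/?scope=spike
# matches algorithms whose name or kind name contains "spike" (case-insensitive)
# and renders them, 10 per page, with the paginated list template above.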
class AlgorithmCreate(AlgorithmBaseView, CreateView):
template_name = "djspikeval/algorithm/create.html"
form_class = AlgorithmForm
def get_form_kwargs(self):
kwargs = super(AlgorithmCreate, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class AlgorithmDetail(AlgorithmBaseView, DetailView):
template_name = "djspikeval/algorithm/detail.html"
class AlgorithmUpdate(AlgorithmBaseView, UpdateView):
template_name = "djspikeval/algorithm/update.html"
form_class = AlgorithmForm
def get_form_kwargs(self):
kwargs = super(AlgorithmUpdate, self).get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class AlgorithmDelete(AlgorithmBaseView, DeleteView):
template_name = "djspikeval/algorithm/delete.html"
success_url = reverse_lazy("algorithm:list")
if __name__ == "__main__":
pass
| bsd-3-clause | -5,051,124,523,755,810,000 | 29.60274 | 117 | 0.703671 | false |
FlowBoat/Flow-Tech-NeurAlgae | Versions/v2/v2.0/NeurAlgae2.0.py | 1 | 15860 | # FlowTech | NeurAlgae
## 2017 CWSF Science Fair | NeurAlgae: HAB Prediction Using Machine Learning Algorithms
#Describes and trains a neural network for the analysis and prediction of algal bloom data
#Copyright (C) 2017 Zachary Trefler and Atif Mahmud
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#If you have comments, questions, concerns, or you just want to say 'hi',
#email Zachary Trefler at [email protected] or Atif Mahmud at [email protected]
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Dropout, regularizers
from keras.models import Model
#from Data import dataX as X, dataPN as PN, dataCDA as CDA, dataPDA as PDA
import Data
X = Data.dataX[5000:len(Data.dataX) - 5000]
PN = Data.dataPN[5000:len(Data.dataPN) - 5000]
CDA = Data.dataCDA[5000:len(Data.dataCDA) - 5000]
PDA = Data.dataPDA[5000:len(Data.dataPDA) - 5000]
Xr = np.zeros((3, 2))
PNr = np.array
architecture = int(input("Which network architecture to use? "))
if architecture == 0:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
layer2 = Dense(64, activation = "relu")(layer1)
outputs = Dense(1, activation = "sigmoid")(layer2)
epochnum = 256
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 1:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1_l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1_l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 256
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 2:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
layer2 = Dense(64, activation = "relu")(layer1)
outputs = Dense(1, activation = "sigmoid")(layer2)
epochnum = 256
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 3:
#Pretty good
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 4:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.001))(drop1)
drop2 = Dropout(0.5)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 5:
#Surprisingly good underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu")(inputs)
outputs = Dense(1, activation = "sigmoid")(layer1)
epochnum = 1
minimizer = "rmsprop"
cost = "mean_squared_error"
elif architecture == 6:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l1(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 7:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0005))(inputs)
drop1 = Dropout(0.33)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0005))(drop1)
drop2 = Dropout(0.33)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 8:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.20)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.20)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 9:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
layer3 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.25)(layer3)
outputs = Dense(1, activation = "sigmoid")(drop3)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 10:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
layer3 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.25)(layer3)
outputs = Dense(1, activation = "sigmoid")(drop3)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 11:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 12:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 13:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.25)(layer1)
layer2 = Dense(64, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.25)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 32
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 14:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
outputs = Dense(1, activation = "sigmoid")(drop2)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 15:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
    drop3 = Dropout(0.5)(layer3)
    layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop3)
    drop4 = Dropout(0.5)(layer4)
outputs = Dense(1, activation = "sigmoid")(drop4)
epochnum = 256
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 16:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(inputs)
drop1 = Dropout(0.5)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop1)
drop2 = Dropout(0.5)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop2)
drop3 = Dropout(0.5)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop3)
drop4 = Dropout(0.5)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop4)
    drop5 = Dropout(0.5)(layer5)
    layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop5)
    drop6 = Dropout(0.5)(layer6)
    layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop6)
    drop7 = Dropout(0.5)(layer7)
    layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.0001))(drop7)
    drop8 = Dropout(0.5)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 128
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 17:
#Overfit
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(inputs)
drop1 = Dropout(0.05)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop1)
drop2 = Dropout(0.05)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop2)
drop3 = Dropout(0.05)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop3)
drop4 = Dropout(0.05)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop4)
    drop5 = Dropout(0.05)(layer5)
    layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop5)
    drop6 = Dropout(0.05)(layer6)
    layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop6)
    drop7 = Dropout(0.05)(layer7)
    layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00001))(drop7)
    drop8 = Dropout(0.05)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
elif architecture == 18:
#Interesting
inputs = Input(shape = (9,))
layer1 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(inputs)
drop1 = Dropout(0.2)(layer1)
layer2 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop1)
drop2 = Dropout(0.2)(layer2)
layer3 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop2)
drop3 = Dropout(0.2)(layer3)
layer4 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop3)
drop4 = Dropout(0.2)(layer4)
layer5 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop4)
    drop5 = Dropout(0.2)(layer5)
    layer6 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop5)
    drop6 = Dropout(0.2)(layer6)
    layer7 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop6)
    drop7 = Dropout(0.2)(layer7)
    layer8 = Dense(128, activation = "relu", activity_regularizer = regularizers.l2(0.00005))(drop7)
    drop8 = Dropout(0.2)(layer8)
outputs = Dense(1, activation = "sigmoid")(drop8)
epochnum = 64
minimizer = "nadam"
cost = "mean_squared_error"
else:
#Underfit
inputs = Input(shape = (9,))
layer1 = Dense(16, activation = "sigmoid")(inputs)
outputs = Dense(1, activation = "sigmoid")(layer1)
epochnum = 128
minimizer = "sgd"
cost = "mean_squared_error"
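# Note: netPN, netCDA and netPDA below are all built from the same `inputs` and
# `outputs` tensors, so the three Model objects wrap the same underlying layers
# and weights; each successive fit() call continues training those shared weights.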
netPN = Model(inputs = inputs, outputs = outputs)
netPN.compile(optimizer = minimizer, loss = cost)
PNh = netPN.fit(x = X, y = PN, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netPN.save_weights("Nets/v2/netPN" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(311)
plt.plot(PNh.history["loss"])
plt.plot(PNh.history["val_loss"])
plt.title("MSE Loss vs. Training Epoch")
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.figure(2)
plt.subplot(311)
x = [i for i in range(len(Data.dataX))]
yPNp = [netPN.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yPNo = [Data.dataPN[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yPNp, label = "Predicted")
plt.plot(x, yPNo, label = "Observed")
plt.title("Predicted and Observed Values vs. Time")
plt.xlabel("Time")
plt.ylabel("P(PN > 10Kcells/L)")
plt.legend()
netCDA = Model(inputs = inputs, outputs = outputs)
netCDA.compile(optimizer = minimizer, loss = cost)
CDAh = netCDA.fit(x = X, y = CDA, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netCDA.save_weights("Nets/v2/netCDA" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(312)
plt.plot(CDAh.history["loss"])
plt.plot(CDAh.history["val_loss"])
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.figure(2)
plt.subplot(312)
x = [i for i in range(len(Data.dataX))]
yCDAp = [netCDA.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yCDAo = [Data.dataCDA[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yCDAp, label = "Predicted")
plt.plot(x, yCDAo, label = "Observed")
plt.xlabel("Time")
plt.ylabel("P(CDA > 10pg/cell)")
plt.legend()
netPDA = Model(inputs = inputs, outputs = outputs)
netPDA.compile(optimizer = minimizer, loss = cost)
PDAh = netPDA.fit(x = X, y = PDA, batch_size = 128, epochs = epochnum, verbose = 1, validation_split = 0.2, shuffle = True)
netPDA.save_weights("Nets/v2/netPDA" + str(architecture) + ".hdf5")
plt.figure(1)
plt.subplot(313)
plt.plot(PDAh.history["loss"])
plt.plot(PDAh.history["val_loss"])
plt.xlabel("Epoch")
plt.ylabel("MSE Loss")
plt.legend(["Training loss", "Testing loss"])
plt.savefig("Plots/v2/MSEvE" + str(architecture) + ".png")
plt.figure(2)
plt.subplot(313)
x = [i for i in range(len(Data.dataX))]
yPDAp = [netPDA.predict(np.array([Data.dataX[i]]))[0][0] for i in range(len(Data.dataX))]
yPDAo = [Data.dataPDA[i][0] for i in range(len(Data.dataX))]
plt.plot(x, yPDAp, label = "Predicted")
plt.plot(x, yPDAo, label = "Observed")
plt.xlabel("Time")
plt.ylabel("P(PDA > 500ng/L)")
plt.legend()
plt.savefig("Plots/v2/POvT" + str(architecture) + ".png")
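# Later-use sketch (not executed here): the weight files saved above can be
# reloaded onto an identically built model for inference, e.g.
#
#   netPN.load_weights("Nets/v2/netPN" + str(architecture) + ".hdf5")
#   p_bloom = netPN.predict(np.array([Data.dataX[0]]))[0][0]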
| gpl-3.0 | -8,447,979,558,048,298,000 | 41.98103 | 123 | 0.674149 | false |
openstack/kuryr | kuryr/tests/unit/binding/drivers/test_macvlan.py | 1 | 3091 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import uuidutils
from kuryr.lib.binding.drivers import macvlan
from kuryr.lib import constants
from kuryr.lib import utils
from kuryr.tests.unit import base
mock_create = mock.MagicMock()
mock_interface = mock.MagicMock()
class TestMacvlanDriver(base.TestCase):
"""Unit tests for nested MACVLAN driver"""
@mock.patch('kuryr.lib.binding.drivers.utils._configure_container_iface')
@mock.patch('pyroute2.ipdb.interfaces.InterfacesDict.__getattribute__',
return_value=mock_create)
@mock.patch('pyroute2.ipdb.interfaces.InterfacesDict.__getitem__',
return_value=mock_interface)
def test_port_bind(self, mock_getitem, mock_getattribute,
mock_configure_container_iface):
fake_mtu = 1450
fake_docker_endpoint_id = utils.get_hash()
fake_docker_network_id = utils.get_hash()
fake_port_id = uuidutils.generate_uuid()
fake_nova_instance_port_id = uuidutils.generate_uuid()
fake_neutron_v4_subnet_id = uuidutils.generate_uuid()
fake_neutron_v6_subnet_id = uuidutils.generate_uuid()
fake_vif_details = {"port_filter": True, "ovs_hybrid_plug": False}
fake_vif_type = "ovs"
fake_neutron_port = self._get_fake_port(
fake_docker_endpoint_id, fake_docker_network_id,
fake_port_id, constants.PORT_STATUS_ACTIVE,
fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id,
vif_details=fake_vif_details, vif_type=fake_vif_type)
fake_nova_instance_port = self._get_fake_port(
"nova-port", fake_docker_network_id,
fake_nova_instance_port_id, constants.PORT_STATUS_ACTIVE,
fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id,
"192.168.1.3", "fe80::f816:3eff:fe20:57c5",
vif_details=fake_vif_details, vif_type=fake_vif_type)
fake_subnets = self._get_fake_subnets(
fake_docker_endpoint_id, fake_docker_network_id,
fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
fake_network = self._get_fake_networks(fake_docker_network_id)
fake_network['networks'][0]['mtu'] = fake_mtu
macvlan.port_bind(fake_docker_endpoint_id,
fake_neutron_port['port'],
fake_subnets['subnets'],
fake_network['networks'][0],
fake_nova_instance_port['port'])
mock_configure_container_iface.assert_called_once()
| apache-2.0 | -4,145,377,892,585,740,000 | 43.157143 | 77 | 0.657392 | false |
temnoregg/django-muzo | muzo/ws.py | 1 | 7137 | from SOAPpy import WSDL
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from os.path import dirname
from signature import CSignature
from models import MERCHANT_NUM
MUZO_ORDER_STATES = {
0: _('UNKNOWN'),
1: _('REQUESTED'),
2: _('PENDING'),
3: _('CREATED'),
4: _('APPROVED'),
5: _('APPROVE_REVERSED'),
6: _('UNAPPROVED'),
7: _('DEPOSITED_BATCH_OPENED'),
8: _('DEPOSITED_BATCH_CLOSED'),
9: _('ORDER_CLOSED'),
10: _('DELETED'),
11: _('CREDITED_BATCH_OPENED'),
12: _('CREDITED_BATCH_CLOSED'),
13: _('DECLINED')
}
MUZO_PRCODE = {
0: _('OK'),
1: _('Field too long'),
2: _('Field too short'),
3: _('Incorrect content of field'),
4: _('Field is null'),
5: _('Missing required field'),
11: _('Unknown merchant'),
14: _('Duplicate order number'),
15: _('Object not found'),
17: _('Amount to deposit exceeds approved amount'),
18: _('Total sum of credited amounts exceeded deposited amount'),
20: _('Object not in valid state for operation'),
26: _('Technical problem in connection to authorization center'),
27: _('Incorrect order type'),
28: _('Declined in 3D'),
30: _('Declined in AC'),
31: _('Wrong digest'),
1000: _('Technical problem')
}
MUZO_SRCODE = {
0: _('Empty'),
1: _('ORDERNUMBER'),
2: _('MERCHANTNUMBER'),
6: _('AMOUNT'),
7: _('CURRENCY'),
8: _('DEPOSITFLAG'),
10: _('MERORDERNUM'),
11: _('CREDITNUMBER'),
12: _('OPERATION'),
18: _('BATCH'),
22: _('ORDER'),
24: _('URL'),
25: _('MD'),
26: _('DESC'),
34: _('DIGEST'),
1001: _("""Declined in AC, Card blocked"""),
1002: _("""Declined in AC, Declined"""),
1003: _("""Declined in AC, Card problem"""),
1004: _("""Declined in AC, Technical problem in authorization process"""),
1005: _("""Declined in AC, Account problem"""),
3000: _("""Declined in 3D. Cardholder not authenticated in 3D.
Contact your card issuer. Note: Cardholder authentication failed (wrong
password, transaction canceled, authentication window was closed)
Transaction Declined."""),
3001: _("""Authenticated. Note: Cardholder was successfully
authenticated - transaction continue with
authorization."""),
3002: _("""Not Authenticated in 3D. Issuer or Cardholder not participating in 3D.
Contact your card issuer."""),
3004: _("""Not Authenticated in 3D. Issuer not participating or Cardholder not
enrolled. Contact your card issuer."""),
3005: _("""Declined in 3D. Technical problem during Cardholder authentication.
Contact your card issuer"""),
3006: _("""Declined in 3D. Technical problem during Cardholder authentication."""),
3007: _("""Declined in 3D. Acquirer technical problem. Contact the merchant."""),
3008: _("""Declined in 3D. Unsupported card product. Contact your card issuer""")
}
class MuzoWSError(Exception):
pass
class MuzoWS:
# private key file location
priv_key = settings.MUZO_PRIV_KEY
# password
passwd = settings.MUZO_PASS
# public key file location
pub_key = settings.MUZO_PUB_KEY
# WSDL file
# if settings.DEBUG:
# wsdl_file = dirname(__file__)+'/pgwTest.xml'
# else:
wsdl_file = dirname(__file__)+'/pgw.xml'
#
merchant_num = MERCHANT_NUM
def __init__(self, order_num):
self._server = WSDL.Proxy(self.wsdl_file)
self._order_num = str(order_num)
# sign data routine, returns base64 encoded digest
def _sign(self, data):
CS = CSignature(privkey=self.priv_key, passwd=self.passwd, pubkey=self.pub_key)
return CS.sign(data)
# sends orderQueryState request to WS server and returns WS object response
def queryOrderState(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
return self._server.queryOrderState(str(self.merchant_num), str(self._order_num), digest)
def getOrderState(self):
st = self.queryOrderState()
return '%s - %s' % (st.state, MUZO_ORDER_STATES[st.state])
def getOrderStateId(self):
st = self.queryOrderState().state
return int(st)
def approveReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.approveReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def deposit(self, amount):
d = '%s|%s|%s' % (self.merchant_num, self._order_num, amount)
digest = self._sign(d)
		response = self._server.deposit(str(self.merchant_num), str(self._order_num), str(amount), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def depositReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.depositReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def credit(self, amount):
d = '%s|%s|%s' % (self.merchant_num, self._order_num, amount)
		digest = self._sign(d)
response = self._server.credit(str(self.merchant_num), str(self._order_num), str(amount), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def creditReversal(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.creditReversal(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def orderClose(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.orderClose(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def delete(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.delete(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
def batchClose(self):
d = '%s|%s' % (self.merchant_num, self._order_num)
digest = self._sign(d)
response = self._server.batchClose(str(self.merchant_num), str(self._order_num), digest)
if response.primaryReturnCode == 0:
return True
else:
return '%s - %s' % (MUZO_PRCODE.get(response.primaryReturnCode, 'Unknown'), MUZO_PRCODE.get(response.secondaryReturnCode, 'Unknown'))
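

# Illustrative usage sketch appended for clarity; it is not part of the original
# module. It assumes the MUZO_PRIV_KEY, MUZO_PASS and MUZO_PUB_KEY Django
# settings are configured, and the order number and amount below are
# hypothetical.
if __name__ == '__main__':
	ws = MuzoWS(order_num=20150001)
	print(ws.getOrderState())  # e.g. "4 - APPROVED"
	if ws.getOrderStateId() == 4:  # 4 == APPROVED
		print(ws.deposit(amount=1000))  # amount in minor currency units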
| mit | -1,142,081,912,268,378,500 | 34.507463 | 136 | 0.676475 | false |
IntelLabsEurope/infrastructure-repository | monitoring_service/epa_database/hw_reources.py | 1 | 12910 | # Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module to manage Hw information of a node.
It takes as input the files produced by the agents,
parses them, and stores a graph representation of the host
in Neo4j.
"""
__author__ = 'gpetralia'
import xml.etree.ElementTree as Et
import time
import json
import networkx as nx
import common.neo4j_resources as neo_resource
# Map numerical types used by hardware locality to string categories
OSDEVTYPE_CATEGORY_MAP = {
'0': 'storage', # HWLOC_OBJ_OSDEV_BLOCK
'1': 'compute', # HWLOC_OBJ_OSDEV_GPU
'2': 'network', # HWLOC_OBJ_OSDEV_NETWORK
'3': 'network', # HWLOC_OBJ_OSDEV_OPENFABRICS
'4': 'compute', # HWLOC_OBJ_OSDEV_DMA
'5': 'compute', # HWLOC_OBJ_OSDEV_COPROC
}
class HostHwResources(object):
"""
Class to manage hw resources in the Neo4j DB
"""
def __init__(self, hostname, pop_id, graph_db):
self.hostname = hostname
self.graph_db = graph_db
self.pop_id = pop_id
self.label = 'physical_resource'
self.index = 'physical_name'
def store(self, path, hwloc_file, cpu_file=None, sriov_file=None, dpdk_file=None, timestamp=None):
"""
        Store information contained in files created by the EPA agents into Neo4j,
        using a networkx graph.
:param path: Path of the files
:param hwloc_file: Hardware locality file
:param cpu_file: Optional cpu information file
:param sriov_file: Optional SR-IOV information file
:param dpdk_file: Optional DPDK information file
:param timestamp: Optional timestamp in epoch
"""
graph = nx.DiGraph()
xml_root = Et.parse(path + hwloc_file).getroot()
deleted_edges = {}
for child in xml_root:
_parse_object_hwloc(graph, child, self.hostname, deleted_edges, self.pop_id)
if cpu_file is not None:
processors_dict = _parse_cpu_info(path + cpu_file)
_enrich_graph_cpuinfo(graph, processors_dict)
if dpdk_file is not None:
dpdk_dict = _parse_dpdk_info(path + dpdk_file)
_enrich_graph_dpdkinfo(graph, dpdk_dict)
if sriov_file is not None:
sriov_dict = _parse_sriov_info(path + sriov_file)
_enrich_graph_sriovinfo(graph, sriov_dict)
if timestamp is not None:
now = timestamp
else:
now = time.time()
neo_id_nodes = {}
nodes_to_add = []
nodes_stored = []
query_string = 'Match n Where n.hostname = {hostname} ' \
'And n.resource_type = {resource_type} Return n.physical_name'
res = self.graph_db.cypher.execute(query_string, hostname=self.hostname, resource_type='physical')
for item in res:
print str(item)
nodes_stored.append(item['n.physical_name'])
for nx_node in graph.nodes():
nodes_to_add.append(str(nx_node))
neo_id_nodes[nx_node] = neo_resource.add_node(self.graph_db, (self.label, self.index, nx_node), now,
get_node_properties(graph, nx_node))
nodes_to_remove = [item for item in nodes_stored if item not in nodes_to_add]
for node in nodes_to_remove:
neo_resource.delete_node(self.graph_db, (self.label, self.index, node))
for edge in graph.edges():
source = edge[0]
target = edge[1]
edge_label = ''
if 'label' in graph.edge[source][target]:
edge_label = graph.edge[source][target]['label']
db_src = neo_id_nodes[source]
db_target = neo_id_nodes[target]
rel_stored = neo_resource.get_edge(self.graph_db, db_src, db_target)
if rel_stored is None:
                neo_resource.add_edge(self.graph_db, db_src, db_target, now, edge_label)
else:
                neo_resource.update_edge(self.graph_db, now, edge_label, db_src=db_src, db_target=db_target)
def get_node_properties(graph, node_name):
"""
Return a dict containing nodes properties
:param graph: Networkx graph
:param node_name: name of the node
:return dict: Node properties
"""
neo_node = {}
for item in graph.node[str(node_name)]:
if isinstance((graph.node[str(node_name)][item]), dict):
neo_node[item] = json.dumps(graph.node[str(node_name)][item])
else:
neo_node[item] = str(graph.node[str(node_name)][item])
neo_node['physical_name'] = node_name
return neo_node
def _enrich_graph_sriovinfo(graph, sriov_dict):
"""
Enrich the graph with SR-IOV information
:param graph: networkx graph
:param sriov_dict: SR-IOV information
"""
for node, attr in graph.nodes(data=True):
if 'pci_busid' in attr['attributes'].keys() and attr['attributes']['pci_busid'] in sriov_dict.keys():
attr['attributes']['sriov'] = sriov_dict[attr['attributes']['pci_busid']]
def _enrich_graph_dpdkinfo(graph, dpdk_dict):
"""
Enrich the graph with DPDK information
:param graph: networkx graph
:param dpdk_dict: DPDK information
"""
for node, attr in graph.nodes(data=True):
if 'pci_busid' in attr['attributes'].keys() and attr['attributes']['pci_busid'] in dpdk_dict.keys():
attr['attributes']['dpdk'] = True
def _enrich_graph_cpuinfo(graph, processors_dict):
"""
    Navigate the graph and
    add attributes from processors_dict
    to the PU nodes.
    The key between processors_dict and the hwloc graph
    is the os_index attribute.
:param graph: the graph that should be enriched
:param processors_dict: a dict of cpu attributes
"""
for node, attr in graph.nodes(data=True):
if '_PU_' in node:
index = int(attr['attributes']['os_index'])
attr['attributes'].update(processors_dict[index])
def _parse_sriov_info(sriov_info_file):
"""
Create a dict containing information extracted from the SR-IOV file
:param sriov_info_file: SR-IOV file
:return dict: SR-IOV information
"""
sriov_dict = {}
with open(sriov_info_file) as f:
for line in f:
line = sanitize_string(line)
attr = line.split(' ')
if len(attr) == 3:
sriov_dict[attr[0]] = {"numvfs": attr[1], "totalvfs": attr[2]}
return sriov_dict
def _parse_dpdk_info(dpdk_info_file):
"""
Create a dict containing information extracted from the DPDK file
:param dpdk_info_file: DPDK file
:return dict: DPDK information
"""
dpdk_dict = {}
with open(dpdk_info_file) as f:
for line in f:
line = sanitize_string(line)
dpdk_dict[line] = {"dpdk": True}
return dpdk_dict
def _parse_cpu_info(cpu_info_file):
"""
Parse the text cpuinfo file
and create a dict of processors.
Each processor is a dict with all the attributes given by cpuinfo.
:param cpu_info_file: Text file with the output of cat /proc/cpuinfo
:return processors_dict: Dictionary containing attributes of each proc
"""
processors_dict = {}
with open(cpu_info_file) as f:
current_id = None
for line in f:
attr = line.split(':')
if len(attr) > 1:
attr[0] = sanitize_string(attr[0])
attr[1] = sanitize_string(attr[1])
if 'processor' in attr[0]:
current_id = int(attr[1])
processors_dict[current_id] = {}
processors_dict[current_id]['id'] = attr[1]
elif current_id is not None and attr[1] is not None and attr[1] is not '':
processors_dict[current_id][attr[0]] = attr[1]
return processors_dict
def _parse_object_hwloc(graph, obj, host_name, deleted_edges, pop_id, parent=None):
"""
Given an xml object extracted from Hardware locality file, create the
corresponding node in the Networkx graph
:param graph: netowrkx graph
:param obj: xml object
:param host_name: hostname of the host who the hwloc obj belongs to
:param deleted_edges: list of edges to delete
:param pop_id: PoP ID
:param parent: Optional reference to the parent of the current object
"""
object_children = []
new_node_properties = {
'resource_type': 'physical',
'category': _get_category(obj),
'type': obj.attrib['type'],
'hostname': host_name,
'pop': pop_id,
'attributes': _get_attributes(obj)
}
node_name = _get_unique_name(obj, host_name)
attr = obj.attrib.copy()
del attr['type']
# Saving the children to be parsed
for child in obj:
if child.tag == 'object':
object_children.append(child)
graph.add_node(node_name, attr_dict=new_node_properties)
# Adding the edge between current node and the parent
if parent is not None:
graph.add_edge(parent, node_name, label='INTERNAL')
if parent in deleted_edges.keys():
graph.add_edge(deleted_edges[parent], node_name, label='INTERNAL')
    # Work around the hwloc quirk that reports
    # the two L1 caches (data and instruction)
    # as if one were nested under the other
if parent is not None:
if new_node_properties['type'] == 'Cache':
parent_type = ''
parent_depth = ''
for node, node_attr in graph.nodes(data=True):
if node == parent:
parent_type = node_attr['type']
if parent_type == 'Cache':
parent_depth = node_attr['attributes']['depth']
if parent_type == new_node_properties['type'] and attr['depth'] == parent_depth:
graph.remove_edge(parent, node_name)
deleted_edges[node_name] = parent
parent = graph.pred[parent].keys()[0]
graph.add_edge(parent, node_name, label='INTERNAL')
# Recursively calls the function to parse the child of current node
for obj in object_children:
_parse_object_hwloc(graph, obj, host_name, deleted_edges, pop_id, parent=node_name)
def _get_category(hw_obj):
"""
    Given an object from the hwloc xml file,
    the function returns the category of the node,
    chosen using the OSDEVTYPE_CATEGORY_MAP
:param hw_obj: object extracted from hwloc xml file
:rtype string
:return: category
"""
attrib = hw_obj.attrib
if 'osdev_type' in attrib.keys():
category = OSDEVTYPE_CATEGORY_MAP[attrib['osdev_type']]
else:
category = 'compute'
return category
def _get_attributes(hw_obj):
"""
Return a dict containing the attributes
of an xml object extracted from Hwloc xml file
:param hw_obj: hw object to be parsed
:return dict: attributes of the object
"""
attributes = hw_obj.attrib.copy()
del attributes['type']
for child in hw_obj:
if child.tag == 'info':
name = child.attrib['name']
value = child.attrib['value']
attributes[name] = value
return attributes
def _get_unique_name(hw_obj, hostname):
# UniqueName
# Cache: hostname_Cache_[cpuset]_[depth]_[cache_type]
# OSDev: hostname_OSDev_[name]
# otherwise: hostname_[type]_os_index
obj_type = hw_obj.attrib['type']
if obj_type == 'Cache':
return hostname + '_' + 'Cache' + '_' + hw_obj.attrib['cpuset'] + '_' + hw_obj.attrib['depth'] + '_' + \
hw_obj.attrib['cache_type']
if obj_type == 'OSDev':
return hostname + '_' + 'OSDev' + '_' + hw_obj.attrib['name']
if obj_type == 'Core':
print "Core"
return hostname + '_' + 'Core' + '_' + hw_obj.attrib['cpuset']
return hostname + '_' + hw_obj.attrib['type'] + '_' + hw_obj.attrib['os_index']
def sanitize_string(input_string, space=True):
"""
Sanitize the input_string changing it to lowercase,
deleting space at the start and at the end
:param input_string:
:param space: if space=False, spaces will be replaced with _
:return:
"""
output_string = input_string.strip().lower().replace('-', '_').replace('\n', '')
if not space:
output_string = output_string.replace(' ', '_')
return output_string
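

if __name__ == '__main__':
    # Illustrative sketch appended for clarity; it is not part of the original
    # module. It assumes a Neo4j instance reachable through py2neo's Graph()
    # and that the EPA agent output files exist; the host name, PoP id and
    # file names below are hypothetical.
    from py2neo import Graph
    host = HostHwResources('compute-1', 'pop-1', Graph())
    host.store(path='/tmp/epa/', hwloc_file='compute-1_hwloc.xml',
               cpu_file='compute-1_cpuinfo.txt')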
| apache-2.0 | 8,347,671,430,936,619,000 | 32.273196 | 114 | 0.608521 | false |
anhiga/poliastro | src/poliastro/tests/tests_twobody/test_sample.py | 1 | 3519 | import pytest
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.time import Time
from poliastro.bodies import Earth, Sun
from poliastro.twobody import Orbit
from poliastro.twobody.propagation import kepler, mean_motion, cowell
import numpy as np
from poliastro.util import norm
def test_sample_angle_zero_returns_same():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
nu_values = [0] * u.deg
_, rr = ss0.sample(ss0.nu + nu_values)
assert_quantity_allclose(rr[0].get_xyz(), ss0.r)
@pytest.mark.parametrize("time_of_flight", [1 * u.min, 40 * u.min])
@pytest.mark.parametrize("method", [kepler, mean_motion, cowell])
def test_sample_one_point_equals_propagation_small_deltas(time_of_flight, method):
# Time arithmetic loses precision, see
# https://github.com/astropy/astropy/issues/6638
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
sample_times = Time([ss0.epoch + time_of_flight])
expected_ss = ss0.propagate(time_of_flight, method)
_, rr = ss0.sample(sample_times, method)
assert_quantity_allclose(rr[0].get_xyz(), expected_ss.r)
@pytest.mark.parametrize("time_of_flight", [6 * u.h, 2 * u.day])
@pytest.mark.parametrize("method", [kepler, mean_motion, cowell])
def test_sample_one_point_equals_propagation_big_deltas(time_of_flight, method):
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
sample_times = Time([ss0.epoch + time_of_flight])
expected_ss = ss0.propagate(time_of_flight)
_, rr = ss0.sample(sample_times, method)
assert_quantity_allclose(rr[0].get_xyz(), expected_ss.r)
def test_sample_nu_values():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
nu_values = [0, 90, 180] * u.deg
expected_ss = ss0.propagate(ss0.period / 2)
_, rr = ss0.sample(nu_values)
assert len(rr) == len(nu_values)
assert_quantity_allclose(norm(rr[0].get_xyz()), expected_ss.r_p)
assert_quantity_allclose(norm(rr[-1].get_xyz()), expected_ss.r_a)
@pytest.mark.parametrize("num_points", [3, 5, 7, 9, 11, 101])
def test_sample_num_points(num_points):
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
# TODO: Test against the perigee and apogee
# expected_ss = ss0.propagate(ss0.period / 2)
_, rr = ss0.sample(num_points)
assert len(rr) == num_points
# assert_quantity_allclose(rr[num_points // 2].get_xyz(), expected_ss.r)
@pytest.mark.parametrize('method', [
mean_motion,
cowell,
pytest.param(kepler, marks=pytest.mark.xfail),
])
def test_sample_big_orbits(method):
# See https://github.com/poliastro/poliastro/issues/265
ss = Orbit.from_vectors(
Sun,
[-9018878.6, -94116055, 22619059] * u.km,
[-49.950923, -12.948431, -4.2925158] * u.km / u.s
)
times, positions = ss.sample(15, method=method)
assert len(times) == len(positions) == 15
| mit | -3,945,005,692,256,854,000 | 30.702703 | 82 | 0.651037 | false |
GrahamDumpleton/ispyd | ispyd/manager.py | 1 | 2308 | import atexit
import cmd
import ConfigParser
import os
import socket
import threading
import traceback
import sys
from ispyd.shell import RootShell
class ShellManager(object):
def __init__(self, config_file):
self.__config_file = config_file
self.__config_object = ConfigParser.RawConfigParser()
if not self.__config_object.read([config_file]):
raise RuntimeError('Unable to open configuration file %s.' %
config_file)
self.__socket_server = self.__config_object.get('ispyd',
'listen') % {'pid': os.getpid()}
if not os.path.isabs(self.__socket_server):
host, port = self.__socket_server.split(':')
port = int(port)
self.__socket_server = (host, port)
self.__thread = threading.Thread(target=self.__thread_run,
name='ISpyd-Shell-Manager')
self.__thread.setDaemon(True)
self.__thread.start()
def __socket_cleanup(self, path):
try:
os.unlink(path)
except:
pass
def __thread_run(self):
if type(self.__socket_server) == type(()):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(self.__socket_server)
else:
try:
os.unlink(self.__socket_server)
except:
pass
listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
listener.bind(self.__socket_server)
atexit.register(self.__socket_cleanup, self.__socket_server)
os.chmod(self.__socket_server, 0600)
listener.listen(5)
while True:
client, addr = listener.accept()
shell = RootShell(self.__config_object)
shell.stdin = client.makefile('r')
shell.stdout = client.makefile('w')
try:
shell.cmdloop()
except:
print >> shell.stdout, 'Exception in shell "%s".' % shell.name
traceback.print_exception(*sys.exc_info(), file=shell.stdout)
shell.stdin = None
shell.stdout = None
del shell
client.close()
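

if __name__ == '__main__':
    # Illustrative sketch appended for clarity; it is not part of the original
    # module. It assumes an ispyd configuration file at the hypothetical path
    # below, with an [ispyd] section containing a 'listen' entry such as
    # /tmp/ispyd-%(pid)s.sock.
    import time
    manager = ShellManager('/etc/ispyd.ini')
    while True:
        time.sleep(60)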
| apache-2.0 | -6,454,305,951,312,017,000 | 27.85 | 78 | 0.555893 | false |
jart/tensorflow | tensorflow/python/eager/backprop.py | 1 | 29803 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
pywrap_tensorflow.TFE_Py_RegisterGradientFunction(_gradient_function)
_tracing = False
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
def _record_gradient(op_name, inputs, attrs, results, name):
return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs,
results, name)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
grad = imperative_grad.imperative_grad(_default_vspace,
this_tape,
nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns: function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and is vjp w.r.t. params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
  vjp()  # the vjp function returns 6.0
  ```
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(args[i])
if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
_default_vspace, this_tape, nest.flatten(result), sources,
output_gradients=dy)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all([isinstance(g, ops.Tensor) for g in gradients]):
return math_ops.add_n(gradients)
else:
assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients])
indexed_slices_list = []
for grad in gradients:
# TODO(xpan): Support nested IndexedSlices and core IndexedSlices
if isinstance(grad, ops.Tensor):
indexed_slices = ops.IndexedSlices(
grad,
math_ops.range(grad.shape[0]),
constant_op.constant(grad.shape.as_list()))
indexed_slices_list.append(indexed_slices)
else:
indexed_slices_list.append(grad)
# Dense shapes from all gradients should be the same.
dense_shape = indexed_slices_list[0].dense_shape
# For simplicity now, always cast to int64.
indices = array_ops.concat([math_ops.cast(x.indices, dtypes.int64)
for x in indexed_slices_list], 0)
values = array_ops.concat([x.values for x in indexed_slices_list], 0)
return ops.IndexedSlices(values, indices, dense_shape)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
return functools.reduce(operator.mul, grad._shape_tuple(), 1) # pylint: disable=protected-access
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
_zeros_cache = context._TensorCache() # pylint: disable=protected-access
def _fast_fill(value, shape, dtype):
return array_ops.fill(shape, constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Wraps array_ops.zeros to cache last zero for a given shape and dtype."""
device = context.context().device_name
if dtype == dtypes.variant:
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
# pylint: disable=protected-access
cache_key = shape, dtype, device, context.context()._eager_context.mode
# pylint: enable=protected-access
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
_zeros_cache.put(cache_key, cached)
return cached
def _ones(shape, dtype):
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(1, dtype=dtype)
return _fast_fill(1, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
tensor_id=ops.tensor_id,
zeros=_zeros,
ones=_ones)
def _handle_or_self(x):
"""If x is ResourceVariable, return its handle, else x."""
if isinstance(x, resource_variable_ops.ResourceVariable):
x = x.handle
return x
@tf_export("GradientTape")
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.contrib.eager.Variable` or
@{tf.get_variable}, trainable=True is default in both cases) are automatically
watched. Tensors can be manually watched by invoking the `watch` method on
this context manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
dy_dx = g.gradient(y, x) # Will compute to 6.0
```
GradientTapes can be nested to compute higher-order derivatives. For example,
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
with tf.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, x) # Will compute to 6.0
d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0
```
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
```python
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = g.gradient(y, x) # 6.0
del g # Drop the reference to the tape
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
"""
self._tape = None
self._persistent = persistent
self._recording = False
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self, existing_tape=False):
if self._recording:
raise ValueError("Tape is already recording.")
if existing_tape:
if self._tape is None:
raise ValueError("There is no existing tape.")
tape.push_tape(self._tape)
else:
self._tape = tape.push_new_tape(persistent=self._persistent)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
"""
for t in nest.flatten(tensor):
tape.watch(_handle_or_self(t))
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
```
with tf.GradientTape(persistent=True) as t:
loss = compute_loss(model)
with t.stop_recording():
# The gradient computation below is not traced, saving memory.
grads = t.gradient(loss, model.variables)
```
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape(existing_tape=True)
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
return self._tape.watched_variables()
def gradient(self, target, sources, output_gradients=None):
"""Computes the gradient using operations recorded in context of this tape.
Args:
target: Tensor (or list of tensors) to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
output_gradients: a list of gradients, one for each element of
target. Defaults to None.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None),
one for each element in `sources`. Returned structure is the same as
the structure of `sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once on a non-persistent tape.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once on "
"non-persistent tapes.")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(logging.WARN,
"Calling GradientTape.gradient on a persistent "
"tape inside it's context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derrivatives.", 1)
flat_sources = nest.flatten(sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in nest.flatten(output_gradients)]
flat_grad = imperative_grad.imperative_grad(
_default_vspace, self._tape, nest.flatten(target), flat_sources,
output_gradients=output_gradients)
if not self._persistent:
self._tape = None
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
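

# Illustrative sketch (not part of the original module): weighting the gradient
# of a vector-valued target with `output_gradients`, assuming eager execution
# is enabled.
#
#   x = tf.constant([1.0, 2.0])
#   with tf.GradientTape() as t:
#     t.watch(x)
#     y = x * x                                    # y == [1.0, 4.0]
#   # Equivalent to differentiating sum(w * y) with w == [0.5, 1.0]:
#   g = t.gradient(y, x, output_gradients=tf.constant([0.5, 1.0]))
#   # g == [1.0, 4.0], i.e. [0.5 * 2*1.0, 1.0 * 2*2.0]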
| apache-2.0 | 6,596,691,934,725,755,000 | 33.494213 | 108 | 0.664497 | false |
waytai/pytracemalloctext | tests/python_memleak.py | 1 | 3305 | """
Memory usage of Python < 3.3 grows between some function calls, randomly,
whereas it should stay stable. The final memory usage should be close to the
initial memory usage.
Example with Python 2.6:
Initial memory:
VmRSS: 3176 kB
After call #1:
VmRSS: 4996 kB
After call #2:
VmRSS: 4996 kB
After call #3:
VmRSS: 14704 kB
Final memory
VmRSS: 14704 kB
Example with Python 3.3 (compiled in debug mode):
Initial memory:
VmRSS: 6048 kB
After call #1:
VmRSS: 6732 kB
After call #2:
VmRSS: 6732 kB
After call #3:
VmRSS: 6732 kB
Final memory
VmRSS: 6732 kB
The Python memory allocator of Python 3.3 uses mmap(), when available, instead
of malloc(). munmap() releases immediatly system memory because it can punch
holes in the memory space of the process, whereas malloc() uses brk() and
sbrk() which uses a contigious address range for the heap memory.
The Python memory allocator allocates chunks of memory of 256 KB (see
ARENA_SIZE in Objects/obmalloc.c). A chunk cannot be released to the system
before all objects stored in the chunk are released.
The Python memory allocator is only used for allocations smaller than 256 bytes
in Python <= 3.2, or allocations smaller than 512 bytes in Python 3.3.
Otherwise, malloc() and free() are used. The GNU libc uses brk() or mmap()
depending on a threshold: 128 KB by default. The threshold is dynamic nowadays.
Use mallopt(M_MMAP_THRESHOLD, nbytes) to change this threshold.
See also:
* http://pushingtheweb.com/2010/06/python-and-tcmalloc/
* http://sourceware.org/ml/libc-alpha/2006-03/msg00033.html
* http://www.linuxdevcenter.com/pub/a/linux/2006/11/30/linux-out-of-memory.html?page=2
* http://cloudfundoo.wordpress.com/2012/05/18/minor-page-faults-and-dynamic-memory-allocation-in-linux/
"""
import gc
import sys
import tracemalloc
import tracemalloctext
tracemalloc.add_exclusive_filter(tracemalloctext.__file__)
tracemalloc.enable()
task = tracemalloctext.DisplayTopTask(10)
#task.start(60)
def dump_memory():
print("*FORCE DISPLAY*")
task.display()
return
with open("/proc/self/status") as fp:
for line in fp:
if "VmRSS" not in line:
continue
print(line.rstrip())
break
#with open("/proc/self/maps") as fp:
# for line in fp:
# print(line.rstrip())
def func():
ns = {}
codeobj = compile(codestr, 'wastememory.py', "exec")
exec(codeobj, ns, ns)
ns.clear()
codeobj = None
ns = None
gc.collect()
codestr = ["""class SuperClass:"""]
for index in range(2000):
codestr.append("""
classattr%s = 2
def methdod%s(self, arg):
"docstring"
x = len(arg)
return x""" % (index, index))
codestr = ''.join(codestr)
print("Initial memory: ")
dump_memory()
for loop in range(1, 4):
func()
print("After call #%s:" % loop)
print("After call #%s:" % loop)
print("After call #%s:" % loop)
print("After call #%s:" % loop)
print("After call #%s:" % loop)
print("After call #%s:" % loop)
print("After call #%s:" % loop)
dump_memory()
print("Finally memory")
dump_memory()
| mit | -3,558,270,381,872,102,400 | 27.247863 | 103 | 0.645386 | false |
cburschka/NBT | examples/block_analysis.py | 1 | 8106 | #!/usr/bin/env python
"""
Finds the contents of the different blocks in a level, taking different data values (sub block types) into account.
"""
import locale, os, sys
import glob
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile
from nbt.chunk import McRegionChunk
def stats_per_chunk(chunk, block_data_totals):
"""Given a chunk, increment the block types with the number of blocks found"""
for block_id, data_id in chunk.blocks.get_all_blocks_and_data():
block_data_totals[block_id][data_id] += 1
def bounded_stats_per_chunk(chunk, block_data_totals, start, stop):
"""Given a chunk, return the number of blocks types within the specified selection"""
chunk_z, chunk_x = chunk.get_coords()
for z in range(16):
world_z = z + chunk_z*16
if ( (start != None and world_z < int(start[2])) or (stop != None and world_z > int(stop[2])) ):
# Outside the bounding box; skip to next iteration
#print("Z break: %d,%d,%d" % (world_z,start[2],stop[2]))
break
for x in range(16):
world_x = x + chunk_x*16
if ( (start != None and world_x < int(start[0])) or (stop != None and world_x > int(stop[0])) ):
# Outside the bounding box; skip to next iteration
#print("X break: %d,%d,%d" % (world_x,start[0],stop[0]))
break
for y in range(128):
if ( (start != None and y < int(start[1])) or (stop != None and y > int(stop[1])) ):
# Outside the bounding box; skip to next iteration
#print("Y break: %d,%d,%d" % (y,start[1],stop[1]))
break
#print("Chunk: %d,%d Coord: %d,%d,%d" % (c['x'], c['z'],x,y,z))
block_id,block_data = chunk.blocks.get_block_and_data(x,y,z)
block_data_totals[block_id][block_data] += 1
def process_region_file(filename, start, stop):
"""Given a region filename, return the number of blocks of each ID in that file"""
pieces = filename.split('.')
rx = int(pieces[-3])
rz = int(pieces[-2])
block_data_totals = [[0]*16 for i in range(256)] # up to 16 data numbers in 256 block IDs
# Does the region overlap the bounding box at all?
if (start != None):
if ( (rx+1)*512-1 < int(start[0]) or (rz+1)*512-1 < int(start[2]) ):
return block_data_totals
elif (stop != None):
if ( rx*512-1 > int(stop[0]) or rz*512-1 > int(stop[2]) ):
return block_data_totals
file = RegionFile(filename)
# Get all chunks
chunks = file.get_chunks()
print("Parsing %s... %d chunks" % (os.path.basename(filename),len(chunks)))
for c in chunks:
# Does the chunk overlap the bounding box at all?
if (start != None):
if ( (c['x']+1)*16 + rx*512 - 1 < int(start[0]) or (c['z']+1)*16 + rz*512 - 1 < int(start[2]) ):
continue
elif (stop != None):
if ( c['x']*16 + rx*512 - 1 > int(stop[0]) or c['z']*16 + rz*512 - 1 > int(stop[2]) ):
continue
chunk = McRegionChunk(file.get_chunk(c['x'], c['z']))
assert chunk.get_coords() == (c['x'] + rx*32, c['z'] + rz*32)
#print("Parsing chunk ("+str(c['x'])+", "+str(c['z'])+")")
# Parse the blocks
# Fast code if no start or stop coordinates are specified
# TODO: also use this code if start/stop is specified, but the complete chunk is included
if (start == None and stop == None):
stats_per_chunk(chunk, block_data_totals)
else:
# Slow code that iterates through each coordinate
bounded_stats_per_chunk(chunk, block_data_totals, start, stop)
return block_data_totals
def print_results(block_data_totals):
locale.setlocale(locale.LC_ALL, '')
# Analyze blocks
for block_id,data in enumerate(block_data_totals):
if sum(data) > 0:
datastr = ", ".join([locale.format_string("%d: %d", (i,c), grouping=True) for (i,c) in enumerate(data) if c > 0])
print(locale.format_string("block id %3d: %12d (data id %s)", (block_id,sum(data),datastr), grouping=True))
block_totals = [sum(data_totals) for data_totals in block_data_totals]
total_blocks = sum(block_totals)
solid_blocks = total_blocks - block_totals[0]
solid_ratio = (solid_blocks+0.0)/total_blocks if (total_blocks > 0) else 0
print(locale.format_string("%d total blocks in region, %d are non-air (%0.4f", (total_blocks, solid_blocks, 100.0*solid_ratio), grouping=True)+"%)")
# Find valuable blocks
print(locale.format_string("Diamond Ore: %8d", block_totals[56], grouping=True))
print(locale.format_string("Gold Ore: %8d", block_totals[14], grouping=True))
print(locale.format_string("Redstone Ore: %8d", block_totals[73], grouping=True))
print(locale.format_string("Iron Ore: %8d", block_totals[15], grouping=True))
print(locale.format_string("Coal Ore: %8d", block_totals[16], grouping=True))
print(locale.format_string("Lapis Lazuli Ore: %8d", block_totals[21], grouping=True))
print(locale.format_string("Dungeons: %8d", block_totals[52], grouping=True))
print(locale.format_string("Clay: %8d", block_totals[82], grouping=True))
print(locale.format_string("Sugar Cane: %8d", block_totals[83], grouping=True))
print(locale.format_string("Cacti: %8d", block_totals[81], grouping=True))
print(locale.format_string("Pumpkin: %8d", block_totals[86], grouping=True))
print(locale.format_string("Dandelion: %8d", block_totals[37], grouping=True))
print(locale.format_string("Rose: %8d", block_totals[38], grouping=True))
print(locale.format_string("Brown Mushroom: %8d", block_totals[39], grouping=True))
print(locale.format_string("Red Mushroom: %8d", block_totals[40], grouping=True))
print(locale.format_string("Lava Springs: %8d", block_totals[11], grouping=True))
def main(world_folder, start=None, stop=None):
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
return 2 # ENOENT
regions = glob.glob(os.path.join(world_folder,'region','*.mcr'))
block_data_totals = [[0]*16 for i in range(256)] # up to 16 data numbers in 256 block IDs
try:
for filename in regions:
region_totals = process_region_file(filename, start, stop)
for i, data in enumerate(region_totals):
for j, total in enumerate(data):
block_data_totals[i][j] += total
except KeyboardInterrupt:
print_results(block_data_totals)
return 75 # EX_TEMPFAIL
print_results(block_data_totals)
return 0 # EX_OK
if __name__ == '__main__':
if (len(sys.argv) == 1):
print("No world folder specified! Usage: %s <world folder> [minx,miny,minz maxx,maxy,maxz]" % sys.argv[0])
sys.exit(64) # EX_USAGE
world_folder = sys.argv[1]
# clean path name, eliminate trailing slashes. required for os.path.basename()
world_folder = os.path.normpath(world_folder)
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
sys.exit(72) # EX_IOERR
start,stop = None,None
if (len(sys.argv) == 4):
# A min/max corner was specified
start_str = sys.argv[2][1:-1] # Strip parenthesis...
start = tuple(start_str.split(',')) # and convert to tuple
stop_str = sys.argv[3][1:-1] # Strip parenthesis...
stop = tuple(stop_str.split(',')) # and convert to tuple
sys.exit(main(world_folder, start, stop))
| mit | -2,987,137,642,103,406,600 | 45.855491 | 152 | 0.596348 | false |
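The per-block counters in this script form a 256 by 16 nested list: the outer index is the block ID and the inner index is the data value. A short self-contained sketch (not part of the original file; the block and data IDs below are picked purely for illustration) of how that structure is indexed and aggregated, mirroring what print_results does:

# Illustrative sketch only -- mirrors the block_data_totals layout used above.
block_data_totals = [[0] * 16 for _ in range(256)]  # block id -> counts per data value
block_data_totals[56][0] += 3    # e.g. three blocks with id 56 (diamond ore above)
block_data_totals[17][1] += 10   # id/data pair chosen arbitrarily for the example
block_totals = [sum(data) for data in block_data_totals]
print(block_totals[56])  # -> 3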
takeshitakenji/wikiserv | src/config.py | 1 | 6694 | #!/usr/bin/env python3
import sys
if sys.version_info < (3, 3):
raise RuntimeError('At least Python 3.3 is required')
from lxml import etree
from datetime import timedelta
import hashers, processors
from os.path import join as path_join, dirname, normpath, isabs, abspath
import logging
LOGGER = logging.getLogger(__name__)
def positive_int(value):
value = int(value)
if value < 1:
raise ValueError('Not a positive integer: %s' % value)
return value
class Configuration(object):
@staticmethod
def xpath_single(document, xpath, nsmap = None):
matches = document.xpath(xpath, namespaces = nsmap)
if not matches:
raise KeyError('Missing element: %s' % xpath)
return matches[0]
@staticmethod
def get_path(current_dir, path):
path = normpath(path)
if isabs(path):
return path
else:
return abspath(path_join(current_dir, path))
def include_processors(self, root, source_path):
# TODO: Iterate over /configuration/processors/include to include external XML files, noting absolute paths and paths relative to stream.name.
included_processors = {}
procs = {}
for child in root.xpath('processor'):
name = ''.join(child.xpath('text()'))
extensions = None
try:
extensions = (x.strip() for x in child.attrib['extensions'].split())
extensions = [x for x in extensions if x]
except KeyError:
pass
mime = None
try:
mime = child.attrib['mime-type'].strip()
except KeyError:
pass
proc = None
if (name, mime) in procs:
proc = procs[name, mime]
else:
if mime is not None:
try:
proc = processors.get_processor(name)(mime, self.encoding)
except TypeError:
LOGGER.warning('Processor %s does not support MIME assignment' % name)
						mime = None
if proc is None:
proc = processors.get_processor(name)(self.encoding)
procs[name, mime] = proc
if proc is None:
raise RuntimeError
if extensions:
for extension in extensions:
included_processors[extension] = proc
else:
included_processors[None] = proc
LOGGER.debug('Resulting procs from %s: %s' % (source_path, procs))
return included_processors
def __init__(self, stream, setlog = False):
document = etree.parse(stream)
try:
log_level = self.xpath_single(document, '/configuration/log-level/text()').strip().upper()
self.log_level = getattr(logging, log_level)
except (KeyError, AttributeError):
self.log_level = logging.ERROR
if setlog:
logging.basicConfig(level = self.log_level)
self.source_dir = self.get_path(dirname(stream.name), self.xpath_single(document, '/configuration/document-root/text()').strip())
self.runtime_vars = self.get_path(dirname(stream.name), self.xpath_single(document, '/configuration/runtime-vars/text()').strip())
try:
self.preview_lines = int(self.xpath_single(document, '/configuration/preview-lines/text()').strip())
except KeyError:
self.preview_lines = None
try:
self.worker_threads = int(self.xpath_single(document, '/configuration/worker-threads/text()').strip())
if self.worker_threads < 1:
raise ValueError(self.worker_threads)
except KeyError:
self.worker_threads = 1
self.cache_dir = self.get_path(dirname(stream.name), self.xpath_single(document, '/configuration/cache/@dir').strip())
self.checksum_function = hashers.get_hasher( \
self.xpath_single(document, '/configuration/cache/checksum-function/text()').strip())
try:
self.bind_address = self.xpath_single(document, '/configuration/bind-address/text()').strip()
except KeyError:
self.bind_address = ''
self.bind_port = int(self.xpath_single(document, '/configuration/bind-port/text()').strip())
# Main cache
try:
self.max_age = timedelta(seconds = positive_int(self.xpath_single(document, '/configuration/cache/max-age/text()')))
except KeyError:
self.max_age = None
try:
self.max_entries = positive_int(self.xpath_single(document, '/configuration/cache/max-entries/text()'))
except KeyError:
self.max_entries = None
self.auto_scrub = bool(document.xpath('/configuration/cache/auto-scrub'))
self.send_etags = bool(document.xpath('/configuration/cache/send-etags'))
# Search cache
self.use_search_cache = bool(document.xpath('/configuration/search-cache'))
try:
self.search_max_age = timedelta(seconds = positive_int(self.xpath_single(document, '/configuration/search-cache/max-age/text()')))
except KeyError:
self.search_max_age = None
try:
self.search_max_entries = positive_int(self.xpath_single(document, '/configuration/search-cache/max-entries/text()'))
except KeyError:
self.search_max_entries = None
self.search_auto_scrub = bool(document.xpath('/configuration/search-cache/auto-scrub'))
self.dispatcher_thread = bool(document.xpath('/configuration/cache/dispatcher-thread'))
self.encoding = self.xpath_single(document, '/configuration/processors/encoding/text()')
self.processors = {}
self.processors.update(self.include_processors(self.xpath_single(document, '/configuration/processors'), stream.name))
LOGGER.debug('Resulting processors: %s' % self.processors)
if None not in self.processors:
LOGGER.warning('There is no processor defined for unspecified file extensions; setting default to autoraw-nocache.')
self.processors[None] = processors.get_processor('autoraw-nocache')(self.encoding)
@property
def default_processor(self):
return self.processors[None]
if __name__ == '__main__':
import unittest, logging
from hashlib import sha1
from os.path import join as path_join, dirname
logging.basicConfig(level = logging.DEBUG)
class TestConfig(unittest.TestCase):
CONFIG_PATH = path_join(dirname(__file__), 'testdata', 'example_config_test.xml')
def test_read_config(self):
with open(self.CONFIG_PATH, 'r', encoding = 'utf8') as f:
config = Configuration(f)
self.assertIsNotNone(config)
self.assertEqual(config.cache_dir, abspath(path_join('testdata', 'example-cache')))
self.assertEqual(config.source_dir, abspath(path_join('testdata', 'example-source')))
self.assertIsNotNone(config.checksum_function)
self.assertEqual(config.max_age, timedelta(seconds = 86400))
self.assertEqual(config.max_entries, 2048)
self.assertTrue(config.auto_scrub)
self.assertEqual(config.encoding, 'utf8')
self.assertTrue(config.processors)
self.assertIn(None, config.processors)
self.assertIsInstance(config.default_processor, processors.Processor)
print(config.processors)
for extension, processor in config.processors.items():
if extension is None:
continue
self.assertIsInstance(extension, str)
self.assertIsInstance(processor, processors.Processor)
unittest.main()
| apache-2.0 | 403,927,196,099,338,050 | 34.231579 | 144 | 0.717956 | false |
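For orientation, a hedged sketch of the XML shape the Configuration parser above expects, reconstructed only from the XPath lookups in its constructor: the element names come from the code, while the concrete values (paths, the port, the sha1 checksum name) and the import path are assumptions; autoraw-nocache is the processor name the class itself falls back to.

# Sketch only: a minimal config inferred from the XPaths used above.
# The module name 'config' and the 'sha1' hasher name are assumptions.
from config import Configuration

EXAMPLE_CONFIG = """<configuration>
  <document-root>pages</document-root>
  <runtime-vars>run/vars.db</runtime-vars>
  <bind-port>8081</bind-port>
  <cache dir="cache">
    <checksum-function>sha1</checksum-function>
    <max-entries>2048</max-entries>
    <auto-scrub/>
  </cache>
  <processors>
    <encoding>utf8</encoding>
    <processor extensions="txt">autoraw-nocache</processor>
  </processors>
</configuration>"""

# Configuration() resolves relative paths against stream.name, so write the
# sketch to disk instead of wrapping it in StringIO.
with open("example_config.xml", "w", encoding="utf8") as stream:
    stream.write(EXAMPLE_CONFIG)
with open("example_config.xml", "r", encoding="utf8") as stream:
    config = Configuration(stream)
print(config.source_dir, config.bind_port)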
rockfruit/bika.lims | bika/lims/browser/batch/analysisrequests.py | 1 | 2111 | # This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from operator import itemgetter
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.analysisrequest import AnalysisRequestAddView as _ARAV
from bika.lims.browser.analysisrequest import AnalysisRequestsView as _ARV
from bika.lims.permissions import *
from plone.app.layout.globals.interfaces import IViewView
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
class AnalysisRequestsView(_ARV, _ARAV):
template = ViewPageTemplateFile(
"../analysisrequest/templates/analysisrequests.pt")
ar_add = ViewPageTemplateFile("../analysisrequest/templates/ar_add.pt")
implements(IViewView)
def __init__(self, context, request):
super(AnalysisRequestsView, self).__init__(context, request)
def contentsMethod(self, contentFilter):
return self.context.getAnalysisRequests(**contentFilter)
def __call__(self):
self.context_actions = {}
mtool = getToolByName(self.context, 'portal_membership')
if mtool.checkPermission(AddAnalysisRequest, self.portal):
self.context_actions[self.context.translate(_('Add new'))] = {
'url': self.context.absolute_url() + \
"/portal_factory/"
"AnalysisRequest/Request new analyses/ar_add?ar_count=1",
'icon': '++resource++bika.lims.images/add.png'}
return super(AnalysisRequestsView, self).__call__()
def getMemberDiscountApplies(self):
client = self.context.getClient()
return client and client.getMemberDiscountApplies() or False
def getRestrictedCategories(self):
client = self.context.getClient()
return client and client.getRestrictedCategories() or []
def getDefaultCategories(self):
client = self.context.getClient()
return client and client.getDefaultCategories() or []
| agpl-3.0 | 2,410,436,482,305,657,300 | 39.596154 | 77 | 0.71009 | false |
09zwcbupt/ryu | ryu/lib/packet/udp.py | 1 | 2010 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import socket
from . import packet_base
from . import packet_utils
import ipv4
class udp(packet_base.PacketBase):
_PACK_STR = '!HHHH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, src_port, dst_port, total_length=0, csum=0):
super(udp, self).__init__()
self.src_port = src_port
self.dst_port = dst_port
self.total_length = total_length
self.csum = csum
self.length = udp._MIN_LEN
@classmethod
def parser(cls, buf):
(src_port, dst_port, total_length, csum) = struct.unpack_from(
cls._PACK_STR, buf)
msg = cls(src_port, dst_port, total_length, csum)
return msg, None
def serialize(self, payload, prev):
if self.total_length == 0:
self.total_length = udp._MIN_LEN + len(payload)
h = struct.pack(udp._PACK_STR, self.src_port, self.dst_port,
self.total_length, self.csum)
if self.csum == 0:
ph = struct.pack('!IIBBH', prev.src, prev.dst, 0, 17,
self.total_length)
f = ph + h + payload
if len(f) % 2:
f += '\x00'
self.csum = socket.htons(packet_utils.checksum(f))
h = struct.pack(udp._PACK_STR, self.src_port, self.dst_port,
self.total_length, self.csum)
return h
| apache-2.0 | -6,886,356,583,741,292,000 | 34.892857 | 72 | 0.612438 | false |
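The serialize method above only reads integer src and dst attributes from its prev argument when building the IPv4 pseudo-header, so a tiny stand-in object is enough to exercise it. A minimal sketch, assuming the module above is importable and written in the same Python 2 idiom as the module itself; the ports, addresses and payload are arbitrary:

# Illustrative only: exercises udp.serialize() / udp.parser() as defined above.
class PseudoIPv4(object):
    # Stand-in for an ipv4 header: serialize() only needs integer src/dst.
    def __init__(self, src, dst):
        self.src = src
        self.dst = dst

u = udp(src_port=5353, dst_port=5353)
payload = 'hello'
header = u.serialize(payload, PseudoIPv4(0x0a000001, 0x0a000002))  # 10.0.0.1 -> 10.0.0.2
parsed, _ = udp.parser(header)
print("%d %d %d" % (parsed.src_port, parsed.dst_port, parsed.total_length))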
stephanie-wang/ray | python/ray/autoscaler/gcp/config.py | 1 | 14384 | import os
import logging
import time
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from googleapiclient import discovery, errors
logger = logging.getLogger(__name__)
crm = discovery.build("cloudresourcemanager", "v1")
iam = discovery.build("iam", "v1")
compute = discovery.build("compute", "v1")
VERSION = "v1"
RAY = "ray-autoscaler"
DEFAULT_SERVICE_ACCOUNT_ID = RAY + "-sa-" + VERSION
SERVICE_ACCOUNT_EMAIL_TEMPLATE = (
"{account_id}@{project_id}.iam.gserviceaccount.com")
DEFAULT_SERVICE_ACCOUNT_CONFIG = {
"displayName": "Ray Autoscaler Service Account ({})".format(VERSION),
}
DEFAULT_SERVICE_ACCOUNT_ROLES = ("roles/storage.objectAdmin",
"roles/compute.admin")
MAX_POLLS = 12
POLL_INTERVAL = 5
def wait_for_crm_operation(operation):
"""Poll for cloud resource manager operation until finished."""
logger.info("wait_for_crm_operation: "
"Waiting for operation {} to finish...".format(operation))
for _ in range(MAX_POLLS):
result = crm.operations().get(name=operation["name"]).execute()
if "error" in result:
raise Exception(result["error"])
if "done" in result and result["done"]:
logger.info("wait_for_crm_operation: Operation done.")
break
time.sleep(POLL_INTERVAL)
return result
def wait_for_compute_global_operation(project_name, operation):
"""Poll for global compute operation until finished."""
logger.info("wait_for_compute_global_operation: "
"Waiting for operation {} to finish...".format(
operation["name"]))
for _ in range(MAX_POLLS):
result = compute.globalOperations().get(
project=project_name,
operation=operation["name"],
).execute()
if "error" in result:
raise Exception(result["error"])
if result["status"] == "DONE":
logger.info("wait_for_compute_global_operation: "
"Operation done.")
break
time.sleep(POLL_INTERVAL)
return result
def key_pair_name(i, region, project_id, ssh_user):
"""Returns the ith default gcp_key_pair_name."""
    key_name = "{}_gcp_{}_{}_{}_{}".format(RAY, region, project_id, ssh_user,
                                           i)
return key_name
def key_pair_paths(key_name):
"""Returns public and private key paths for a given key_name."""
public_key_path = os.path.expanduser("~/.ssh/{}.pub".format(key_name))
private_key_path = os.path.expanduser("~/.ssh/{}.pem".format(key_name))
return public_key_path, private_key_path
def generate_rsa_key_pair():
"""Create public and private ssh-keys."""
key = rsa.generate_private_key(
backend=default_backend(), public_exponent=65537, key_size=2048)
public_key = key.public_key().public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH).decode("utf-8")
pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()).decode("utf-8")
return public_key, pem
def bootstrap_gcp(config):
config = _configure_project(config)
config = _configure_iam_role(config)
config = _configure_key_pair(config)
config = _configure_subnet(config)
return config
def _configure_project(config):
"""Setup a Google Cloud Platform Project.
Google Compute Platform organizes all the resources, such as storage
buckets, users, and instances under projects. This is different from
aws ec2 where everything is global.
"""
project_id = config["provider"].get("project_id")
assert config["provider"]["project_id"] is not None, (
"'project_id' must be set in the 'provider' section of the autoscaler"
" config. Notice that the project id must be globally unique.")
project = _get_project(project_id)
if project is None:
# Project not found, try creating it
_create_project(project_id)
project = _get_project(project_id)
assert project is not None, "Failed to create project"
assert project["lifecycleState"] == "ACTIVE", (
"Project status needs to be ACTIVE, got {}".format(
project["lifecycleState"]))
config["provider"]["project_id"] = project["projectId"]
return config
def _configure_iam_role(config):
"""Setup a gcp service account with IAM roles.
    Creates a gcp service account and binds IAM roles which allow it to
    control storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
"""
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
account_id=DEFAULT_SERVICE_ACCOUNT_ID,
project_id=config["provider"]["project_id"])
service_account = _get_service_account(email, config)
if service_account is None:
logger.info("_configure_iam_role: "
"Creating new service account {}".format(
DEFAULT_SERVICE_ACCOUNT_ID))
service_account = _create_service_account(
DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)
assert service_account is not None, "Failed to create service account"
_add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
config["head_node"]["serviceAccounts"] = [{
"email": service_account["email"],
# NOTE: The amount of access is determined by the scope + IAM
# role of the service account. Even if the cloud-platform scope
# gives (scope) access to the whole cloud-platform, the service
# account is limited by the IAM rights specified below.
"scopes": ["https://www.googleapis.com/auth/cloud-platform"]
}]
return config
def _configure_key_pair(config):
"""Configure SSH access, using an existing key pair if possible.
Creates a project-wide ssh key that can be used to access all the instances
unless explicitly prohibited by instance config.
The ssh-keys created by ray are of format:
[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]
where:
[USERNAME] is the user for the SSH key, specified in the config.
[KEY_VALUE] is the public SSH key value.
"""
if "ssh_private_key" in config["auth"]:
return config
ssh_user = config["auth"]["ssh_user"]
project = compute.projects().get(
project=config["provider"]["project_id"]).execute()
# Key pairs associated with project meta data. The key pairs are general,
# and not just ssh keys.
ssh_keys_str = next(
(item for item in project["commonInstanceMetadata"].get("items", [])
if item["key"] == "ssh-keys"), {}).get("value", "")
ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []
# Try a few times to get or create a good key pair.
key_found = False
for i in range(10):
key_name = key_pair_name(i, config["provider"]["region"],
config["provider"]["project_id"], ssh_user)
public_key_path, private_key_path = key_pair_paths(key_name)
for ssh_key in ssh_keys:
key_parts = ssh_key.split(" ")
if len(key_parts) != 3:
continue
if key_parts[2] == ssh_user and os.path.exists(private_key_path):
# Found a key
key_found = True
break
# Create a key since it doesn't exist locally or in GCP
if not key_found and not os.path.exists(private_key_path):
logger.info("_configure_key_pair: "
"Creating new key pair {}".format(key_name))
public_key, private_key = generate_rsa_key_pair()
_create_project_ssh_key_pair(project, public_key, ssh_user)
with open(private_key_path, "w") as f:
f.write(private_key)
os.chmod(private_key_path, 0o600)
with open(public_key_path, "w") as f:
f.write(public_key)
key_found = True
break
if key_found:
break
assert key_found, "SSH keypair for user {} not found for {}".format(
ssh_user, private_key_path)
assert os.path.exists(private_key_path), (
"Private key file {} not found for user {}"
"".format(private_key_path, ssh_user))
logger.info("_configure_key_pair: "
"Private key not specified in config, using"
"{}".format(private_key_path))
config["auth"]["ssh_private_key"] = private_key_path
return config
def _configure_subnet(config):
"""Pick a reasonable subnet if not specified by the config."""
# Rationale: avoid subnet lookup if the network is already
# completely manually configured
if ("networkInterfaces" in config["head_node"]
and "networkInterfaces" in config["worker_nodes"]):
return config
subnets = _list_subnets(config)
if not subnets:
raise NotImplementedError("Should be able to create subnet.")
# TODO: make sure that we have usable subnet. Maybe call
# compute.subnetworks().listUsable? For some reason it didn't
# work out-of-the-box
default_subnet = subnets[0]
if "networkInterfaces" not in config["head_node"]:
config["head_node"]["networkInterfaces"] = [{
"subnetwork": default_subnet["selfLink"],
"accessConfigs": [{
"name": "External NAT",
"type": "ONE_TO_ONE_NAT",
}],
}]
if "networkInterfaces" not in config["worker_nodes"]:
config["worker_nodes"]["networkInterfaces"] = [{
"subnetwork": default_subnet["selfLink"],
"accessConfigs": [{
"name": "External NAT",
"type": "ONE_TO_ONE_NAT",
}],
}]
return config
def _list_subnets(config):
response = compute.subnetworks().list(
project=config["provider"]["project_id"],
region=config["provider"]["region"]).execute()
return response["items"]
def _get_subnet(config, subnet_id):
subnet = compute.subnetworks().get(
project=config["provider"]["project_id"],
region=config["provider"]["region"],
subnetwork=subnet_id,
).execute()
return subnet
def _get_project(project_id):
try:
project = crm.projects().get(projectId=project_id).execute()
except errors.HttpError as e:
if e.resp.status != 403:
raise
project = None
return project
def _create_project(project_id):
operation = crm.projects().create(body={
"projectId": project_id,
"name": project_id
}).execute()
result = wait_for_crm_operation(operation)
return result
def _get_service_account(account, config):
project_id = config["provider"]["project_id"]
full_name = ("projects/{project_id}/serviceAccounts/{account}"
"".format(project_id=project_id, account=account))
try:
service_account = iam.projects().serviceAccounts().get(
name=full_name).execute()
except errors.HttpError as e:
if e.resp.status != 404:
raise
service_account = None
return service_account
def _create_service_account(account_id, account_config, config):
project_id = config["provider"]["project_id"]
service_account = iam.projects().serviceAccounts().create(
name="projects/{project_id}".format(project_id=project_id),
body={
"accountId": account_id,
"serviceAccount": account_config,
}).execute()
return service_account
def _add_iam_policy_binding(service_account, roles):
"""Add new IAM roles for the service account."""
project_id = service_account["projectId"]
email = service_account["email"]
member_id = "serviceAccount:" + email
policy = crm.projects().getIamPolicy(
resource=project_id, body={}).execute()
already_configured = True
for role in roles:
role_exists = False
for binding in policy["bindings"]:
if binding["role"] == role:
if member_id not in binding["members"]:
binding["members"].append(member_id)
already_configured = False
role_exists = True
if not role_exists:
already_configured = False
policy["bindings"].append({
"members": [member_id],
"role": role,
})
if already_configured:
# In some managed environments, an admin needs to grant the
# roles, so only call setIamPolicy if needed.
return
result = crm.projects().setIamPolicy(
resource=project_id, body={
"policy": policy,
}).execute()
return result
def _create_project_ssh_key_pair(project, public_key, ssh_user):
"""Inserts an ssh-key into project commonInstanceMetadata"""
key_parts = public_key.split(" ")
# Sanity checks to make sure that the generated key matches expectation
assert len(key_parts) == 2, key_parts
assert key_parts[0] == "ssh-rsa", key_parts
new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format(
ssh_user=ssh_user, key_value=key_parts[1])
common_instance_metadata = project["commonInstanceMetadata"]
items = common_instance_metadata.get("items", [])
ssh_keys_i = next(
(i for i, item in enumerate(items) if item["key"] == "ssh-keys"), None)
if ssh_keys_i is None:
items.append({"key": "ssh-keys", "value": new_ssh_meta})
else:
ssh_keys = items[ssh_keys_i]
ssh_keys["value"] += "\n" + new_ssh_meta
items[ssh_keys_i] = ssh_keys
common_instance_metadata["items"] = items
operation = compute.projects().setCommonInstanceMetadata(
project=project["name"], body=common_instance_metadata).execute()
response = wait_for_compute_global_operation(project["name"], operation)
return response
| apache-2.0 | -4,714,851,947,882,477,000 | 31.107143 | 79 | 0.621593 | false |
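bootstrap_gcp above only reads a handful of keys from the autoscaler config; the sketch below shows that minimal dictionary shape. The concrete project, region and user values are placeholders, and a real cluster config carries many more fields:

# Shape of the config consumed by bootstrap_gcp() and its helpers above.
# Values are placeholders; running this for real requires GCP credentials.
config = {
    "provider": {
        "project_id": "my-ray-project",  # must be globally unique
        "region": "us-west1",
    },
    "auth": {
        "ssh_user": "ubuntu",
    },
    "head_node": {},     # serviceAccounts / networkInterfaces get filled in
    "worker_nodes": {},  # networkInterfaces gets filled in
}
# config = bootstrap_gcp(config)  # commented out: needs valid credentials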
iandees/all-the-places | locations/spiders/ljsilvers.py | 1 | 1524 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class LjsilversSpider(scrapy.Spider):
name = "ljsilvers"
allowed_domains = ["ljsilvers.com"]
start_urls = (
'http://www.ljsilvers.com/locator?postalcode=76010',
)
def parse(self, response):
data = response.body_as_unicode()
base_data = re.search('dataout\s--Array\s\((.*)\)\s\s--><style type="text/css">', data, re.DOTALL).group(1)
detail_matches = re.findall('\((.*?)\)', base_data, re.DOTALL)
for detail_match in detail_matches:
key_values = re.findall('(.*?)\s=>\s(.*)', detail_match)
props = {}
for key_value in key_values:
key = key_value[0].strip()
value = key_value[1].strip()
if key == '[storeID]':
props['ref'] = value
if key == '[address]':
props['addr_full'] = value
if key == '[city]':
props['city'] = value
if key == '[state]':
props['state'] = value
if key == '[zip]':
props['postcode'] = value
if key == '[phone_number]':
props['phone'] = value
if key == '[latitude]':
props['lat'] = value
if key == '[longitude]':
props['lon'] = value
yield GeojsonPointItem(**props)
| mit | -8,044,704,276,494,561,000 | 32.130435 | 115 | 0.467192 | false |
twhyntie/tasl-data-management | wrappers/test_nod.py | 1 | 1166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
# The wrapper class to test.
from nod import NOD
class TestNOD(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nod(self):
## The annotation CSV file.
nod = NOD("testdata/NOD/000000_00_00_00.csv")
# The tests.
# The headers.
self.assertEqual(nod.get_number_of_headers(), 2)
self.assertEqual(nod.get_header(0), "annotation_id")
self.assertEqual(nod.get_header(1), "n_oddities_identified")
# The annotations.
# Test the number of annotations found.
self.assertEqual(nod.get_number_of_annotations(), 88)
if __name__ == "__main__":
lg.basicConfig(filename='log_test_nod.log', filemode='w', level=lg.DEBUG)
lg.info(" *")
lg.info(" *=========================================")
lg.info(" * Logger output from wrappers/test_nod.py ")
lg.info(" *=========================================")
lg.info(" *")
unittest.main()
| mit | -4,170,661,468,903,382,500 | 21 | 77 | 0.559177 | false |
certik/hermes-gui | hermesgui/core/handle_hermes.py | 1 | 1543 | from hermes2d import Mesh, H1Shapeset, PrecalcShapeset, H1Space, \
WeakForm, Solution, ScalarView, LinSystem, DummySolver, \
MeshView, set_verbose, plot_mesh_mpl_simple
from hermes2d.forms import set_forms
from hermes2d.mesh import read_hermes_format
def read_mesh(filename):
nodes, elements, boundary, nurbs = read_hermes_format(filename)
return nodes, elements, boundary, nurbs
def plot_mesh(mesh, axes=None, plot_nodes=True):
nodes, elements, boundary, nurbs = mesh
# remove the element markers
elements = [x[:-1] for x in elements]
return plot_mesh_mpl_simple(nodes, elements, axes=axes,
plot_nodes=plot_nodes)
def poisson_solver(mesh_tuple):
"""
Poisson solver.
mesh_tuple ... a tuple of (nodes, elements, boundary, nurbs)
"""
set_verbose(False)
mesh = Mesh()
mesh.create(*mesh_tuple)
mesh.refine_element(0)
shapeset = H1Shapeset()
pss = PrecalcShapeset(shapeset)
# create an H1 space
space = H1Space(mesh, shapeset)
space.set_uniform_order(5)
space.assign_dofs()
# initialize the discrete problem
wf = WeakForm(1)
set_forms(wf)
solver = DummySolver()
sys = LinSystem(wf, solver)
sys.set_spaces(space)
sys.set_pss(pss)
# assemble the stiffness matrix and solve the system
sys.assemble()
A = sys.get_matrix()
b = sys.get_rhs()
from scipy.sparse.linalg import cg
x, res = cg(A, b)
sln = Solution()
sln.set_fe_solution(space, pss, x)
return sln
| bsd-3-clause | 3,263,927,270,197,432,300 | 28.113208 | 73 | 0.657161 | false |
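A brief sketch of how the helpers above fit together; the mesh filename is a placeholder and this assumes the hermes2d bindings imported by the module are installed:

# Illustrative flow only (mesh file name is hypothetical).
mesh_tuple = read_mesh("square.mesh")  # (nodes, elements, boundary, nurbs)
axes = plot_mesh(mesh_tuple)           # matplotlib axes with the mesh drawn
sln = poisson_solver(mesh_tuple)       # hermes2d Solution of the Poisson problem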
nkoep/blaplay | blaplay/blagui/blaqueue.py | 1 | 13341 | # blaplay, Copyright (C) 2014 Niklas Koep
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import os
import cPickle as pickle
import re
import gobject
import gtk
import blaplay
ui_manager = blaplay.bla.ui_manager
from blaplay.blacore import blaconst, blacfg
from blaplay import blagui
from blaplay.formats._identifiers import *
from blawindows import BlaScrolledWindow
from blatracklist import (
COLUMN_ARTIST, COLUMN_ALBUM, COLUMN_ALBUM_ARTIST, COLUMN_GENRE, popup,
update_columns, parse_track_list_stats, BlaTreeView, BlaTrackListItem)
from blastatusbar import BlaStatusbar
from blaview import BlaViewMeta
from blaplaylist import playlist_manager
class BlaQueue(BlaScrolledWindow):
__metaclass__ = BlaViewMeta("Queue")
__layout = (
gobject.TYPE_PYOBJECT, # An instance of BlaTrackListItem
gobject.TYPE_STRING # Position in the queue
)
def __init__(self):
super(BlaQueue, self).__init__()
self.__size = 0
self.__length = 0
self.clipboard = []
self.__treeview = BlaTreeView(view_id=blaconst.VIEW_QUEUE)
self.__treeview.set_model(gtk.ListStore(*self.__layout))
self.__treeview.set_enable_search(False)
self.__treeview.set_property("rules_hint", True)
self.set_shadow_type(gtk.SHADOW_IN)
self.add(self.__treeview)
self.__treeview.enable_model_drag_dest(
[("queue", gtk.TARGET_SAME_WIDGET, 3)], gtk.gdk.ACTION_COPY)
self.__treeview.enable_model_drag_source(
gtk.gdk.BUTTON1_MASK,
[("queue", gtk.TARGET_SAME_WIDGET, 3)],
gtk.gdk.ACTION_COPY)
self.__treeview.connect("popup", popup, blaconst.VIEW_QUEUE, self)
self.__treeview.connect("row_activated", self.play_item)
self.__treeview.connect(
"button_press_event", self.__button_press_event)
self.__treeview.connect("key_press_event", self.__key_press_event)
self.__treeview.connect("drag_data_get", self.__drag_data_get)
self.__treeview.connect("drag_data_received", self.__drag_data_recv)
update_columns(self.__treeview, view_id=blaconst.VIEW_QUEUE)
self.show_all()
def __button_press_event(self, treeview, event):
if (event.button == 2 and
event.type not in [gtk.gdk._2BUTTON_PRESS,
gtk.gdk._3BUTTON_PRESS]):
self.paste()
return True
def __key_press_event(self, treeview, event):
if blagui.is_accel(event, "<Ctrl>X"):
self.cut()
elif blagui.is_accel(event, "<Ctrl>C"):
self.copy()
elif blagui.is_accel(event, "<Ctrl>V"):
self.paste()
elif blagui.is_accel(event, "Delete"):
self.remove()
return False
def __drag_data_get(self, treeview, drag_context, selection_data, info,
time):
data = pickle.dumps(treeview.get_selection().get_selected_rows()[-1],
pickle.HIGHEST_PROTOCOL)
selection_data.set("", 8, data)
def __drag_data_recv(self, treeview, drag_context, x, y, selection_data,
info, time):
drop_info = treeview.get_dest_row_at_pos(x, y)
model = self.__treeview.get_model()
paths = pickle.loads(selection_data.data)
# TODO: factor this out so we can use the same for the playlist
if drop_info:
path, pos = drop_info
iterator = model.get_iter(path)
if (pos == gtk.TREE_VIEW_DROP_BEFORE or
pos == gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
move_before = model.move_before
def move_func(it):
move_before(it, iterator)
else:
move_after = model.move_after
def move_func(it):
move_after(it, iterator)
paths.reverse()
else:
iterator = None
move_before = model.move_before
def move_func(it):
move_before(it, iterator)
get_iter = model.get_iter
iterators = map(get_iter, paths)
map(move_func, iterators)
self.update_queue_positions()
def __add_items(self, items, path=None, select_rows=False):
treeview = self.__treeview
model = treeview.get_model()
iterator = None
try:
if (not treeview.get_selection().get_selected_rows()[-1] or
path == -1):
raise TypeError
if not path:
path, column = treeview.get_cursor()
except TypeError:
path = (len(model),)
append = model.append
def insert_func(iterator, item):
append(item)
else:
iterator = model.get_iter(path)
insert_func = model.insert_before
items.reverse()
for item in items:
iterator = insert_func(iterator, [item, None])
if select_rows:
treeview.freeze_notify()
selection = treeview.get_selection()
selection.unselect_all()
select_path = selection.select_path
map(select_path, xrange(path[0], path[0] + len(items)))
treeview.thaw_notify()
self.update_queue_positions()
def __get_items(self, remove=True):
treeview = self.__treeview
model, selections = treeview.get_selection().get_selected_rows()
if selections:
get_iter = model.get_iter
iterators = map(get_iter, selections)
items = [model[iterator][0] for iterator in iterators]
if remove:
remove = model.remove
map(remove, iterators)
self.update_queue_positions()
return items
return []
def play_item(self, treeview, path, column=None):
model = treeview.get_model()
iterator = model.get_iter(path)
model[iterator][0].play()
if blacfg.getboolean("general", "queue.remove.when.activated"):
model.remove(iterator)
self.update_queue_positions()
def update_statusbar(self):
model = self.__treeview.get_model()
count = len(model)
if count == 0:
info = ""
else:
info = parse_track_list_stats(count, self.__size, self.__length)
BlaStatusbar.set_view_info(blaconst.VIEW_QUEUE, info)
def select(self, type_):
treeview = self.__treeview
selection = treeview.get_selection()
model, selected_paths = selection.get_selected_rows()
if type_ == blaconst.SELECT_ALL:
selection.select_all()
return
elif type_ == blaconst.SELECT_COMPLEMENT:
selected_paths = set(selected_paths)
paths = set([(p,) for p in xrange(len(model))])
paths.difference_update(selected_paths)
selection.unselect_all()
select_path = selection.select_path
map(select_path, paths)
return
elif type_ == blaconst.SELECT_BY_ARTISTS:
column_id = COLUMN_ARTIST
elif type_ == blaconst.SELECT_BY_ALBUMS:
column_id = COLUMN_ALBUM
elif type_ == blaconst.SELECT_BY_ALBUM_ARTISTS:
column_id = COLUMN_ALBUM_ARTIST
else:
column_id = COLUMN_GENRE
items = [model[path][0] for path in selected_paths]
eval_ = BlaEval(column_id).eval
values = set()
for item in items:
values.add(eval_(item.track).lower())
if not values:
return
r = re.compile(
r"^(%s)$" % "|".join(values), re.UNICODE | re.IGNORECASE)
items = [row[0] for row in model if r.match(eval_(row[0].track))]
paths = [row.path for row in model if row[0] in items]
selection.unselect_all()
select_path = selection.select_path
map(select_path, paths)
def update_queue_positions(self):
model = self.__treeview.get_model()
# Update the position labels for our own treeview.
for idx, row in enumerate(model):
model[row.path][1] = idx+1
# Invalidate the visible rows of the current playlists so the
# position labels also get updated in playlists.
playlist = playlist_manager.get_current_playlist()
playlist.invalidate_visible_rows()
# Calculate size and length of the queue and update the statusbar.
size = length = 0
for row in model:
track = row[0].track
size += track[FILESIZE]
length += track[LENGTH]
self.__size, self.__length = size, length
self.emit("count_changed", blaconst.VIEW_QUEUE, self.n_items)
self.update_statusbar()
def get_queue_positions(self, item):
model = self.__treeview.get_model()
return [row[1] for row in model if row[0] == item]
def queue_items(self, items):
if not items:
return
# If any of the items is not an instance of BlaTrackListItem it means
# all of the items are actually just URIs which stem from the library
# browser and are not part of a playlist.
if not isinstance(items[0], BlaTrackListItem):
items = map(BlaTrackListItem, items)
count = blaconst.QUEUE_MAX_ITEMS - self.n_items
self.__add_items(items[:count], path=-1)
def remove_items(self, items):
# This is invoked by playlists who want to remove tracks from the
# queue.
model = self.__treeview.get_model()
for row in model:
if row[0] in items:
model.remove(row.iter)
self.update_queue_positions()
def get_queue(self):
queue = []
playlists = playlist_manager.get_playlists()
for row in self.__treeview.get_model():
item = row[0]
playlist = item.playlist
try:
playlist_idx = playlists.index(playlist)
except ValueError:
item = (item.uri,)
else:
item = (playlist_idx,
playlist.get_path_from_item(item, all_=True))
queue.append(item)
return queue
def restore(self, items):
print_i("Restoring the play queue")
if not items:
return
playlists = playlist_manager.get_playlists()
for idx, item in enumerate(items):
try:
playlist_idx, path = item
except ValueError:
# Library tracks that are not part of a playlist.
item = BlaTrackListItem(item)
else:
item = playlists[playlist_idx].get_item_from_path(path)
items[idx] = item
self.queue_items(items)
def cut(self, *args):
self.clipboard = self.__get_items(remove=True)
ui_manager.update_menu(blaconst.VIEW_QUEUE)
def copy(self, *args):
# We specifically don't create actual copies of items here as it's not
# desired to have unique ones in the queue. Copied and pasted tracks
# should still refer to the same BlaTrackListItem instances which are
# possibly part of a playlist.
self.clipboard = self.__get_items(remove=False)
ui_manager.update_menu(blaconst.VIEW_QUEUE)
def paste(self, *args, **kwargs):
self.__add_items(items=self.clipboard, select_rows=True)
def remove(self, *args):
self.__get_items(remove=True)
def remove_duplicates(self):
unique = set()
model = self.__treeview.get_model()
for row in model:
uri = row[0].uri
if uri not in unique:
unique.add(uri)
else:
model.remove(row.iter)
self.update_queue_positions()
def remove_invalid_tracks(self):
model = self.__treeview.get_model()
isfile = os.path.isfile
for row in model:
uri = row[0].uri
if not isfile(uri):
model.remove(row.iter)
self.update_queue_positions()
def clear(self):
self.__treeview.get_model().clear()
self.update_queue_positions()
def get_item(self):
model = self.__treeview.get_model()
iterator = model.get_iter_first()
if iterator:
item = model[iterator][0]
model.remove(iterator)
self.update_queue_positions()
return item
return None
@property
def n_items(self):
return len(self.__treeview.get_model())
queue = BlaQueue()
| gpl-2.0 | 1,768,491,662,655,170,000 | 33.562176 | 78 | 0.58174 | false |
juliantaylor/scipy | scipy/optimize/cobyla.py | 2 | 9434 | """
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
from scipy.optimize import _cobyla
from .optimize import Result, _check_unknown_options
__all__ = ['fmin_cobyla']
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
rhoend=1e-4, iprint=1, maxfun=1000, disp=None, catol=1e-6):
"""
Minimize a function using the Constrained Optimization BY Linear
Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.
Parameters
----------
func : callable
Function to minimize. In the form func(x, \\*args).
x0 : ndarray
Initial guess.
cons : sequence
Constraint functions; must all be ``>=0`` (a single function
if only 1 constraint). Each function takes the parameters `x`
as its first argument.
args : tuple
Extra arguments to pass to function.
consargs : tuple
Extra arguments to pass to constraint functions (default of None means
use same extra arguments as those passed to func).
Use ``()`` for no extra arguments.
rhobeg :
Reasonable initial changes to the variables.
rhoend :
Final accuracy in the optimization (not precisely guaranteed). This
is a lower bound on the size of the trust region.
iprint : {0, 1, 2, 3}
Controls the frequency of output; 0 implies no output. Deprecated.
disp : {0, 1, 2, 3}
Over-rides the iprint interface. Preferred.
maxfun : int
Maximum number of function evaluations.
catol : float
Absolute tolerance for constraint violations.
Returns
-------
x : ndarray
The argument that minimises `f`.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'COBYLA' `method` in particular.
Notes
-----
This algorithm is based on linear approximations to the objective
function and each constraint. We briefly describe the algorithm.
Suppose the function is being minimized over k variables. At the
jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
an approximate solution x_j, and a radius RHO_j.
    The algorithm then constructs linear (i.e. linear plus a constant)
    approximations to the objective function and constraint functions
    such that their function values agree with the linear approximation
    on the k+1 points v_1, ..., v_(k+1).
This gives a linear program to solve (where the linear approximations
of the constraint functions are constrained to be non-negative).
However the linear approximations are likely only good
approximations near the current simplex, so the linear program is
given the further requirement that the solution, which
will become x_(j+1), must be within RHO_j from x_j. RHO_j only
decreases, never increases. The initial RHO_j is rhobeg and the
final RHO_j is rhoend. In this way COBYLA's iterations behave
like a trust region algorithm.
Additionally, the linear program may be inconsistent, or the
approximation may give poor improvement. For details about
how these issues are resolved, as well as how the points v_i are
updated, refer to the source code or the references below.
References
----------
Powell M.J.D. (1994), "A direct search optimization method that models
the objective and constraint functions by linear interpolation.", in
Advances in Optimization and Numerical Analysis, eds. S. Gomez and
J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
Powell M.J.D. (1998), "Direct search algorithms for optimization
calculations", Acta Numerica 7, 287-336
Powell M.J.D. (2007), "A view of algorithms for optimization without
derivatives", Cambridge University Technical Report DAMTP 2007/NA03
Examples
--------
Minimize the objective function f(x,y) = x*y subject
to the constraints x**2 + y**2 < 1 and y > 0::
>>> def objective(x):
... return x[0]*x[1]
...
>>> def constr1(x):
... return 1 - (x[0]**2 + x[1]**2)
...
>>> def constr2(x):
... return x[1]
...
>>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
Normal return from subroutine COBYLA
NFVALS = 64 F =-5.000000E-01 MAXCV = 1.998401E-14
X =-7.071069E-01 7.071067E-01
array([-0.70710685, 0.70710671])
The exact solution is (-sqrt(2)/2, sqrt(2)/2).
"""
err = "cons must be a sequence of callable functions or a single"\
" callable function."
try:
len(cons)
except TypeError:
if callable(cons):
cons = [cons]
else:
raise TypeError(err)
else:
for thisfunc in cons:
if not callable(thisfunc):
raise TypeError(err)
if consargs is None:
consargs = args
# build constraints
con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
# options
if disp is not None:
iprint = disp
opts = {'rhobeg': rhobeg,
'tol': rhoend,
'iprint': iprint,
'disp': iprint != 0,
'maxiter': maxfun,
'catol': catol}
sol = _minimize_cobyla(func, x0, args, constraints=con,
**opts)
if iprint > 0 and not sol['success']:
print("COBYLA failed to find a solution: %s" % (sol.message,))
return sol['x']
def _minimize_cobyla(fun, x0, args=(), constraints=(),
rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
disp=False, catol=1e-6, **unknown_options):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
Options for the COBYLA algorithm are:
rhobeg : float
Reasonable initial changes to the variables.
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
disp : bool
Set to True to print convergence messages. If False,
`verbosity` is ignored as set to 0.
maxiter : int
Maximum number of function evaluations.
catol : float
Tolerance (absolute) for constraint violations
This function is called by the `minimize` function with
`method=COBYLA`. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
rhoend = tol
if not disp:
iprint = 0
# check constraints
if isinstance(constraints, dict):
constraints = (constraints, )
for ic, con in enumerate(constraints):
# check type
try:
ctype = con['type'].lower()
except KeyError:
raise KeyError('Constraint %d has no type defined.' % ic)
except TypeError:
raise TypeError('Constraints must be defined using a '
'dictionary.')
except AttributeError:
raise TypeError("Constraint's type must be a string.")
else:
if ctype != 'ineq':
raise ValueError("Constraints of type '%s' not handled by "
"COBYLA." % con['type'])
# check function
if 'fun' not in con:
raise KeyError('Constraint %d has no function defined.' % ic)
# check extra arguments
if 'args' not in con:
con['args'] = ()
m = len(constraints)
def calcfc(x, con):
f = fun(x, *args)
for k, c in enumerate(constraints):
con[k] = c['fun'](x, *c['args'])
return f
info = np.zeros(4, np.float64)
xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun,
dinfo=info)
if info[3] > catol:
# Check constraint violation
info[0] = 4
return Result(x=xopt,
status=int(info[0]),
success=info[0] == 1,
message={1: 'Optimization terminated successfully.',
2: 'Maximum number of function evaluations has '
'been exceeded.',
3: 'Rounding errors are becoming damaging in '
'COBYLA subroutine.',
4: 'Did not converge to a solution satisfying '
'the constraints. See `maxcv` for magnitude '
'of violation.'
}.get(info[0], 'Unknown exit status.'),
nfev=int(info[1]),
fun=info[2],
maxcv=info[3])
if __name__ == '__main__':
from math import sqrt
def fun(x):
return x[0] * x[1]
def cons(x):
return 1 - x[0]**2 - x[1]**2
x = fmin_cobyla(fun, [1., 1.], cons, iprint=3, disp=1)
print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.)))
| bsd-3-clause | -2,557,425,230,123,296,300 | 32.572954 | 79 | 0.587662 | false |
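Since _minimize_cobyla is normally reached through the generic scipy.optimize.minimize interface with method='COBYLA', here is a sketch of that equivalent call for the example problem from the fmin_cobyla docstring; the constraint dictionaries use the same {'type': 'ineq', 'fun': ...} form that fmin_cobyla itself builds:

# Equivalent call through minimize(); options mirror fmin_cobyla's defaults.
from scipy.optimize import minimize

def objective(x):
    return x[0] * x[1]

cons = ({'type': 'ineq', 'fun': lambda x: 1 - (x[0]**2 + x[1]**2)},
        {'type': 'ineq', 'fun': lambda x: x[1]})

res = minimize(objective, [0.0, 0.1], method='COBYLA', constraints=cons,
               options={'rhobeg': 1.0, 'maxiter': 1000})
print(res.x)  # approximately (-sqrt(2)/2, sqrt(2)/2)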
dcsch/pyif | pyif/util.py | 1 | 1607 |
def is_whitespace(c):
if c == " " or c == "\t" or c == "\n":
return True
return False
def compress_whitespace(s):
"""
Remove extraneous whitespace from the string, that being all whitespace at the beginning
and end of the string and anything beyond a single space within the string.
"""
new_str = ""
in_text = False
for i in range(len(s)):
c = s[i]
if is_whitespace(c):
if not in_text:
# Before any text, so ignore
pass
else:
# We're leaving text, so we allow one space and ignore all others
new_str += " "
in_text = False
else:
# Handling text
new_str += c
in_text = True
if new_str[-1:] == " ":
new_str = new_str[:-1]
return new_str
def cw(s):
return compress_whitespace(s)
def insert_newlines(s, width):
"""
Insert newlines into the string so words don't wrap at the end of lines.
"""
new_str = ""
# Jump to the end of a line and scan backwards for whitespace
start = 0
pos = width
while pos < len(s):
for i in range(pos, pos - width, -1):
if is_whitespace(s[i]):
for j in range(i - 1, pos - width, -1):
if not is_whitespace(s[j]):
i = j + 1
new_str += s[start:i + 1] + "\n"
start = i + 1
pos += width
break
if start < len(s):
new_str += s[start:]
return new_str
| mit | -8,638,453,769,667,749,000 | 25.783333 | 92 | 0.481643 | false |
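A few usage examples for the helpers above; the sample strings are arbitrary and the expected outputs in the comments follow directly from the code:

# Assumes the module above is importable; inputs are arbitrary examples.
print(compress_whitespace("  hello \t  world \n"))  # -> "hello world"
print(cw("already clean"))                          # unchanged: "already clean"
wrapped = insert_newlines("the quick brown fox jumps over the lazy dog", 16)
print(wrapped)  # newlines inserted at whitespace near every 16th column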