repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
thinkopensolutions/tkobr-addons | tko_partner_relatives/__init__.py | 1 | 1091 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import partner
| agpl-3.0 | 7,388,563,763,742,791,000 | 44.458333 | 78 | 0.608616 | false |
DavidGSola/Basic-RESTful-Service-with-FLASK | practica1.py | 1 | 1251 | # -*- coding: utf-8 -*-
from flask import Flask, url_for, render_template, Response
import random
app = Flask(__name__)
@app.route('/')
def api_root():
mensaje = 'Welcome'
return Response(mensaje, status=200, mimetype='text/plain')
@app.route('/hola')
def api_home():
mensaje = 'Hola -cañón-'
return Response(mensaje, status=200, mimetype='text/plain')
@app.route('/imagen')
def api_imagen():
mensaje = '<img src=' + url_for('static',filename='img/imagen.jpg') + '>'
	# The response body is an HTML <img> tag, not raw JPEG data, so serve it as text/html.
	return Response(mensaje, status=200, mimetype='text/html')
@app.route('/hola_pepe')
def api_pepe():
mensaje = 'Hola <b> pepe</b>'
return Response(mensaje, status=200, mimetype='text/html')
@app.route('/pagina')
def api_pagina():
mensaje = render_template('pagina.html')
return Response(mensaje, status=200, mimetype='text/html')
@app.route('/circulos_varios')
def api_circulos():
randoms = [random.randrange(50,200) for i in range(9)]
mensaje = render_template( 'circulos.xml',
cx1=randoms[0], cy1=randoms[1], r1=randoms[2],
cx2=randoms[3], cy2=randoms[4], r2=randoms[5],
cx3=randoms[6], cy3=randoms[7], r3=randoms[8])
return Response(mensaje, status=200, mimetype='image/svg+xml')
if __name__ == '__main__':
app.run(host='0.0.0.0')
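# Usage note (added for clarity, not part of the original exercise file):
# running "python practica1.py" starts Flask's development server on all
# interfaces on its default port 5000, so the routes above can be exercised
# with e.g.  curl http://localhost:5000/hola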
| apache-2.0 | -8,561,766,479,407,991,000 | 28.738095 | 74 | 0.665332 | false |
a4a881d4/6FSK | utils.py | 1 | 1800 | import random
import numpy as np
def rsrcBin(L):
r = []
for k in range(L):
r.append(random.randint(0,1))
return r
def rsrc(L):
r = rsrcBin(L)
x = [1-2*x for x in r]
return x
def fftOnce(x):
W = len(x)
hw = np.hamming(W)
ss = np.fft.fft(x*hw)
return np.conj(ss)*ss
def spectrum(x):
W = 1024*32
r = fftOnce(x[:W])
for k in range(W/2,len(x)-W,W/2):
r = r + fftOnce(x[k:k+W])
return r
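# fftOnce/spectrum above implement a Welch-style averaged power spectrum:
# Hamming-windowed FFTs of length W = 1024*32 taken with 50% overlap and
# accumulated over the signal. A minimal usage sketch (the signal length is
# an arbitrary choice; anything longer than one window works):
def spectrum_demo():
	x = np.array(rsrc(1 << 17), dtype=float)
	return spectrum(x).real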
def xorsum(k):
	# XOR (parity) of the bits of k.
	r = 0
	while k:
		r = r^(k&1)
		k = k>>1
	return r&1
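# The two classes below implement classic spreading-sequence generators:
# mseq produces a maximal-length (m-) sequence from a linear-feedback shift
# register defined by the bit-mask polynomial `poly`, and gold XORs two
# relatively shifted m-sequences to form Gold codes (see main() for usage).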
class mseq:
def __init__(self,poly):
self.p = poly
k=0
while poly!=0:
k = k+1
poly = poly>>1
self.order = k-1
print "M sequence order",k
self.length = (1<<self.order)-1
self.s = []
state = 1
for n in range(self.length):
state = state<<1
if state>self.length:
state = state^self.p
self.s.append(1)
else:
self.s.append(0)
def printSeq(self,x=None):
if x==None:
x = self.s
for k in x:
print k,
print ""
def sum(self):
ss = 0
for x in self.s:
ss = ss + x
return ss
def shift(self,l):
return self.s[l:]+self.s[:l]
class gold:
def __init__(self,p0,p1):
self.m0 = mseq(p0)
self.m1 = mseq(p1)
def seq(self,k0,k1):
s0 = self.m0.shift(k0)
s1 = self.m1.shift(k1)
r = [a^b for (a,b) in zip(s0,s1)]
return r
def toReal(self,s):
return np.array([1-2*x for x in s])
def xcorr(self,x,y):
return np.correlate(np.array(x),np.array(y),'full')
def main():
m = mseq(0x409)
m.printSeq()
y = m.shift(1)
print "shift 1"
m.printSeq(y)
print m.sum()
g = gold(0x409,0x40f)
s = g.toReal(g.seq(1,3))
x = g.xcorr(s,s)
import matplotlib.pyplot as plt
plt.plot(x)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,853,868,788,121,301,800 | 15.142857 | 53 | 0.545 | false |
owers19856/PyLATO | TBelec.py | 1 | 39671 | """
Created on Thursday 16 April 2015
@author: Andrew Horsfield, Marc Coury and Max Boleininger
This module contains functions that are needed once the molecular orbitals are
populated by electrons.
"""
#
# Import the modules that will be needed
import numpy as np
import math
import TBH
import sys
import time
import myfunctions
from Verbosity import *
import random
# PyDQED module
from pydqed import DQED
class Electronic:
"""Initialise and build the density matrix."""
def __init__(self, JobClass):
"""Compute the total number of electrons in the system, allocate space for occupancies"""
# Save job reference as an attribute for internal use.
self.Job = JobClass
# Set up the core charges, and count the number of electrons
self.zcore = np.zeros(self.Job.NAtom, dtype='double')
for a in range(0, self.Job.NAtom):
self.zcore[a] = self.Job.Model.atomic[self.Job.AtomType[a]]['NElectrons']
self.NElectrons = np.sum(self.zcore)
#
# Allocate memory for the level occupancies and density matrix
self.occ = np.zeros( self.Job.Hamilton.HSOsize, dtype='double')
self.rho = np.matrix(np.zeros((self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex'))
self.rhotot = np.matrix(np.zeros((self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex'))
# setup for the Pulay mixing
self.inputrho = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
self.outputrho = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
self.residue = np.zeros((self.Job.Def['num_rho'], self.Job.Hamilton.HSOsize, self.Job.Hamilton.HSOsize), dtype='complex')
if self.Job.Def['el_kT'] == 0.0:
self.fermi = myfunctions.fermi_0
else:
self.fermi = myfunctions.fermi_non0
if self.Job.Def['optimisation_routine'] == 1:
self.optimisation_routine = self.optimisation_routine1
elif self.Job.Def['optimisation_routine'] == 2:
self.optimisation_routine = self.optimisation_routine2
elif self.Job.Def['optimisation_routine'] == 3:
self.optimisation_routine = self.optimisation_routine3
self.optimisation_rho = optimisation_rho_Duff_Meloni
elif self.Job.Def['optimisation_routine'] == 4:
self.optimisation_routine = self.optimisation_routine4
self.optimisation_rho = optimisation_rho_total
else:
print "WARNING: No optimisation routine selected. Using optimisation_routine1."
self.optimisation_routine = self.optimisation_routine1
def occupy(self, s, kT, n_tol, max_loops):
"""Populate the eigenstates using the Fermi function.
        This function uses bisection."""
#
# Find the lower bound to the chemical potential
mu_l = self.Job.e[0]
while np.sum(self.fermi(self.Job.e, mu_l, kT)) > self.NElectrons:
mu_l -= 10.0*kT
#
# Find the upper bound to the chemical potential
mu_u = self.Job.e[-1]
while np.sum(self.fermi(self.Job.e, mu_u, kT)) < self.NElectrons:
mu_u += 10.0*kT
#
        # Find the correct chemical potential using bisection
mu = 0.5*(mu_l + mu_u)
n = np.sum(self.fermi(self.Job.e, mu, kT))
count = 0
while math.fabs(self.NElectrons-n) > n_tol*self.NElectrons:
count+=1
if count>max_loops:
print("ERROR: The chemical potential could not be found. The error became "+str(math.fabs(self.NElectrons-n)))
sys.exit()
if n > self.NElectrons:
mu_u = mu
elif n < self.NElectrons:
mu_l = mu
mu = 0.5*(mu_l + mu_u)
n = np.sum(self.fermi(self.Job.e, mu, kT))
self.occ = self.fermi(self.Job.e, mu, kT)
def densitymatrix(self):
"""Build the density matrix."""
self.rho = np.matrix(self.Job.psi)*np.diag(self.occ)*np.matrix(self.Job.psi).H
def SCFerror(self):
"""
Calculate the self-consistent field error. We do this by comparing the
on-site elements of the new density matrix (self.rho) with the old
density matrix, self.rhotot. It is normalised by dividing by the total
number of electrons.
"""
return sum(abs(
self.rho[TBH.map_atomic_to_index(atom1, orbital1, spin1, self.Job.NAtom, self.Job.NOrb),TBH.map_atomic_to_index(atom1, orbital2, spin2, self.Job.NAtom, self.Job.NOrb)]
- self.rhotot[TBH.map_atomic_to_index(atom1, orbital1, spin1, self.Job.NAtom, self.Job.NOrb),TBH.map_atomic_to_index(atom1, orbital2, spin2, self.Job.NAtom, self.Job.NOrb)])
for atom1 in range(self.Job.NAtom) for orbital1 in range(self.Job.NOrb[atom1]) for spin1 in range(2)
for orbital2 in range(orbital1, self.Job.NOrb[atom1]) for spin2 in range(spin1, 2)
)/(self.Job.Electron.NElectrons**2)
def idempotency_error(self, rho):
"""
Determine how far from idempotency the density matrix is. If the
density matrix is idempotent then
rho*rho - rho = 0.
We normalise by the number of electrons.
"""
rho_err = np.linalg.norm((np.dot(rho, rho) - rho))/self.NElectrons
return rho_err
def McWeeny(self):
"""
Make the density matrix idempotent using the McWeeny transformation,
R.McWeeny, Rev. Mod. Phys. (1960):
            rho_n+1 = 3*rho_n^2 - 2*rho_n^3
"""
if self.Job.Def['Hamiltonian'] in ('scase','pcase','dcase','vectorS'):
rho_temp = self.rhotot
else:
rho_temp = self.rho
# Make sure that it isn't already idempotent
err_orig = self.idempotency_error(rho_temp)
if err_orig < self.Job.Def['McWeeny_tol']:
# if already idempotent then don't do anything, just exit function
return
flag, iterations, err, rho_temp = self.McWeeny_iterations(rho_temp)
# if the flag is false it means that idempotency was reduced below the tolerance
if flag == False:
# if the iterations did not converge but the idempotency error has
# gotten smaller then print a warning but treat as a success.
if err < err_orig:
print "Max iterations, ", iterations, " reached. Idempotency error = ", err
flag = True
else:
print "McWeeny transformation unsuccessful. Proceeding using input density matrix."
# Turn off using the McWeeny transformation as once it doesn't work it seems to not work again.
self.Job.Def["McWeeny"] = 0
# if this is going to be treated like a success then reassign rho_temp.
if flag == True:
if self.Job.Def['Hamiltonian'] in ('scase','pcase','dcase','vectorS'):
self.rhotot = rho_temp
else:
self.rho = rho_temp
def McWeeny_iterations(self, rho):
"""
Iterations of the McWeeny scheme for the inputted rho.
Return a True/False flag that indicates convergence, the number of
iterations required to reach convergence, the error and the converged density
matrix.
"""
converge_flag = False
for ii in range(self.Job.Def['McWeeny_max_loops']):
            # McWeeny purification step: rho -> 3*rho^2 - 2*rho^3, which pushes
            # the eigenvalues of a nearly idempotent matrix towards 0 and 1.
            rho = 3*np.dot(rho, rho) - 2*np.dot(rho, np.dot(rho, rho))
err = self.idempotency_error(rho)
verboseprint(self.Job.Def['extraverbose'], "McWeeny iteration: ", ii, "; Idempotency error = ", err)
if err < self.Job.Def['McWeeny_tol']:
converge_flag = True
return converge_flag, ii, err, rho
# Check to make sure that the error hasn't become a nan.
elif np.isnan(err):
return converge_flag, ii, err, rho
# if it gets to this statement then it probably hasn't converged.
return converge_flag, ii, err, rho
def linear_mixing(self):
"""
Mix the new and the old density matrix by linear mixing.
The form of this mixing is
rho_out = (1-A)*rho_old + A*rho_new
for which, using our notation, rho_new is self.rho, rho_old is
self.rhotot and we overwrite self.rhotot to make rho_out.
"""
A = self.Job.Def['A']
self.rhotot = (1-A)*self.rhotot + A*self.rho
def GR_Pulay(self, scf_iteration):
"""
This is the guaranteed reduction Pulay mixing scheme proposed by
Bowler and Gillan in 2008. If the number of density matrices to be
used, num_rho, is 1, it reduces to just linear mixing.
The scf_iteration is a required input because when scf_iteration is
less than num_rho then scf_iteration is the number of density matrices
that should be used.
The output is an updated self.rhotot to be used in the construction of
the Fock matrix. Also, self.inputrho, self.outputrho and self.residue
are updated for the next iteration.
"""
num_rho = self.Job.Def['num_rho']
# If the number of scf iterations is less than num_rho replace it by
# the number of scf iterations (as there will only be that number of
# density matrices).
if scf_iteration < num_rho:
num_rho = scf_iteration
# Shift along the density and residue matrices
for ii in range(num_rho-1):
self.inputrho[num_rho - 1 - ii] = np.copy(self.inputrho[num_rho - 2 - ii])
self.outputrho[num_rho - 1 - ii] = np.copy(self.outputrho[num_rho - 2 - ii])
self.residue[num_rho - 1 - ii] = np.copy(self.residue[num_rho - 2 - ii])
# Add in the new density and residue matrices
self.inputrho[0] = self.rhotot
self.outputrho[0] = self.rho
self.residue[0] = self.rho - self.rhotot
# Calculate the values of alpha to minimise the residue
alpha, igo = self.optimisation_routine(num_rho)
if igo == 1:
print "WARNING: Unable to optimise alpha for combining density matrices. Proceeding using guess."
# Guess for alpha is just 1.0 divided by the number of density matrices
alpha = np.zeros((num_rho), dtype='double')
alpha.fill(1.0/num_rho)
verboseprint(self.Job.Def['extraverbose'], "alpha: ", alpha)
# Create an optimised rhotot and an optimised rho and do linear mixing to make next input matrix
self.rhotot = sum(alpha[i]*self.inputrho[i] for i in range(num_rho))
self.rho = sum(alpha[i]*self.outputrho[i] for i in range(num_rho))
self.linear_mixing()
def chargepersite(self):
"""Compute the net charge on each site."""
norb = np.diag(self.rho)
qsite = np.zeros(self.Job.NAtom, dtype='double')
jH = self.Job.Hamilton
for a in range(0, self.Job.NAtom):
qsite[a] = (self.zcore[a] -
(np.sum(norb[jH.Hindex[a]:jH.Hindex[a+1]].real) +
np.sum(norb[jH.H0size+jH.Hindex[a]:jH.H0size+jH.Hindex[a+1]].real)))
return qsite
def electrons_site_orbital_spin(self,site,orbital,spin):
"""Compute the number of electrons with specified spin, orbital and site. """
index = TBH.map_atomic_to_index(site, orbital, spin, self.Job.NAtom, self.Job.NOrb)
return self.rho[index,index].real
def electrons_orbital_occupation_vec(self):
""" Return a vector of the occupation of each spin orbital. """
occupation = []
# Just collect the real part of the diagonal of the density matrix.
for ii in range(self.Job.Hamilton.HSOsize):
occupation.append(self.rho[ii,ii].real)
return occupation
def electrons_site_orbital(self,site,orbital):
"""Compute the number of electrons in a particular orbital on the specified site. """
return self.electrons_site_orbital_spin(site,orbital,0)+self.electrons_site_orbital_spin(site,orbital,1).real
def electrons_site(self,site):
"""Compute the number of electrons on a specified site. """
return sum(self.electrons_site_orbital(site,ii) for ii in range(self.Job.Model.atomic[self.Job.AtomType[site]]['NOrbitals'])).real
def electronspersite(self):
""" Return a vector of the number of electrons on each site. """
esite = np.zeros(self.Job.NAtom, dtype='double')
for a in range(self.Job.NAtom):
esite[a] = self.electrons_site(a).real
return esite
def spinpersite(self):
"""Compute the net spin on each site."""
ssite = np.zeros((3, self.Job.NAtom), dtype='double')
jH = self.Job.Hamilton
for a in range(0, self.Job.NAtom):
srho = np.zeros((2, 2), dtype='complex')
for j in range(jH.Hindex[a], jH.Hindex[a+1]):
#
# Sum over orbitals on one site to produce a 2x2 spin density matrix for the site
srho[0, 0] += self.rho[ j, j]
srho[0, 1] += self.rho[ j, jH.H0size+j]
srho[1, 0] += self.rho[jH.H0size+j, j]
srho[1, 1] += self.rho[jH.H0size+j, jH.H0size+j]
#
# Now compute the net spin vector for the site
ssite[0, a] = (srho[0, 1] + srho[1, 0]).real
ssite[1, a] = (srho[0, 1] - srho[1, 0]).imag
ssite[2, a] = (srho[0, 0] - srho[1, 1]).real
#
return ssite
def magnetic_correlation(self, site1, site2):
"""
Compute the direction averaged magnetic correlation between sites 1
and site 2. This requires the two particle density matrix. As we are
using the mean field approximation the two particle density matrix is
expressible in terms of the one particle density matrix. The equation
below is the equation for the magnetic correlation using the single
particle density matrix.
C_avg = 1/3 sum_{absz}( 2(rho_{aa}^{zs} rho_{bb}^{sz} - rho_{ab}^{zz}rho_{ba}^{ss})
- rho_{aa}^{ss}rho_{bb}^{zz}+rho_{ab}^{sz}rho_{ba}^{zs})
where a are the spatial orbitals on site 1, b are the spatial orbitals
on site 2, s and z are spin indices.
"""
C_avg = np.float64(0.0)
norb_1 = self.Job.Model.atomic[self.Job.AtomType[site1]]['NOrbitals']
norb_2 = self.Job.Model.atomic[self.Job.AtomType[site2]]['NOrbitals']
for s in range(2):
for z in range(2):
for a in range(norb_1):
for b in range(norb_2):
index_az = TBH.map_atomic_to_index(site1,a,z,self.Job.NAtom, self.Job.NOrb)
                        # b labels orbitals on site 2 (see the docstring), so
                        # these indices must be built with site2, not site1.
                        index_bz = TBH.map_atomic_to_index(site2,b,z,self.Job.NAtom, self.Job.NOrb)
                        index_bs = TBH.map_atomic_to_index(site2,b,s,self.Job.NAtom, self.Job.NOrb)
index_as = TBH.map_atomic_to_index(site1,a,s,self.Job.NAtom, self.Job.NOrb)
# term 1: 2.0*rho_{aa}^{zs} rho_{bb}^{sz}
C_avg += 2.0*self.rho[index_az,index_as]*self.rho[index_bs,index_bz]
# term 2: -2.0*rho_{ab}^{zz}rho_{ba}^{ss})
C_avg -= 2.0*self.rho[index_az,index_bz]*self.rho[index_as,index_bs]
# term 3: -rho_{aa}^{ss}rho_{bb}^{zz}
C_avg -= self.rho[index_as,index_as]*self.rho[index_bz,index_bz]
# term 4: rho_{ab}^{sz}rho_{ba}^{zs}
C_avg += self.rho[index_as,index_bz]*self.rho[index_bz,index_as]
# remember to divide by 3
C_avg = C_avg/3.0
return C_avg
def optimisation_routine1(self, num_rho):
"""
Optimisation routine where we try to solve for the norm squared of the
optimal density matrix with the constraint that the sum of the
coefficients is equal to one. To include the constraint we set up the
problem:
minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)
where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
alpha_k and set to zero to minimise:
2 M alpha = lambda
We solve this equation for lambda = 1. We then can simply scale alpha,
such that sum_i alpha_i = 1, which is equivalent to having solved for
a different lambda.
"""
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine")
small = 1e-14
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
Mmat = np.matrix(np.zeros((num_rho, num_rho), dtype='complex'))
lamb = 0.5*np.ones(num_rho, dtype='double')
for i in range(num_rho):
Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
Mmat[j, i] = Mmat[i, j].conj()
# if np.linalg.det(Mmat) < small:
# return alpha, 1
alpha = np.linalg.solve(Mmat, lamb)
myscale = np.sum(alpha)
if myscale == 0:
print "ERROR: alpha summed to 0 in optimisation_routine. Cannot be scaled to 1."
print alpha
return alpha, 1
else:
alpha = alpha/myscale
return alpha, 0
def optimisation_routine2(self, num_rho):
"""
Optimisation routine where we try to solve for the norm squared of the
optimal density matrix with the constraint that the sum of the
coefficients is equal to one. To include the constraint we set up the
problem:
minimise: alpha_i M_ij alpha_j - lambda (sum_i alpha_i - 1)
where M_ij = Tr(R_i^dag R_j). We then differentiate with respect to
alpha_k and set to zero to minimise:
2 M alpha - lambda = 0
We solve this equation. We have to add a buffer row and column to
include lambda as well as the constraint that the sum of alpha is
equal to one. We absorb the 2 into lambda:
{M_11 M_12 ... -1 {alpha_1 {0
M_21 M_22 ... -1 alpha_2 0
. . . .
. . . = .
. . . .
-1 -1 ... 0} lambda} -1}
"""
small = 1e-10
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine2")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho+1, dtype='double')
Mmat = np.matrix(np.zeros((num_rho+1, num_rho+1), dtype='complex'))
# make all the elements -1
Mmat.fill(-1.0)
# replace the bottom right hand corner by 0
Mmat[-1,-1] = 0.0
# calculate the rest of the Mmat.
for i in range(num_rho):
Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
Mmat[j, i] = Mmat[i, j].conj()
# if abs(np.linalg.det(Mmat)) < small:
# return alpha, 1
RHS = np.zeros(num_rho+1, dtype = 'double')
RHS[-1] = -1.0
alpha = np.linalg.solve(Mmat, RHS)
myscale = abs(np.sum(alpha)-alpha[-1])
if abs(myscale-1.0) > small:
print "ERROR: optimisation_routine2 -- sum alpha = %f. alpha must sum to 1.0." % myscale
print alpha
return alpha, 1
# if successful then return result and no error code.
return alpha, 0
def optimisation_routine3(self, num_rho):
"""
Solve the matrix vector equation approximately such that the alpha lie
between 0 and 1 and the constraint that sum_i alpha_i = 1:
{M_11 M_12 ... M_1N {alpha_1 {0
M_21 M_22 ... M_2N alpha_2 0
. . . .
. . . = .
. . . .
M_N1 M_N2 M_NN} alpha_N} 0}
We use the library PyDQED to find a good solution with alpha_i bound
        between 0 and 1. To ensure that alpha_i are bound between 0 and 1 we
        replace alpha_i by alpha_i^2. To ensure that sum_i alpha_i = 1 we
        replace alpha_i^2 by alpha_i^2/sum_a, where
        sum_a = sum_i alpha_i^2 (see optimisation_rho_Duff_Meloni below).
"""
verboseprint(self.Job.Def['extraverbose'], "optimisation_routine3")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
# initial guess for alpha:
alpha.fill(1.0/float(num_rho))
self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# calculate Mmat.
for i in range(num_rho):
self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
self.Mmat[j, i] = self.Mmat[i, j].conj()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
# bounds for the x values
# mybounds = [(0,1) for kk in range(num_rho)]
mybounds = None
# No bounds for lambda
# mybounds += [(None, None)]
# Strict bounds for the constraint
# mybounds += [(-1e-12, 1e-12)]
opt.initialize(Nvars=num_rho, Ncons=0, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
alpha, igo = opt.solve(alpha)
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
        # replace alpha[i] by alpha[i]^2/sum_i alpha[i]^2
sum_alpha = sum(alpha[ii]*alpha[ii] for ii in range(num_rho))
alpha = np.array([alpha[ii]*alpha[ii]/sum_alpha for ii in range(num_rho)])
if abs(sum(alpha)-1.0) > 1e-8:
print "WARNING: sum_i alpha_i - 1.0 = " + str(sum_alpha-1.0) + ". It should be equal to 0.0. Proceeding using guess."
return alpha, 1
if self.Job.Def['random_seeds'] == 1:
# try the random seeds
trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# Check to find out which solution is better, the guessed or the random seeds
cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
if cMc < trial_cMc:
return alpha, igo
else:
# print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
return trial_alpha, trial_err
return alpha, igo
# def optimisation_routine4(self, num_rho):
# """
# Solve the matrix vector equation approximately such that the alpha lie
# between 0 and 1 and the constraint that sum_i alpha_i = 1:
# {M_11 M_12 ... M_1N {alpha_1 {0
# M_21 M_22 ... M_2N alpha_2 0
# . . . .
# . . . = .
# . . . .
# M_N1 M_N2 M_NN} alpha_N} 0}
# We use the library PyDQED to find a good solution with alpha_i. We use
# the following trick. We replace alpha[i] by y[i] = alpha[i]^2 to enforce
# that y[i] > 0. We also use the constraint that sum_i y[i] = 1. This
# ensures that y[i] are bound betweeen 0 and 1. This is all done in the
# class optimisation_rho_meloni. It will return alpha but we will have to
# replace it by alpha^2 -- for debugging purposes we should check to make
# sure that they sum to 1.
# """
# verboseprint(self.Job.Def['extraverbose'], "optimisation_routine4")
# # If there is only one density matrix the solution is simple.
# if num_rho == 1:
# return np.array([1.0], dtype='double'), 0
# alpha = np.zeros(num_rho, dtype='double')
# # initial guess for alpha:
# alpha.fill(1.0/float(num_rho))
# self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# # calculate Mmat.
# for i in range(num_rho):
# self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
# for j in range(i+1, num_rho):
# self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# # if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# # print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
# self.Mmat[j, i] = self.Mmat[i, j].conj()
# # Initialise the PyDQED class
# opt = optimisation_rho_Meloni()
# opt.Mmat = self.Mmat
# # bounds for the x values
# mybounds = [(0,1) for kk in range(num_rho)]
# mybounds += [(-1.e-12, 1e-12)]
# opt.initialize(Nvars=num_rho, Ncons=1, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-12, maxIter=100, verbose=False)
# alpha, igo = opt.solve(alpha)
# if igo > 1:
# verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
# # replace alpha[i] by alpha[i]^2
# alpha = np.array([alpha[ii]*alpha[ii] for ii in range(num_rho)])
# if abs(sum(alpha)-1.0) > 1e-8:
# print "WARNING: sum_i alpha_i - 1.0 = " + str(sum(alpha)-1.0) + ". It should be equal to 0.0. Proceeding using guess."
# return alpha, 1
# if self.Job.Def['random_seeds'] == 1:
# # try the random seeds
# trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# # Check to find out which solution is better, the guessed or the random seeds
# cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
# if cMc < trial_cMc:
# return alpha, igo
# else:
# # print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
# return trial_alpha, trial_err
# return alpha, igo
def optimisation_routine4(self, num_rho):
"""
Solve the matrix vector equation approximately such that the alpha lie
between 0 and 1 and the constraint that sum_i alpha_i = 1:
{M_11 M_12 ... M_1N {alpha_1 {0
M_21 M_22 ... M_2N alpha_2 0
. . . .
. . . = .
. . . .
M_N1 M_N2 M_NN} alpha_N} 0}
        We use the library PyDQED together with the class optimisation_rho_total
        below. The coefficients are kept non-negative through the variable
        bounds, the residuals driven to zero are the quadratic form
        alpha.M.alpha (evaluated with alpha rescaled by its sum) and the
        constraint sum_i alpha_i - 1, and the returned vector is finally
        divided by its sum so that the coefficients lie between 0 and 1 and
        sum to 1. For debugging purposes we check that they do sum to 1.
        """
        verboseprint(self.Job.Def['extraverbose'], "optimisation_routine4")
# If there is only one density matrix the solution is simple.
if num_rho == 1:
return np.array([1.0], dtype='double'), 0
alpha = np.zeros(num_rho, dtype='double')
# initial guess for alpha:
alpha.fill(1.0/float(num_rho))
self.Mmat = np.zeros((num_rho, num_rho), dtype='complex')
# calculate Mmat.
for i in range(num_rho):
self.Mmat[i, i] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[i]).H)
for j in range(i+1, num_rho):
self.Mmat[i, j] = np.trace(np.matrix(self.residue[i])*np.matrix(self.residue[j]).H)
# if np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])) != Mmat[i, j].conj():
# print "Mmat[%i,%i] = %f. Mmat[%i,%i].conj() = %f." % (j, i, np.sum(np.matrix(self.residue[j]).H*np.matrix(self.residue[i])), i, j, Mmat[i, j].conj())
self.Mmat[j, i] = self.Mmat[i, j].conj()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
# bounds for the x values
mybounds = [(0, None) for kk in range(num_rho)]
# mybounds += [(None, None)]
# mybounds += [(-1.e-12, 1e-12)]
opt.initialize(Nvars=num_rho, Ncons=0, Neq=2, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
alpha, igo = opt.solve(alpha)
# remove lambda
sum_a = sum(alpha)
alpha = alpha/sum_a
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
if abs(sum(alpha)-1.0) > 1e-8:
print "WARNING: sum_i alpha_i - 1.0 = " + str(sum(alpha)-1.0) + ". It should be equal to 0.0. Proceeding using guess."
return alpha, 1
if self.Job.Def['random_seeds'] == 1:
# try the random seeds
trial_alpha, trial_cMc, trial_err = self.random_seeds_optimisation(num_rho, self.Job.Def['num_random_seeds'])
# Check to find out which solution is better, the guessed or the random seeds
cMc = alpha.conjugate().dot(self.Mmat.dot(alpha))
if cMc < trial_cMc:
return alpha, igo
else:
# print "Returning random seed answer. cMc = ", str(cMc), "; trial_cMc = ", str(trial_cMc)
return trial_alpha, trial_err
return alpha, igo
def random_seeds_optimisation(self, num_rho, num_trials):
cMc_vec = []
cMc_val = []
cMc_err = []
random.seed()
# Initialise the PyDQED class
opt = self.optimisation_rho()
opt.Mmat = self.Mmat
mybounds = None
opt.initialize(Nvars=num_rho, Ncons=0, Neq=num_rho, bounds=mybounds, tolf=1e-16, told=1e-8, tolx=1e-8, maxIter=100, verbose=False)
# random starting seeds
for gg in range(num_trials):
alpha = np.array([random.random() for hh in range(num_rho)])
alpha, igo = opt.solve(alpha)
if igo > 1:
verboseprint(self.Job.Def['extraverbose'], dqed_err_dict[igo])
# replace alpha[i] by sin^2(alpha[i])/sum_i sin^2(alpha[i])
sum_alpha = sum(np.sin(alpha[ii])*np.sin(alpha[ii]) for ii in range(num_rho))
alpha = np.array([np.sin(alpha[ii])*np.sin(alpha[ii])/sum_alpha for ii in range(num_rho)])
cMc_vec.append(alpha)
cMc_val.append(alpha.conjugate().dot(self.Mmat.dot(alpha)))
cMc_err.append(igo)
# print "Trial values of cMc are: ", cMc_val
val, idx = min((val, idx) for (idx, val) in enumerate(cMc_val))
# print "chosen index = ", idx
return cMc_vec[idx], cMc_val[idx], cMc_err[idx]
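# Illustrative sketch (not part of the original module): the constrained solve
# used by optimisation_routine2 above, written out for two standalone residue
# matrices of equal shape. M_ij = Tr(R_i^dag R_j); the bordering row and column
# impose sum_i alpha_i = 1 through a Lagrange multiplier.
def _pulay_alpha_sketch(residue_1, residue_2):
    residues = [np.asarray(residue_1), np.asarray(residue_2)]
    Mmat = np.zeros((3, 3), dtype='complex')
    Mmat.fill(-1.0)
    Mmat[-1, -1] = 0.0
    for i in range(2):
        for j in range(2):
            Mmat[i, j] = np.trace(np.dot(residues[i], residues[j].conj().T))
    RHS = np.zeros(3, dtype='double')
    RHS[-1] = -1.0
    solution = np.linalg.solve(Mmat, RHS)
    # The first two entries are alpha_1 and alpha_2; the last is the multiplier.
    return solution[0], solution[1]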
# class optimisation_rho_Duff(DQED):
# """
# A DQED class containing the functions to optimise.
# It requires the small Mmat matrix to work.
# {M_11 M_12 ... M_1N
# M_21 M_22 ... M_2N
# . .
# . .
# . .
# M_N1 M_N2 M_NN}
# It implements the constraint that the sum_i x[i] = 1, and the bounds
# 0 < x[i] < 1 by replacing x[i] by sin^2(x[i])/sum_i sin^2(x[i]).
# """
# def evaluate(self, x):
# Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
# f = np.zeros((Neq), np.float64)
# J = np.zeros((Neq, Nvars), np.float64)
# fcons = np.zeros((Ncons), np.float64)
# Jcons = np.zeros((Ncons, Nvars), np.float64)
# # Replace x[i] by sin^2(x[i])/sum_i sin^2(x[i])
# y = []
# sum_x = sum(np.sin(x[ii])*np.sin(x[ii]) for ii in range(Nvars))
# for ii in range(Nvars):
# y.append(np.sin(x[ii])*np.sin(x[ii])/sum_x)
# for pp in range(Neq):
# f[pp] = sum((self.Mmat[pp, ii])*y[ii] for ii in range(Nvars))
# for kk in range(Nvars):
# # find this equation by differentiating f[pp] w.r.t. x[kk]
# J[pp, kk] = 2*np.sin(x[kk])*np.cos(x[kk])*(f[pp] - self.Mmat[pp, kk])/sum_x
# return f, J, fcons, Jcons
class optimisation_rho_Duff_Meloni(DQED):
"""
A DQED class containing the functions to optimise.
It requires the small Mmat matrix to work.
{M_11 M_12 ... M_1N
M_21 M_22 ... M_2N
. .
. .
. .
M_N1 M_N2 M_NN}
It implements the constraint that the sum_i x[i] = 1, and the bounds
0 <= x[i] <= 1 by replacing x[i] by y[i] = x[i]^2/sum_i x[i]^2. As the y[i]
must be positive and they must sum to 1, they must also lie between 0 and
1.
"""
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = np.zeros((Neq), np.float64)
J = np.zeros((Neq, Nvars), np.float64)
fcons = np.zeros((Ncons), np.float64)
Jcons = np.zeros((Ncons, Nvars), np.float64)
        # Replace x[i] by x[i]^2/sum_i x[i]^2
y = []
sum_x = sum(x[ii]*x[ii] for ii in range(Nvars))
for ii in range(Nvars):
y.append(x[ii]*x[ii]/sum_x)
for pp in range(Neq):
f[pp] = sum((self.Mmat[pp, ii])*y[ii] for ii in range(Nvars))
for kk in range(Nvars):
# find this equation by differentiating f[pp] w.r.t. x[kk]
J[pp, kk] = 2*x[kk]*(self.Mmat[pp, kk] - f[pp])/sum_x
return f, J, fcons, Jcons
# class optimisation_rho_Meloni(DQED):
# """
# A DQED class containing the functions to optimise.
# It requires the small Mmat matrix to work.
# {M_11 M_12 ... M_1N
# M_21 M_22 ... M_2N
# . .
# . .
# . .
# M_N1 M_N2 M_NN}
# We replace x[i] by y[i] = x[i]^2. This enforces that y[i] >= 0. We also use
# the constraint that the sum_i y[i] = 1, therefore they must also lie between 0 and
# 1.
# """
# def evaluate(self, x):
# Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
# f = np.zeros((Neq), np.float64)
# J = np.zeros((Neq, Nvars), np.float64)
# fcons = np.zeros((Ncons), np.float64)
# Jcons = np.zeros((Ncons, Nvars), np.float64)
# # Replace x[i] by sin^2(x[i])/sum_i sin^2(x[i])
# y = np.array((Nvars), np.float64)
# for ii in range(Nvars):
# y[ii] = x[ii]*x[ii]
# for pp in range(Neq):
# f[pp] = self.Mmat[pp].dot(y)
# for kk in range(Nvars):
# # find this equation by differentiating f[pp] w.r.t. x[kk]
# J[pp, kk] = 2*x[kk]*self.Mmat[pp, kk]
# fcons[0] = sum(y) - 1.0
# for kk in range(Nvars):
# Jcons[0, kk] = 2*x[kk]
# return f, J, fcons, Jcons
class optimisation_rho_total(DQED):
"""
A DQED class containing the functions to optimise.
It finds the values of alpha that best minimise:
{alpha_1 {M_11 M_12 ... M_1N {alpha_1
alpha_2 M_21 M_22 ... M_2N alpha_2
. . . .
. . . .
. . . .
alpha_N} M_N1 M_N2 M_NN} alpha_N}
It implements the constraint that the sum_i alpha[i] = 1, and the bounds
0 <= alpha[i] <= 1 by replacing x[i] by y[i] = x[i]/sum_i x[i]. As the y[i]
must be positive (because this problem is quadratic) and they must sum to
1 they must also lie between 0 and 1.
"""
def evaluate(self, x):
Neq = self.Neq; Nvars = self.Nvars; Ncons = self.Ncons
f = np.zeros((Neq), np.float64)
J = np.zeros((Neq, Nvars), np.float64)
fcons = np.zeros((Ncons), np.float64)
Jcons = np.zeros((Ncons, Nvars), np.float64)
sum_x = sum(x)
y = x/sum_x
f[0] = y.dot(self.Mmat.dot(y))
f[1] = sum_x - 1.0
# find this equation by differentiating f[pp] w.r.t. x[kk] and that Mmat is Hermitian
J[0] = 2.0/sum_x*(self.Mmat.dot(y)-f[0])
J[1] = np.ones((Nvars), np.float64)
return f, J, fcons, Jcons
dqed_err_dict={}
dqed_err_dict[2] = "The norm of the residual is zero; the solution vector is a root of the system."
dqed_err_dict[3] = "The bounds on the trust region are being encountered on each step; the solution vector may or may not be a local minimum."
dqed_err_dict[4] = "The solution vector is a local minimum."
dqed_err_dict[5] = "A significant amount of noise or uncertainty has been observed in the residual; the solution may or may not be a local minimum."
dqed_err_dict[6] = "The solution vector is only changing by small absolute amounts; the solution may or may not be a local minimum."
dqed_err_dict[7] = "The solution vector is only changing by small relative amounts; the solution may or may not be a local minimum."
dqed_err_dict[8] = "The maximum number of iterations has been reached; the solution is the best found, but may or may not be a local minimum."
for ii in range(9, 19):
dqed_err_dict[ii] = "An error occurred during the solve operation; the solution is not a local minimum."
| gpl-2.0 | 8,748,730,909,583,750,000 | 43.624297 | 184 | 0.558468 | false |
CS4098Group/Project | app/peos.py | 1 | 4950 | from subprocess import Popen, PIPE
import os.path
import os
import xml.etree.ElementTree as ET
PEOS_COMMAND = './peos/os/kernel/peos'
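# Note added for clarity: each helper below shells out to the peos kernel
# binary above with Popen, raises PEOSException whenever anything appears on
# stderr, and also raises when the return code differs from 1 (this wrapper
# treats a return code of 1 as success).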
class PEOSException(Exception):
pass
def create(path_to_model_file):
if not os.path.isfile(path_to_model_file):
raise IOError("File does not exist")
proc = Popen('{} -c {}'.format(PEOS_COMMAND, path_to_model_file),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} could not be found or started from {}'
.format(path_to_model_file, os.getcwd()))
pid = int(output.split("=")[1][1:])
return pid
def status(pid):
if not os.path.isfile("proc_table.dat.xml"):
# list command creates the xml file
list()
tree = ET.parse("proc_table.dat.xml")
root = tree.getroot()
process = root.find(".//process[@pid='" + str(pid) + "']")
if process is None:
raise NameError("PID not found")
actions = process.findall("action")
actions_dict = []
for action in actions:
inner_list = []
for i in range(len(action)):
inner_list.append({action[i].tag: action[i].attrib})
actions_dict.append({"details": inner_list})
[actions_dict[-1].update({key: value}) for key,
         value in action.attrib.items()]
return actions_dict
def bind_resource_file(resource_file):
if not os.path.isfile(resource_file):
raise IOError("File Not Found")
proc = Popen('{} -b {}'.format(PEOS_COMMAND, resource_file),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} could not be imported'
.format(resource_file))
return
def start(pid, action, event):
if event not in ['start', 'finish', 'abort', 'suspend']:
raise PEOSException('Unknown event: {}'.format(event))
proc = Popen('{} -n {} {} {}'.format(PEOS_COMMAND, pid, action, event),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('{} action could not be started for pid: {}'
.format(action, pid))
return
def update_all():
proc = Popen('{} -u'.format(PEOS_COMMAND),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Unknown error')
return
def bind_resource(pid, resource_name, resource_value):
proc = Popen('{} -r {} {} {}'
.format(PEOS_COMMAND, pid, resource_name, resource_value),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException("Could not bind {} to {} in {}"
.format(pid, resource_name, resource_value))
return
def list():
proc = Popen('{} -i'.format(PEOS_COMMAND),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Unknown error. Process returned: {}'
.format(proc.returncode))
out = [p.split(" ") for p in output.split("\n")]
p_list = [{"pid": elm[0], "model":os.path.basename(os.path.splitext(elm[1])[0])} for elm in out
if len(elm) > 1 and elm[1] != ""]
return p_list
def delete(pid):
proc = Popen('{} -d {}'.format(PEOS_COMMAND, pid),
shell=True,
stdout=PIPE,
stderr=PIPE)
output, err = proc.communicate()
if err != '':
raise PEOSException(err)
elif proc.returncode != 1:
# Going to need to parse the stderr/stdout pipes for this
raise PEOSException('Failed to delete process {}. Does it exist?'
.format(proc.returncode))
return
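# Illustrative workflow (hypothetical file and action names, not taken from
# the original project):
#   pid = create("models/example.pml")   # instantiate a process model
#   start(pid, "write_code", "start")    # record that an action has started
#   print(status(pid))                   # list the actions of the process
#   delete(pid)                          # remove the process again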
| mit | -4,090,355,685,830,059,500 | 27.285714 | 99 | 0.55596 | false |
mnestis/provglish | provglish/nl/templates/generation_template.py | 1 | 3100 | from provglish import transform, prov
from provglish.lexicalisation import urn_from_uri as lex
from provglish.lexicalisation import plural_p
from provglish.prov import PROV
from provglish.nl.tools import SETTINGS, realise_sentence
import rdflib
from rdflib.plugins import sparql
from rdflib import RDF
import urllib2
_generation_query = sparql.prepareQuery(
"""
SELECT ?entity ?generation ?time ?activity WHERE {
GRAPH <prov_graph> {
{
?entity a prov:Entity .
?entity prov:qualifiedGeneration ?generation .
?generation a prov:Generation .
OPTIONAL { ?generation prov:atTime ?time } .
OPTIONAL { ?generation prov:activity ?activity } .
FILTER ( bound(?time) || bound(?activity))
} UNION {
?entity a prov:Entity .
?entity prov:wasGeneratedBy ?activity .
?activity a prov:Activity
}
}
}
""",
initNs={"prov":PROV})
def _generation_binding(graph):
results = graph.query(_generation_query)
return results.bindings
def _generation_coverage(bindings, graph):
if "?generation" in bindings:
# Qualified
coverage = [(bindings["?entity"], RDF.type, PROV.Entity),
(bindings["?entity"], PROV.qualifiedGeneration, bindings["?generation"]),
(bindings["?generation"], RDF.type, PROV.Generation)]
if "?time" in bindings:
coverage.append((bindings["?generation"], PROV.atTime, bindings["?time"]))
if "?activity" in bindings:
coverage.extend([(bindings["?generation"], PROV.activity, bindings["?activity"]),
(bindings["?activity"], RDF.type, PROV.Activity)])
return coverage
else:
# Unqualified
return [(bindings["?entity"], RDF.type, PROV.Entity),
(bindings["?entity"], PROV.wasGeneratedBy, bindings["?activity"]),
(bindings["?activity"], RDF.type, PROV.Activity)]
def _generation_string(bindings, history):
sentence = {}
sentence["object"] = {"type": "noun_phrase",
"head": lex(bindings["?entity"]),
"features": {"number": "plural" if plural_p(bindings["?entity"]) else "singular"}}
sentence["verb"] = "generate"
sentence["features"] = {"tense": "past",
"passive": "true"}
sentence["modifiers"] = []
if "?time" in bindings:
sentence["modifiers"].append({"type":"preposition_phrase",
"preposition": "at",
"noun": bindings["?time"]})
if "?activity" in bindings:
sentence["modifiers"].append({"type":"preposition_phrase",
"preposition":"by",
"noun": lex(bindings["?activity"])})
return realise_sentence({"sentence":sentence})
generation = transform.Template("Generation", _generation_binding, _generation_coverage, _generation_string)
| mit | -5,202,516,395,004,116,000 | 35.046512 | 109 | 0.569677 | false |
ledusledus/lidarheightcellclassifyscripts | cliffs.py | 1 | 3730 | import unittest
from itertools import imap
from operator import add
NO_CLIFF=1
HAS_CLIFF=2
class TestBuildCliff(unittest.TestCase):
def setUp(self):
pass
def testBuild_Cliff(self):
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,(12,10):2,(12,11):4}
key=(10,10)
start_cliff=1
end_cliff=99
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(10.0,10.5),(11.0,10.5)])
key=(11,10)
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(11.0,10.5),(12.0,10.5)])
def testBuildFullCliff(self):
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,}
key=(10,10)
start_cliff=1
end_cliff=99
cliff=build_cliff(altitudes,key, start_cliff,end_cliff)
self.assertEqual(cliff,[(10.0,10.5),(11.0,10.5)])
cells={}
res=build_full_cliff(cells,altitudes,key, start_cliff,end_cliff)
self.assertEqual(res, [[(10.0,10.5),(11.0,10.5)]])
cells={}
altitudes={(10,10):2,(10,11):4,(11,11):4,(11,10):2,(12,10):2,(12,11):4}
res=build_full_cliff(cells,altitudes,key, start_cliff,end_cliff)
self.assertEqual(res, [[(10.0,10.5),(11.0,10.5)],[(11.0,10.5),(12.0,10.5)]])
def divide_by_scalar(vector,scalar):
return tuple(i/scalar for i in vector)
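# Convention assumed by the functions below (inferred from the code and the
# tests above): a cell keyed (i, j) owns the altitude samples at its four
# corners (i, j), (i+1, j), (i+1, j+1) and (i, j+1). build_cliff measures the
# altitude jump along each side of that square; when at least two sides jump
# by an amount inside [start_cliff, end_cliff) it returns a segment joining
# the midpoints of the two sides with the largest jumps. build_full_cliff then
# walks from cell to neighbouring cell, chaining such segments until it meets
# an already visited cell, a cell with no qualifying cliff, or a segment that
# does not continue the current one.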
def build_cliff( altitudes, key, start_cliff, end_cliff ):
keys = [key, (key[0]+1,key[1]), (key[0]+1,key[1]+1), (key[0],key[1]+1)]
alts = []
for k in keys:
# we need to have a full cell and there is none
if not altitudes.has_key(k):
return None
alts.append(altitudes[k])
deltas=[(abs(alts[(i+1)%4]-alts[i]),i) for i in range(len(alts))]
good_deltas = filter(lambda x: x[0]>=start_cliff and x[0]<end_cliff, deltas)
if len(good_deltas)>2:
print "special case good deltas"
if len(good_deltas) < 2: # no cliffs found
# 1 means we are at the end. In that case it should be found from another cliff.
return None
good_deltas.sort(reverse=True)
idx1=good_deltas[0][1]
idx2=good_deltas[1][1]
if alts[idx1]<alts[(idx1+1)%4]:
idx1,idx2=idx2,idx1
cliff_line=[divide_by_scalar(imap(add, keys[idx1],keys[(idx1+1)%4]),2.0),
divide_by_scalar(imap(add, keys[idx2],keys[(idx2+1)%4]),2.0),]
return cliff_line
def next_key(key, point):
if point[0]==key[0]:
return (key[0]-1,key[1])
if point[0]==key[0]+1:
return (key[0]+1,key[1])
if point[1]==key[1]:
return (key[0],key[1]-1)
return key[0],key[1]+1
def build_full_cliff(cells, altitudes, key, start_cliff, end_cliff ):
cliff_line = build_cliff(altitudes, key, start_cliff, end_cliff )
if cliff_line == None:
cells[key]=NO_CLIFF
return
else:
cells[key]=HAS_CLIFF
curkey=key
full_cliff_lines=[cliff_line]
curpoint=full_cliff_lines[-1][1]
# now we need to grow right:
while True:
curkey=next_key(curkey, curpoint)
if cells.has_key(curkey):
# has been visited already
break
print curkey
cliff_line=build_cliff(altitudes, curkey, start_cliff, end_cliff )
print cliff_line
if cliff_line==None:
cells[curkey]=NO_CLIFF
break
if cliff_line[0]!=full_cliff_lines[-1][1]:
# this is not our cliff
break
cells[curkey]=HAS_CLIFF
full_cliff_lines.append(cliff_line)
print full_cliff_lines
curpoint=full_cliff_lines[-1][1]
# todo: then we need to grow left
return full_cliff_lines
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,035,802,107,887,874,000 | 33.537037 | 88 | 0.584718 | false |
BrunoTh/ETS2Autopilot | UI/ui_updater.py | 1 | 2676 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer\updater.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 131)
MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.German, QtCore.QLocale.Germany))
self.pb_progress = QtWidgets.QProgressBar(MainWindow)
self.pb_progress.setGeometry(QtCore.QRect(10, 60, 381, 23))
self.pb_progress.setProperty("value", 0)
self.pb_progress.setAlignment(QtCore.Qt.AlignCenter)
self.pb_progress.setOrientation(QtCore.Qt.Horizontal)
self.pb_progress.setObjectName("pb_progress")
self.b_run = QtWidgets.QPushButton(MainWindow)
self.b_run.setEnabled(False)
self.b_run.setGeometry(QtCore.QRect(210, 90, 181, 30))
self.b_run.setCheckable(False)
self.b_run.setChecked(False)
self.b_run.setObjectName("b_run")
self.label = QtWidgets.QLabel(MainWindow)
self.label.setGeometry(QtCore.QRect(10, 10, 81, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(MainWindow)
self.label_2.setGeometry(QtCore.QRect(10, 30, 81, 16))
self.label_2.setObjectName("label_2")
self.b_check = QtWidgets.QPushButton(MainWindow)
self.b_check.setGeometry(QtCore.QRect(10, 90, 181, 30))
self.b_check.setObjectName("b_check")
self.l_newVersion = QtWidgets.QLabel(MainWindow)
self.l_newVersion.setGeometry(QtCore.QRect(100, 30, 81, 16))
self.l_newVersion.setText("")
self.l_newVersion.setObjectName("l_newVersion")
self.l_currentVersion = QtWidgets.QLabel(MainWindow)
self.l_currentVersion.setGeometry(QtCore.QRect(100, 10, 81, 16))
self.l_currentVersion.setText("")
self.l_currentVersion.setObjectName("l_currentVersion")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Updater"))
self.pb_progress.setFormat(_translate("MainWindow", "%p%"))
self.b_run.setText(_translate("MainWindow", "Run Update"))
self.label.setText(_translate("MainWindow", "Current version:"))
self.label_2.setText(_translate("MainWindow", "New version:"))
self.b_check.setText(_translate("MainWindow", "Check for Update"))
| mit | -8,680,544,128,683,672,000 | 45.947368 | 91 | 0.681988 | false |
bgribble/mfp | mfp/gui/modes/global_mode.py | 1 | 12471 | #! /usr/bin/env python
'''
global_mode.py: Global input mode bindings
Copyright (c) 2012 Bill Gribble <[email protected]>
'''
from ..input_mode import InputMode
from .label_edit import LabelEditMode
from .transient import TransientMessageEditMode
from .enum_control import EnumEditMode
from ..message_element import TransientMessageElement
from ..patch_element import PatchElement
from mfp import MFPGUI
class GlobalMode (InputMode):
def __init__(self, window):
self.manager = window.input_mgr
self.window = window
self.allow_selection_drag = True
self.selection_drag_started = False
self.drag_started = False
self.selbox_started = False
self.selbox_changed = []
self.drag_start_x = None
self.drag_start_y = None
self.drag_last_x = None
self.drag_last_y = None
self.drag_target = None
self.next_console_position = 1
self.next_tree_position = 1
InputMode.__init__(self, "Global input bindings")
# global keybindings
self.bind("!", self.transient_msg, "Send message to selection")
self.bind("~", self.toggle_console, "Show/hide log and console")
self.bind("`", self.toggle_tree, "Show/hide left side info")
self.bind("PGUP", self.window.layer_select_up, "Select higher layer")
self.bind("PGDN", self.window.layer_select_down, "Select lower layer")
self.bind("C-PGUP", self.window.patch_select_prev, "Select higher patch")
self.bind("C-PGDN", self.window.patch_select_next, "Select lower patch")
self.bind('C-f', self.window.patch_new, "Create a new patch")
self.bind('C-o', self.open_file, "Load file into new patch")
self.bind('C-s', self.save_file, "Save patch to file")
self.bind('C-p', self.save_as_lv2, "Save patch as LV2 plugin")
self.bind('C-w', self.patch_close, "Close current patch")
self.bind('C-q', self.quit, "Quit")
self.bind('C-A-.', self.toggle_pause, "Pause/unpause execution")
self.bind("M1DOWN", lambda: self.selbox_start(None), "Start selection box")
self.bind("M1-MOTION", lambda: self.selbox_motion(True), "Drag selection box")
self.bind("M1UP", self.selbox_end, "End selection box")
self.bind("S-M1DOWN", lambda: self.selbox_start(True), "Start add-to-selection box")
self.bind("S-M1-MOTION", lambda: self.selbox_motion(True), "Drag add-to-selection box")
self.bind("S-M1UP", self.selbox_end, "End selection box")
self.bind("C-M1DOWN", lambda: self.selbox_start(False),
"Start toggle-selection box")
self.bind("C-M1-MOTION", lambda: self.selbox_motion(False),
"Drag toggle-selection box")
self.bind("C-M1UP", self.selbox_end, "End toggle-selection box")
self.bind("S-C-M1DOWN", self.drag_start, "Begin dragging viewport")
self.bind("S-C-M1-MOTION", self.drag_motion, "Drag viewport")
self.bind("S-C-M1UP", self.drag_end, "End drag viewport")
self.bind('+', lambda: self.window.zoom_in(1.25), "Zoom view in")
self.bind('=', lambda: self.window.zoom_in(1.25), "Zoom view in")
self.bind('-', lambda: self.window.zoom_out(0.8), "Zoom view out")
self.bind('SCROLLUP', lambda: self.window.zoom_in(1.06), "Zoom view in")
self.bind('SCROLLDOWN', lambda: self.window.zoom_in(0.95), "Zoom view out")
self.bind('SCROLLSMOOTHUP', lambda: self.window.zoom_in(1.015), "Zoom view in")
self.bind('SCROLLSMOOTHDOWN', lambda: self.window.zoom_in(0.985), "Zoom view out")
self.bind('C-0', self.window.reset_zoom, "Reset view position and zoom")
self.bind("HOVER", lambda: self.hover(False))
self.bind("S-HOVER", lambda: self.hover(True))
def toggle_console(self):
from gi.repository import Gdk
alloc = self.window.content_console_pane.get_allocation()
oldpos = self.window.content_console_pane.get_position()
self.window.content_console_pane.set_position(
alloc.height - self.next_console_position)
self.next_console_position = alloc.height - oldpos
# KLUDGE!
MFPGUI().clutter_do_later(100, self._refresh)
return False
def toggle_tree(self):
alloc = self.window.tree_canvas_pane.get_allocation()
oldpos = self.window.tree_canvas_pane.get_position()
self.window.tree_canvas_pane.set_position(self.next_tree_position)
self.next_tree_position = oldpos
# KLUDGE!
MFPGUI().clutter_do_later(100, self._refresh)
return False
def _refresh(self):
oldpos = self.window.content_console_pane.get_position()
self.window.content_console_pane.set_position(oldpos - 1)
return False
def transient_msg(self):
if self.window.selected:
return self.window.add_element(TransientMessageElement)
else:
return False
def hover(self, details):
for m in self.manager.minor_modes:
if m.enabled and isinstance(m, (TransientMessageEditMode, LabelEditMode,
EnumEditMode)):
details = False
o = self.manager.pointer_obj
try:
if o is not None and o.obj_state == PatchElement.OBJ_COMPLETE:
o.show_tip(self.manager.pointer_x, self.manager.pointer_y, details)
except Exception as e:
print("oops! exception in hover")
import traceback
traceback.print_exc()
pass
return False
def save_file(self):
import os.path
patch = self.window.selected_patch
if patch.last_filename is None:
default_filename = patch.obj_name + '.mfp'
else:
default_filename = patch.last_filename
def cb(fname):
if fname:
patch.last_filename = fname
if fname != default_filename:
basefile = os.path.basename(fname)
parts = os.path.splitext(basefile)
newname = parts[0]
patch.obj_name = newname
MFPGUI().mfp.rename_obj(patch.obj_id, newname)
patch.send_params()
self.window.refresh(patch)
MFPGUI().mfp.save_file(patch.obj_name, fname)
self.window.get_prompted_input("File name to save: ", cb, default_filename)
def save_as_lv2(self):
patch = self.window.selected_patch
default_plugname = 'mfp_' + patch.obj_name
def cb(plugname):
if plugname:
MFPGUI().mfp.save_lv2(patch.obj_name, plugname)
self.window.get_prompted_input("Plugin name to save: ", cb, default_plugname)
def open_file(self):
def cb(fname):
MFPGUI().mfp.open_file(fname)
self.window.get_prompted_input("File name to load: ", cb)
def drag_start(self):
self.drag_started = True
px = self.manager.pointer_ev_x
py = self.manager.pointer_ev_y
self.drag_last_x = px
self.drag_last_y = py
return True
def drag_motion(self):
if self.drag_started is False:
return False
px = self.manager.pointer_ev_x
py = self.manager.pointer_ev_y
dx = px - self.drag_last_x
dy = py - self.drag_last_y
self.drag_last_x = px
self.drag_last_y = py
self.window.move_view(dx, dy)
return True
def drag_end(self):
self.drag_started = False
return True
def selbox_start(self, select_mode):
if select_mode is None:
if self.manager.pointer_obj is not None:
if self.manager.pointer_obj not in self.window.selected:
self.window.unselect_all()
self.window.select(self.manager.pointer_obj)
raise self.manager.InputNeedsRequeue()
if self.allow_selection_drag:
self.selection_drag_started = True
else:
self.window.unselect_all()
self.selbox_started = True
elif select_mode is True:
if (self.manager.pointer_obj
and self.manager.pointer_obj not in self.window.selected):
self.window.select(self.manager.pointer_obj)
self.selbox_started = True
else:
if self.manager.pointer_obj in self.window.selected:
self.window.unselect(self.manager.pointer_obj)
self.selbox_started = True
px = self.manager.pointer_x
py = self.manager.pointer_y
self.drag_start_x = px
self.drag_start_y = py
self.drag_last_x = px
self.drag_last_y = py
return True
def selbox_motion(self, select_mode):
if not (self.selbox_started or self.selection_drag_started):
return False
px = self.manager.pointer_x
py = self.manager.pointer_y
dx = px - self.drag_last_x
dy = py - self.drag_last_y
self.drag_last_x = px
self.drag_last_y = py
if self.selection_drag_started:
for obj in self.window.selected:
if obj.editable and obj.display_type != 'connection':
obj.drag(dx, dy)
return True
enclosed = self.window.show_selection_box(self.drag_start_x, self.drag_start_y,
self.drag_last_x, self.drag_last_y)
for obj in enclosed:
if select_mode:
if obj not in self.window.selected:
if obj not in self.selbox_changed:
self.selbox_changed.append(obj)
self.window.select(obj)
else:
if obj not in self.selbox_changed:
self.selbox_changed.append(obj)
if obj in self.window.selected:
self.window.unselect(obj)
else:
self.window.select(obj)
new_changed = []
for obj in self.selbox_changed:
if obj not in enclosed:
if obj in self.window.selected:
self.window.unselect(obj)
else:
self.window.select(obj)
else:
new_changed.append(obj)
self.selbox_changed = new_changed
return True
def selbox_end(self):
if self.selection_drag_started:
for obj in self.window.selected:
obj.send_params()
self.selbox_started = False
self.selection_drag_started = False
self.selbox_changed = []
self.window.hide_selection_box()
return True
def patch_close(self):
def close_confirm(answer):
if answer is not None:
aa = answer.strip().lower()
if aa in ['y', 'yes']:
self.window.patch_close()
from mfp import log
p = self.window.selected_patch
log.debug("patch_close: checking for unsaved changes")
if MFPGUI().mfp.has_unsaved_changes(p.obj_id):
self.window.get_prompted_input("Patch has unsaved changes. Close anyway? [yN]",
close_confirm, '')
else:
self.window.patch_close()
def quit(self):
def quit_confirm(answer):
if answer is not None:
aa = answer.strip().lower()
if aa in ['y', 'yes']:
self.window.quit()
allpatches = MFPGUI().mfp.open_patches()
clean = True
for p in allpatches:
if MFPGUI().mfp.has_unsaved_changes(p):
clean = False
if not clean:
self.window.get_prompted_input(
"There are patches with unsaved changes. Quit anyway? [yN]",
quit_confirm, '')
else:
self.window.quit()
def toggle_pause(self):
from mfp import log
try:
paused = MFPGUI().mfp.toggle_pause()
if paused:
log.warning("Execution of all patches paused")
else:
log.warning("Execution of all patches resumed")
except Exception as e:
print("Caught exception", e)
| gpl-2.0 | -2,904,285,902,620,587,000 | 35.89645 | 95 | 0.571646 | false |
winguru/graphite-api | graphite_api/render/glyph.py | 1 | 84736 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import cairocffi as cairo
import itertools
import json
import math
import pytz
import re
import six
from datetime import datetime, timedelta
from io import BytesIO
from six.moves import range
from six.moves.urllib.parse import unquote_plus
from .datalib import TimeSeries
from ..utils import to_seconds
INFINITY = float('inf')
colorAliases = {
'black': (0, 0, 0),
'white': (255, 255, 255),
'blue': (100, 100, 255),
'green': (0, 200, 0),
'red': (255, 0, 0),
'yellow': (255, 255, 0),
'orange': (255, 165, 0),
'purple': (200, 100, 255),
'brown': (150, 100, 50),
'cyan': (0, 255, 255),
'aqua': (0, 150, 150),
'gray': (175, 175, 175),
'grey': (175, 175, 175),
'magenta': (255, 0, 255),
'pink': (255, 100, 100),
'gold': (200, 200, 0),
'rose': (200, 150, 200),
'darkblue': (0, 0, 255),
'darkgreen': (0, 255, 0),
'darkred': (200, 00, 50),
'darkgray': (111, 111, 111),
'darkgrey': (111, 111, 111),
}
# This gets overridden by graphTemplates.conf
defaultGraphOptions = dict(
background='white',
foreground='black',
majorline='rose',
minorline='grey',
lineColors=("blue", "green", "red", "purple", "brown", "yellow", "aqua",
"grey", "magenta", "pink", "gold", "rose"),
fontname='Sans',
fontsize=10,
fontbold='false',
fontitalic='false',
)
# X-axis configurations (copied from rrdtool, this technique is evil & ugly
# but effective)
SEC = 1
MIN = 60
HOUR = MIN * 60
DAY = HOUR * 24
WEEK = DAY * 7
MONTH = DAY * 31
YEAR = DAY * 365
xAxisConfigs = (
dict(seconds=0.00,
minorGridUnit=SEC,
minorGridStep=5,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=5,
format="%H:%M:%S",
maxInterval=10*MIN),
dict(seconds=0.07,
minorGridUnit=SEC,
minorGridStep=10,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=10,
format="%H:%M:%S",
maxInterval=20*MIN),
dict(seconds=0.14,
minorGridUnit=SEC,
minorGridStep=15,
majorGridUnit=MIN,
majorGridStep=1,
labelUnit=SEC,
labelStep=15,
format="%H:%M:%S",
maxInterval=30*MIN),
dict(seconds=0.27,
minorGridUnit=SEC,
minorGridStep=30,
majorGridUnit=MIN,
majorGridStep=2,
labelUnit=MIN,
labelStep=1,
format="%H:%M",
maxInterval=2*HOUR),
dict(seconds=0.5,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=2,
labelUnit=MIN,
labelStep=1,
format="%H:%M",
maxInterval=2*HOUR),
dict(seconds=1.2,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=4,
labelUnit=MIN,
labelStep=2,
format="%H:%M",
maxInterval=3*HOUR),
dict(seconds=2,
minorGridUnit=MIN,
minorGridStep=1,
majorGridUnit=MIN,
majorGridStep=10,
labelUnit=MIN,
labelStep=5,
format="%H:%M",
maxInterval=6*HOUR),
dict(seconds=5,
minorGridUnit=MIN,
minorGridStep=2,
majorGridUnit=MIN,
majorGridStep=10,
labelUnit=MIN,
labelStep=10,
format="%H:%M",
maxInterval=12*HOUR),
dict(seconds=10,
minorGridUnit=MIN,
minorGridStep=5,
majorGridUnit=MIN,
majorGridStep=20,
labelUnit=MIN,
labelStep=20,
format="%H:%M",
maxInterval=1*DAY),
dict(seconds=30,
minorGridUnit=MIN,
minorGridStep=10,
majorGridUnit=HOUR,
majorGridStep=1,
labelUnit=HOUR,
labelStep=1,
format="%H:%M",
maxInterval=2*DAY),
dict(seconds=60,
minorGridUnit=MIN,
minorGridStep=30,
majorGridUnit=HOUR,
majorGridStep=2,
labelUnit=HOUR,
labelStep=2,
format="%H:%M",
maxInterval=2*DAY),
dict(seconds=100,
minorGridUnit=HOUR,
minorGridStep=2,
majorGridUnit=HOUR,
majorGridStep=4,
labelUnit=HOUR,
labelStep=4,
format="%a %H:%M",
maxInterval=6*DAY),
dict(seconds=255,
minorGridUnit=HOUR,
minorGridStep=6,
majorGridUnit=HOUR,
majorGridStep=12,
labelUnit=HOUR,
labelStep=12,
format="%m/%d %H:%M",
maxInterval=10*DAY),
dict(seconds=600,
minorGridUnit=HOUR,
minorGridStep=6,
majorGridUnit=DAY,
majorGridStep=1,
labelUnit=DAY,
labelStep=1,
format="%m/%d",
maxInterval=14*DAY),
dict(seconds=1000,
minorGridUnit=HOUR,
minorGridStep=12,
majorGridUnit=DAY,
majorGridStep=1,
labelUnit=DAY,
labelStep=1,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=2000,
minorGridUnit=DAY,
minorGridStep=1,
majorGridUnit=DAY,
majorGridStep=2,
labelUnit=DAY,
labelStep=2,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=4000,
minorGridUnit=DAY,
minorGridStep=2,
majorGridUnit=DAY,
majorGridStep=4,
labelUnit=DAY,
labelStep=4,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=8000,
minorGridUnit=DAY,
minorGridStep=3.5,
majorGridUnit=DAY,
majorGridStep=7,
labelUnit=DAY,
labelStep=7,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=16000,
minorGridUnit=DAY,
minorGridStep=7,
majorGridUnit=DAY,
majorGridStep=14,
labelUnit=DAY,
labelStep=14,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=32000,
minorGridUnit=DAY,
minorGridStep=15,
majorGridUnit=DAY,
majorGridStep=30,
labelUnit=DAY,
labelStep=30,
format="%m/%d",
maxInterval=365*DAY),
dict(seconds=64000,
minorGridUnit=DAY,
minorGridStep=30,
majorGridUnit=DAY,
majorGridStep=60,
labelUnit=DAY,
labelStep=60,
format="%m/%d %Y"),
dict(seconds=100000,
minorGridUnit=DAY,
minorGridStep=60,
majorGridUnit=DAY,
majorGridStep=120,
labelUnit=DAY,
labelStep=120,
format="%m/%d %Y"),
dict(seconds=120000,
minorGridUnit=DAY,
minorGridStep=120,
majorGridUnit=DAY,
majorGridStep=240,
labelUnit=DAY,
labelStep=240,
format="%m/%d %Y"),
)
UnitSystems = {
'binary': (
('Pi', 1024.0**5),
('Ti', 1024.0**4),
('Gi', 1024.0**3),
('Mi', 1024.0**2),
('Ki', 1024.0)),
'si': (
('P', 1000.0**5),
('T', 1000.0**4),
('G', 1000.0**3),
('M', 1000.0**2),
('K', 1000.0)),
'sec': (
('Y', 60*60*24*365),
('M', 60*60*24*30),
('D', 60*60*24),
('H', 60*60),
('m', 60)),
'msec': (
('Y', 60*60*24*365*1000),
('M', 60*60*24*30*1000),
('D', 60*60*24*1000),
('H', 60*60*1000),
('m', 60*1000),
('s', 1000)),
'none': [],
}
def force_text(value):
if not isinstance(value, six.string_types):
value = six.text_type(value)
return value
# We accept values fractionally outside of nominal limits, so that
# rounding errors don't cause weird effects. Since our goal is to
# create plots, and the maximum resolution of the plots is likely to
# be less than 10000 pixels, errors smaller than this size shouldn't
# create any visible effects.
EPSILON = 0.0001
class GraphError(Exception):
pass
class _AxisTics:
def __init__(self, minValue, maxValue, unitSystem=None):
self.minValue = self.checkFinite(minValue, "data value")
self.minValueSource = 'data'
self.maxValue = self.checkFinite(maxValue, "data value")
self.maxValueSource = 'data'
self.unitSystem = unitSystem
@staticmethod
def checkFinite(value, name='value'):
"""Check that value is a finite number.
If it is, return it. If not, raise GraphError describing the
problem, using name in the error message.
"""
if math.isnan(value):
raise GraphError('Encountered NaN %s' % (name,))
elif math.isinf(value):
raise GraphError('Encountered infinite %s' % (name,))
return value
@staticmethod
def chooseDelta(x):
"""Choose a reasonable axis range given that one limit is x.
        Given that one end of the axis range (i.e., minValue or maxValue) is
x, choose a reasonable distance to the other limit.
"""
if abs(x) < 1.0e-9:
return 1.0
else:
return 0.1 * abs(x)
def reconcileLimits(self):
"""If self.minValue is not less than self.maxValue, fix the problem.
If self.minValue is not less than self.maxValue, adjust
self.minValue and/or self.maxValue (depending on which was not
specified explicitly by the user) to make self.minValue <
self.maxValue. If the user specified both limits explicitly, then
raise GraphError.
"""
if self.minValue < self.maxValue:
# The limits are already OK.
return
minFixed = (self.minValueSource in ['min'])
maxFixed = (self.maxValueSource in ['max', 'limit'])
if minFixed and maxFixed:
raise GraphError('The %s must be less than the %s' %
(self.minValueSource, self.maxValueSource))
elif minFixed:
self.maxValue = self.minValue + self.chooseDelta(self.minValue)
elif maxFixed:
self.minValue = self.maxValue - self.chooseDelta(self.maxValue)
else:
delta = self.chooseDelta(max(abs(self.minValue),
abs(self.maxValue)))
average = (self.minValue + self.maxValue) / 2.0
self.minValue = average - delta
self.maxValue = average + delta
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None):
"""Apply the specified settings to this axis.
Set self.minValue, self.minValueSource, self.maxValue,
self.maxValueSource, and self.axisLimit reasonably based on the
parameters provided.
Arguments:
axisMin -- a finite number, or None to choose a round minimum
limit that includes all of the data.
axisMax -- a finite number, 'max' to use the maximum value
contained in the data, or None to choose a round maximum limit
that includes all of the data.
axisLimit -- a finite number to use as an upper limit on maxValue,
or None to impose no upper limit.
"""
if axisMin is not None and not math.isnan(axisMin):
self.minValueSource = 'min'
self.minValue = self.checkFinite(axisMin, 'axis min')
if axisMax == 'max':
self.maxValueSource = 'extremum'
elif axisMax is not None and not math.isnan(axisMax):
self.maxValueSource = 'max'
self.maxValue = self.checkFinite(axisMax, 'axis max')
if axisLimit is None or math.isnan(axisLimit):
self.axisLimit = None
elif axisLimit < self.maxValue:
self.maxValue = self.checkFinite(axisLimit, 'axis limit')
self.maxValueSource = 'limit'
# The limit has already been imposed, so there is no need to
# remember it:
self.axisLimit = None
elif math.isinf(axisLimit):
# It must be positive infinity, which is the same as no limit:
self.axisLimit = None
else:
# We still need to remember axisLimit to avoid rounding top to
# a value larger than axisLimit:
self.axisLimit = axisLimit
self.reconcileLimits()
def makeLabel(self, value):
"""Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem.
"""
value, prefix = format_units(value, self.step,
system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step,
system=self.unitSystem)
if prefix:
prefix += " "
if value < 0.1:
return "%g %s" % (float(value), prefix)
elif value < 1.0:
return "%.2f %s" % (float(value), prefix)
if span > 10 or spanPrefix != prefix:
if type(value) is float:
return "%.1f %s" % (value, prefix)
else:
return "%d %s" % (int(value), prefix)
elif span > 3:
return "%.1f %s" % (float(value), prefix)
elif span > 0.1:
return "%.2f %s" % (float(value), prefix)
else:
return "%g %s" % (float(value), prefix)
class _LinearAxisTics(_AxisTics):
"""Axis ticmarks with uniform spacing."""
def __init__(self, minValue, maxValue, unitSystem=None):
_AxisTics.__init__(self, minValue, maxValue, unitSystem=unitSystem)
self.step = None
self.span = None
self.binary = None
def setStep(self, step):
"""Set the size of steps between ticmarks."""
self.step = self.checkFinite(float(step), 'axis step')
def generateSteps(self, minStep):
"""Generate allowed steps with step >= minStep in increasing order."""
self.checkFinite(minStep)
if self.binary:
base = 2.0
mantissas = [1.0]
exponent = math.floor(math.log(minStep, 2) - EPSILON)
else:
base = 10.0
mantissas = [1.0, 2.0, 5.0]
exponent = math.floor(math.log10(minStep) - EPSILON)
while True:
multiplier = base ** exponent
for mantissa in mantissas:
value = mantissa * multiplier
if value >= minStep * (1.0 - EPSILON):
yield value
exponent += 1
def computeSlop(self, step, divisor):
"""Compute the slop that would result from step and divisor.
Return the slop, or None if this combination can't cover the full
range. See chooseStep() for the definition of "slop".
"""
bottom = step * math.floor(self.minValue / float(step) + EPSILON)
top = bottom + step * divisor
if top >= self.maxValue - EPSILON * step:
return max(top - self.maxValue, self.minValue - bottom)
else:
return None
def chooseStep(self, divisors=None, binary=False):
"""Choose a nice, pretty size for the steps between axis labels.
Our main constraint is that the number of divisions must be taken
from the divisors list. We pick a number of divisions and a step
size that minimizes the amount of whitespace ("slop") that would
need to be included outside of the range [self.minValue,
self.maxValue] if we were to push out the axis values to the next
larger multiples of the step size.
The minimum step that could possibly cover the variance satisfies
minStep * max(divisors) >= variance
or
minStep = variance / max(divisors)
It's not necessarily possible to cover the variance with a step
that size, but we know that any smaller step definitely *cannot*
cover it. So we can start there.
For a sufficiently large step size, it is definitely possible to
cover the variance, but at some point the slop will start growing.
Let's define the slop to be
slop = max(minValue - bottom, top - maxValue)
Then for a given, step size, we know that
slop >= (1/2) * (step * min(divisors) - variance)
(the factor of 1/2 is for the best-case scenario that the slop is
distributed equally on the two sides of the range). So suppose we
already have a choice that yields bestSlop. Then there is no need
to choose steps so large that the slop is guaranteed to be larger
than bestSlop. Therefore, the maximum step size that we need to
consider is
maxStep = (2 * bestSlop + variance) / min(divisors)
"""
self.binary = binary
if divisors is None:
divisors = [4, 5, 6]
else:
for divisor in divisors:
self.checkFinite(divisor, 'divisor')
if divisor < 1:
raise GraphError('Divisors must be greater than or equal '
'to one')
if self.minValue == self.maxValue:
if self.minValue == 0.0:
self.maxValue = 1.0
elif self.minValue < 0.0:
self.minValue *= 1.1
self.maxValue *= 0.9
else:
self.minValue *= 0.9
self.maxValue *= 1.1
variance = self.maxValue - self.minValue
bestSlop = None
bestStep = None
for step in self.generateSteps(variance / float(max(divisors))):
if (
bestSlop is not None and
step * min(divisors) >= 2 * bestSlop + variance
):
break
for divisor in divisors:
slop = self.computeSlop(step, divisor)
if slop is not None and (bestSlop is None or slop < bestSlop):
bestSlop = slop
bestStep = step
self.step = bestStep
def chooseLimits(self):
if self.minValueSource == 'data':
# Start labels at the greatest multiple of step <= minValue:
self.bottom = self.step * math.floor(
(self.minValue / self.step + EPSILON))
else:
self.bottom = self.minValue
if self.maxValueSource == 'data':
# Extend the top of our graph to the lowest
# step multiple >= maxValue:
self.top = self.step * math.ceil(
(self.maxValue / self.step - EPSILON))
# ...but never exceed a user-specified limit:
if (
self.axisLimit is not None and
self.top > self.axisLimit + EPSILON * self.step
):
self.top = self.axisLimit
else:
self.top = self.maxValue
self.span = self.top - self.bottom
if self.span == 0:
self.top += 1
self.span += 1
def getLabelValues(self):
if self.step <= 0.0:
raise GraphError('The step size must be positive')
if self.span > 1000.0 * self.step:
# This is insane. Pick something that won't cause trouble:
self.chooseStep()
values = []
start = self.step * math.ceil(self.bottom / self.step - EPSILON)
i = 0
while True:
value = start + i * self.step
if value > self.top + EPSILON * self.step:
break
values.append(value)
i += 1
return values
class _LogAxisTics(_AxisTics):
def __init__(self, minValue, maxValue, unitSystem=None, base=10.0):
_AxisTics.__init__(self, minValue, maxValue, unitSystem=unitSystem)
if base <= 1.0:
raise GraphError('Logarithmic base must be greater than one')
self.base = self.checkFinite(base, 'log base')
self.step = None
self.span = None
def setStep(self, step):
# step is ignored for Logarithmic tics:
self.step = None
def chooseStep(self, divisors=None, binary=False):
# step is ignored for Logarithmic tics:
self.step = None
def chooseLimits(self):
if self.minValue <= 0:
raise GraphError('Logarithmic scale specified with a dataset with '
'a minimum value less than or equal to zero')
self.bottom = math.pow(self.base,
math.floor(math.log(self.minValue, self.base)))
self.top = math.pow(self.base,
math.ceil(math.log(self.maxValue, self.base)))
self.span = self.top - self.bottom
if self.span == 0:
self.top *= self.base
self.span = self.top - self.bottom
def getLabelValues(self):
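        # For example, with bottom=1, top=1000 and base=10 this returns
        # [1, 10, 100, 1000].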
values = []
value = math.pow(self.base,
math.ceil(math.log(self.bottom, self.base) - EPSILON))
while value < self.top * (1.0 + EPSILON):
values.append(value)
value *= self.base
return values
class Graph(object):
customizable = ('width', 'height', 'margin', 'bgcolor', 'fgcolor',
'fontName', 'fontSize', 'fontBold', 'fontItalic',
'colorList', 'template', 'yAxisSide', 'outputFormat')
def __init__(self, **params):
self.params = params
self.data = params['data']
self.dataLeft = []
self.dataRight = []
self.secondYAxis = False
self.width = int(params.get('width', 200))
self.height = int(params.get('height', 200))
self.margin = int(params.get('margin', 10))
self.userTimeZone = params.get('tz')
self.logBase = params.get('logBase', None)
self.minorY = int(params.get('minorY', 1))
if self.logBase:
if self.logBase == 'e':
self.logBase = math.e
elif self.logBase <= 1:
self.logBase = None
params['logBase'] = None
else:
self.logBase = float(self.logBase)
if self.margin < 0:
self.margin = 10
self.setupCairo(params.get('outputFormat', 'png').lower())
self.area = {
'xmin': self.margin + 10, # Need extra room when the time is
# near the left edge
'xmax': self.width - self.margin,
'ymin': self.margin,
'ymax': self.height - self.margin,
}
self.loadTemplate(params.get('template', 'default'))
opts = self.ctx.get_font_options()
opts.set_antialias(cairo.ANTIALIAS_NONE)
self.ctx.set_font_options(opts)
self.foregroundColor = params.get('fgcolor', self.defaultForeground)
self.backgroundColor = params.get('bgcolor', self.defaultBackground)
self.setColor(self.backgroundColor)
self.drawRectangle(0, 0, self.width, self.height)
if 'colorList' in params:
colorList = unquote_plus(str(params['colorList'])).split(',')
else:
colorList = self.defaultColorList
self.colors = itertools.cycle(colorList)
self.drawGraph(**params)
def setupCairo(self, outputFormat='png'):
self.outputFormat = outputFormat
if outputFormat == 'png':
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.width, self.height)
elif outputFormat == 'svg':
self.surfaceData = BytesIO()
self.surface = cairo.SVGSurface(self.surfaceData,
self.width, self.height)
elif outputFormat == 'pdf':
self.surfaceData = BytesIO()
self.surface = cairo.PDFSurface(self.surfaceData,
self.width, self.height)
res_x, res_y = self.surface.get_fallback_resolution()
self.width = float(self.width / res_x) * 72
self.height = float(self.height / res_y) * 72
self.surface.set_size(self.width, self.height)
self.ctx = cairo.Context(self.surface)
def setColor(self, value, alpha=1.0, forceAlpha=False):
if isinstance(value, tuple) and len(value) == 3:
r, g, b = value
elif value in colorAliases:
r, g, b = colorAliases[value]
elif isinstance(value, six.string_types) and len(value) >= 6:
s = value
if s.startswith('#'):
s = s[1:]
if s.startswith('%23'):
s = s[3:]
r, g, b = (int(s[0:2], base=16), int(s[2:4], base=16),
int(s[4:6], base=16))
if len(s) == 8 and not forceAlpha:
alpha = int(s[6:8], base=16) / 255.0
elif isinstance(value, int) and len(str(value)) == 6:
s = str(value)
r, g, b = (int(s[0:2], base=16), int(s[2:4], base=16),
int(s[4:6], base=16))
else:
raise ValueError("Must specify an RGB 3-tuple, an html color "
"string, or a known color alias!")
r, g, b = [float(c) / 255.0 for c in (r, g, b)]
self.ctx.set_source_rgba(r, g, b, alpha)
def setFont(self, **params):
p = self.defaultFontParams.copy()
p.update(params)
self.ctx.select_font_face(p['name'], p['italic'], p['bold'])
self.ctx.set_font_size(float(p['size']))
def getExtents(self, text=None):
F = self.ctx.font_extents()
extents = {'maxHeight': F[2], 'maxAscent': F[0], 'maxDescent': F[1]}
if text is not None:
T = self.ctx.text_extents(text)
extents['width'] = T[4]
extents['height'] = T[3]
return extents
def drawRectangle(self, x, y, w, h, fill=True):
if not fill:
# offset for borders so they are drawn as lines would be
o = self.ctx.get_line_width() / 2.0
x += o
y += o
w -= o
h -= o
self.ctx.rectangle(x, y, w, h)
if fill:
self.ctx.fill()
else:
self.ctx.set_dash([], 0)
self.ctx.stroke()
def drawText(self, text, x, y, align='left', valign='top', rotate=0):
extents = self.getExtents(text)
angle = math.radians(rotate)
origMatrix = self.ctx.get_matrix()
horizontal = {
'left': 0,
'center': extents['width'] / 2,
'right': extents['width'],
}[align.lower()]
vertical = {
'top': extents['maxAscent'],
'middle': extents['maxHeight'] / 2 - extents['maxDescent'],
'bottom': -extents['maxDescent'],
'baseline': 0,
}[valign.lower()]
self.ctx.move_to(x, y)
self.ctx.rel_move_to(math.sin(angle) * -vertical,
math.cos(angle) * vertical)
self.ctx.rotate(angle)
self.ctx.rel_move_to(-horizontal, 0)
bx, by = self.ctx.get_current_point()
by -= extents['maxAscent']
self.ctx.text_path(text)
self.ctx.fill()
self.ctx.set_matrix(origMatrix)
def drawTitle(self, text):
self.encodeHeader('title')
y = self.area['ymin']
x = self.width / 2
lineHeight = self.getExtents()['maxHeight']
for line in text.split('\n'):
self.drawText(line, x, y, align='center')
y += lineHeight
if self.params.get('yAxisSide') == 'right':
self.area['ymin'] = y
else:
self.area['ymin'] = y + self.margin
def drawLegend(self, elements, unique=False):
# elements is [ (name,color,rightSide), (name,color,rightSide), ... ]
self.encodeHeader('legend')
if unique:
# remove duplicate names
namesSeen = []
newElements = []
for e in elements:
if e[0] not in namesSeen:
namesSeen.append(e[0])
newElements.append(e)
elements = newElements
# Check if there's enough room to use two columns.
rightSideLabels = False
padding = 5
longestName = sorted([e[0] for e in elements], key=len)[-1]
# Double it to check if there's enough room for 2 columns
testSizeName = longestName + " " + longestName
testExt = self.getExtents(testSizeName)
testBoxSize = testExt['maxHeight'] - 1
testWidth = testExt['width'] + 2 * (testBoxSize + padding)
if testWidth + 50 < self.width:
rightSideLabels = True
if self.secondYAxis and rightSideLabels:
extents = self.getExtents(longestName)
padding = 5
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = max(1, math.floor(
(self.width - self.area['xmin']) / labelWidth))
numRight = len([name for (name, color, rightSide) in elements
if rightSide])
numberOfLines = max(len(elements) - numRight, numRight)
columns = math.floor(columns / 2.0)
columns = max(columns, 1)
legendHeight = max(
1, (numberOfLines / columns)) * (lineHeight + padding)
# scoot the drawing area up to fit the legend
self.area['ymax'] -= legendHeight
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
n = 0
xRight = self.area['xmax'] - self.area['xmin']
yRight = y
nRight = 0
for name, color, rightSide in elements:
self.setColor(color)
if rightSide:
nRight += 1
self.drawRectangle(xRight - padding, yRight,
boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(xRight - padding, yRight,
boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, xRight - boxSize, yRight,
align='right')
xRight -= labelWidth
if nRight % columns == 0:
xRight = self.area['xmax'] - self.area['xmin']
yRight += lineHeight
else:
n += 1
self.drawRectangle(x, y, boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x, y, boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if n % columns == 0:
x = self.area['xmin']
y += lineHeight
else:
extents = self.getExtents(longestName)
boxSize = extents['maxHeight'] - 1
lineHeight = extents['maxHeight'] + 1
labelWidth = extents['width'] + 2 * (boxSize + padding)
columns = math.floor(self.width / labelWidth)
columns = max(columns, 1)
numberOfLines = math.ceil(float(len(elements)) / columns)
legendHeight = numberOfLines * (lineHeight + padding)
# scoot the drawing area up to fit the legend
self.area['ymax'] -= legendHeight
self.ctx.set_line_width(1.0)
x = self.area['xmin']
y = self.area['ymax'] + (2 * padding)
for i, (name, color, rightSide) in enumerate(elements):
if rightSide:
self.setColor(color)
self.drawRectangle(x + labelWidth + padding, y,
boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x + labelWidth + padding, y,
boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + labelWidth, y, align='right')
x += labelWidth
else:
self.setColor(color)
self.drawRectangle(x, y, boxSize, boxSize)
self.setColor('darkgrey')
self.drawRectangle(x, y, boxSize, boxSize, fill=False)
self.setColor(self.foregroundColor)
self.drawText(name, x + boxSize + padding, y, align='left')
x += labelWidth
if (i + 1) % columns == 0:
x = self.area['xmin']
y += lineHeight
def encodeHeader(self, text):
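        # Encode `text` as an invisible path starting at (-88, -88); each
        # character becomes a line_to(-ord(char), -i-1) segment, e.g. 'grid'
        # passes through (-103, -1), (-114, -2), (-105, -3), (-100, -4).
        # For SVG output these sentinel paths are found again in output()
        # and rewritten into <g> section wrappers.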
self.ctx.save()
self.setColor(self.backgroundColor)
self.ctx.move_to(-88, -88) # identifier
for i, char in enumerate(text):
self.ctx.line_to(-ord(char), -i-1)
self.ctx.stroke()
self.ctx.restore()
def loadTemplate(self, template):
from ..app import app
conf = app.config.get('templates', {})
        # Use copies so the module-level defaultGraphOptions is never mutated.
        defaults = dict(defaultGraphOptions)
        defaults.update(conf.get('defaults', {}))
        opts = dict(defaults)
        opts.update(conf.get(template, {}))
self.defaultBackground = opts.get('background', defaults['background'])
self.defaultForeground = opts.get('foreground', defaults['foreground'])
self.defaultMajorGridLineColor = opts.get('majorline',
defaults['majorline'])
self.defaultMinorGridLineColor = opts.get('minorline',
defaults['minorline'])
self.defaultColorList = [
c.strip() for c in opts.get('lineColors',
defaults['lineColors'])]
fontName = opts.get('fontname', defaults['fontname'])
fontSize = float(opts.get('fontsize', defaults['fontsize']))
fontBold = opts.get('fontbold', defaults['fontbold']).lower() == 'true'
fontItalic = opts.get('fontitalic',
defaults['fontitalic']).lower() == 'true'
self.defaultFontParams = {
'name': self.params.get('fontName', fontName),
'size': int(self.params.get('fontSize', fontSize)),
'bold': self.params.get('fontBold', fontBold),
'italic': self.params.get('fontItalic', fontItalic),
}
def output(self, fileObj):
if self.outputFormat == 'png':
self.surface.write_to_png(fileObj)
elif self.outputFormat == 'pdf':
self.surface.finish()
pdfData = self.surfaceData.getvalue()
self.surfaceData.close()
fileObj.write(pdfData)
else:
if hasattr(self, 'startTime'):
has_data = True
metaData = {
'x': {
'start': self.startTime,
'end': self.endTime
},
'options': {
'lineWidth': self.lineWidth
},
'font': self.defaultFontParams,
'area': self.area,
'series': []
}
if not self.secondYAxis:
metaData['y'] = {
'top': self.yTop,
'bottom': self.yBottom,
'step': self.yStep,
'labels': self.yLabels,
'labelValues': self.yLabelValues
}
for series in self.data:
if 'stacked' not in series.options:
metaData['series'].append({
'name': series.name,
'start': series.start,
'end': series.end,
'step': series.step,
'valuesPerPoint': series.valuesPerPoint,
'color': series.color,
'data': series,
'options': series.options
})
else:
has_data = False
metaData = {}
self.surface.finish()
svgData = self.surfaceData.getvalue()
self.surfaceData.close()
# we expect height/width in pixels, not points
svgData = svgData.decode().replace('pt"', 'px"', 2)
svgData = svgData.replace('</svg>\n', '', 1)
svgData = svgData.replace('</defs>\n<g',
'</defs>\n<g class="graphite"', 1)
if has_data:
# We encode headers using special paths with d^="M -88 -88"
# Find these, and turn them into <g> wrappers instead
def onHeaderPath(match):
name = ''
for char in re.findall(r'L -(\d+) -\d+', match.group(1)):
name += chr(int(char))
return '</g><g data-header="true" class="%s">' % name
(svgData, subsMade) = re.subn(r'<path.+?d="M -88 -88 (.+?)"/>',
onHeaderPath, svgData)
# Replace the first </g><g> with <g>, and close out the
# last </g> at the end
svgData = svgData.replace('</g><g data-header',
'<g data-header', 1)
if subsMade > 0:
svgData += "</g>"
svgData = svgData.replace(' data-header="true"', '')
fileObj.write(svgData.encode())
fileObj.write(("""<script>
<![CDATA[
metadata = %s
]]>
</script>
</svg>""" % json.dumps(metaData)).encode())
class LineGraph(Graph):
customizable = Graph.customizable + (
'title', 'vtitle', 'lineMode', 'lineWidth', 'hideLegend', 'hideAxes',
'minXStep', 'hideGrid', 'majorGridLineColor', 'minorGridLineColor',
'thickness', 'min', 'max', 'graphOnly', 'yMin', 'yMax', 'yLimit',
'yStep', 'areaMode', 'areaAlpha', 'drawNullAsZero', 'tz', 'yAxisSide',
'pieMode', 'yUnitSystem', 'logBase', 'yMinLeft', 'yMinRight',
'yMaxLeft', 'yMaxRight', 'yLimitLeft', 'yLimitRight', 'yStepLeft',
'yStepRight', 'rightWidth', 'rightColor', 'rightDashed', 'leftWidth',
'leftColor', 'leftDashed', 'xFormat', 'minorY', 'hideYAxis',
'hideXAxis', 'uniqueLegend', 'vtitleRight', 'yDivisors',
'connectedLimit', 'hideNullFromLegend')
validLineModes = ('staircase', 'slope', 'connected')
validAreaModes = ('none', 'first', 'all', 'stacked')
validPieModes = ('maximum', 'minimum', 'average')
def drawGraph(self, **params):
# Make sure we've got datapoints to draw
if self.data:
startTime = min([series.start for series in self.data])
endTime = max([series.end for series in self.data])
timeRange = endTime - startTime
else:
timeRange = None
if not timeRange:
x = self.width / 2
y = self.height / 2
self.setColor('red')
self.setFont(size=math.log(self.width * self.height))
self.drawText("No Data", x, y, align='center')
return
# Determine if we're doing a 2 y-axis graph.
for series in self.data:
if 'secondYAxis' in series.options:
self.dataRight.append(series)
else:
self.dataLeft.append(series)
if len(self.dataRight) > 0:
self.secondYAxis = True
        # API compatibility hacks
if params.get('graphOnly', False):
params['hideLegend'] = True
params['hideGrid'] = True
params['hideAxes'] = True
params['hideXAxis'] = False
params['hideYAxis'] = False
params['yAxisSide'] = 'left'
params['title'] = ''
params['vtitle'] = ''
params['margin'] = 0
params['tz'] = ''
self.margin = 0
self.area['xmin'] = 0
self.area['xmax'] = self.width
self.area['ymin'] = 0
self.area['ymax'] = self.height
if 'yMin' not in params and 'min' in params:
params['yMin'] = params['min']
if 'yMax' not in params and 'max' in params:
params['yMax'] = params['max']
if 'lineWidth' not in params and 'thickness' in params:
params['lineWidth'] = params['thickness']
if 'yAxisSide' not in params:
params['yAxisSide'] = 'left'
if 'yUnitSystem' not in params:
params['yUnitSystem'] = 'si'
else:
params['yUnitSystem'] = force_text(params['yUnitSystem']).lower()
if params['yUnitSystem'] not in UnitSystems.keys():
params['yUnitSystem'] = 'si'
self.params = params
# Don't do any of the special right y-axis stuff if we're drawing 2
# y-axes.
if self.secondYAxis:
params['yAxisSide'] = 'left'
# When Y Axis is labeled on the right, we subtract x-axis positions
# from the max, instead of adding to the minimum
if self.params.get('yAxisSide') == 'right':
self.margin = self.width
# Now to setup our LineGraph specific options
self.lineWidth = float(params.get('lineWidth', 1.2))
self.lineMode = params.get('lineMode', 'slope').lower()
self.connectedLimit = params.get("connectedLimit", INFINITY)
assert self.lineMode in self.validLineModes, "Invalid line mode!"
self.areaMode = params.get('areaMode', 'none').lower()
assert self.areaMode in self.validAreaModes, "Invalid area mode!"
self.pieMode = params.get('pieMode', 'maximum').lower()
assert self.pieMode in self.validPieModes, "Invalid pie mode!"
# Line mode slope does not work (or even make sense) for series that
# have only one datapoint. So if any series have one datapoint we
# force staircase mode.
if self.lineMode == 'slope':
for series in self.data:
if len(series) == 1:
self.lineMode = 'staircase'
break
if self.secondYAxis:
for series in self.data:
if 'secondYAxis' in series.options:
if 'rightWidth' in params:
series.options['lineWidth'] = params['rightWidth']
if 'rightDashed' in params:
series.options['dashed'] = params['rightDashed']
if 'rightColor' in params:
series.color = params['rightColor']
else:
if 'leftWidth' in params:
series.options['lineWidth'] = params['leftWidth']
if 'leftDashed' in params:
series.options['dashed'] = params['leftDashed']
if 'leftColor' in params:
series.color = params['leftColor']
for series in self.data:
if not hasattr(series, 'color'):
series.color = next(self.colors)
titleSize = self.defaultFontParams['size'] + math.floor(
math.log(self.defaultFontParams['size']))
self.setFont(size=titleSize)
self.setColor(self.foregroundColor)
if params.get('title'):
self.drawTitle(force_text(params['title']))
if params.get('vtitle'):
self.drawVTitle(force_text(params['vtitle']))
if self.secondYAxis and params.get('vtitleRight'):
self.drawVTitle(force_text(params['vtitleRight']), rightAlign=True)
self.setFont()
if not params.get('hideLegend', len(self.data) > 10):
elements = []
hideNull = params.get('hideNullFromLegend', False)
for series in self.data:
if series.name:
if not(hideNull and all(v is None for v in list(series))):
elements.append((
unquote_plus(series.name),
series.color,
series.options.get('secondYAxis')))
if len(elements) > 0:
self.drawLegend(elements, params.get('uniqueLegend', False))
# Setup axes, labels, and grid
# First we adjust the drawing area size to fit X-axis labels
if (
not self.params.get('hideAxes', False) and
not self.params.get('hideXAxis', False)
):
self.area['ymax'] -= self.getExtents()['maxAscent'] * 2
self.startTime = min([series.start for series in self.data])
if (
self.lineMode == 'staircase' or
set([len(series) for series in self.data]) == set([2])
):
self.endTime = max([series.end for series in self.data])
else:
self.endTime = max([
(series.end - series.step) for series in self.data])
self.timeRange = self.endTime - self.startTime
# Now we consolidate our data points to fit in the currently estimated
# drawing area
self.consolidateDataPoints()
self.encodeHeader('axes')
# Now its time to fully configure the Y-axis and determine the space
# required for Y-axis labels. Since we'll probably have to squeeze the
# drawing area to fit the Y labels, we may need to reconsolidate our
# data points, which in turn means re-scaling the Y axis, this process
# will repeat until we have accurate Y labels and enough space to fit
# our data points
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis:
self.setupTwoYAxes()
else:
self.setupYAxis()
while (
currentXMin != self.area['xmin'] or
currentXMax != self.area['xmax']
): # see if the Y-labels require more space
# this can cause the Y values to change
self.consolidateDataPoints()
# so let's keep track of the previous Y-label space requirements
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis: # and recalculate their new requirements
self.setupTwoYAxes()
else:
self.setupYAxis()
# Now that our Y-axis is finalized, let's determine our X labels (this
# won't affect the drawing area)
self.setupXAxis()
if not self.params.get('hideAxes', False):
self.drawLabels()
if not self.params.get('hideGrid', False):
# hideAxes implies hideGrid
self.encodeHeader('grid')
self.drawGridLines()
# Finally, draw the graph lines
self.encodeHeader('lines')
self.drawLines()
def drawVTitle(self, text, rightAlign=False):
lineHeight = self.getExtents()['maxHeight']
if rightAlign:
self.encodeHeader('vtitleRight')
x = self.area['xmax'] - lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=90)
x -= lineHeight
self.area['xmax'] = x - self.margin - lineHeight
else:
self.encodeHeader('vtitle')
x = self.area['xmin'] + lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=270)
x += lineHeight
self.area['xmin'] = x + self.margin + lineHeight
def getYCoord(self, value, side=None):
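        # Map a data value to a pixel y coordinate within the plot area. For
        # logarithmic scales both the value and the axis limits are converted
        # to log space first, so equal ratios map to equal pixel distances.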
if "left" == side:
yLabelValues = self.yLabelValuesL
yTop = self.yTopL
yBottom = self.yBottomL
elif "right" == side:
yLabelValues = self.yLabelValuesR
yTop = self.yTopR
yBottom = self.yBottomR
else:
yLabelValues = self.yLabelValues
yTop = self.yTop
yBottom = self.yBottom
try:
highestValue = max(yLabelValues)
lowestValue = min(yLabelValues)
except ValueError:
highestValue = yTop
lowestValue = yBottom
pixelRange = self.area['ymax'] - self.area['ymin']
relativeValue = value - lowestValue
valueRange = highestValue - lowestValue
if self.logBase:
if value <= 0:
return None
relativeValue = (
math.log(value, self.logBase) -
math.log(lowestValue, self.logBase))
valueRange = math.log(highestValue, self.logBase) - math.log(
lowestValue, self.logBase)
pixelToValueRatio = pixelRange / valueRange
valueInPixels = pixelToValueRatio * relativeValue
return self.area['ymax'] - valueInPixels
def drawLines(self, width=None, dash=None, linecap='butt',
linejoin='miter'):
if not width:
width = self.lineWidth
self.ctx.set_line_width(width)
originalWidth = width
width = float(int(width) % 2) / 2
if dash:
self.ctx.set_dash(dash, 1)
else:
self.ctx.set_dash([], 0)
self.ctx.set_line_cap({
'butt': cairo.LINE_CAP_BUTT,
'round': cairo.LINE_CAP_ROUND,
'square': cairo.LINE_CAP_SQUARE,
}[linecap])
self.ctx.set_line_join({
'miter': cairo.LINE_JOIN_MITER,
'round': cairo.LINE_JOIN_ROUND,
'bevel': cairo.LINE_JOIN_BEVEL,
}[linejoin])
        # check whether there is a stacked metric
singleStacked = False
for series in self.data:
if 'stacked' in series.options:
singleStacked = True
if singleStacked:
self.data = sort_stacked(self.data)
# stack the values
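        # For example, in stacked area mode two series with values [1, 2, 3]
        # and [4, 5, 6] are redrawn as [1, 2, 3] and [5, 7, 9], so each area
        # sits on top of the cumulative total beneath it.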
if self.areaMode == 'stacked' and not self.secondYAxis:
# TODO Allow stacked area mode with secondYAxis
total = []
for series in self.data:
if 'drawAsInfinite' in series.options:
continue
series.options['stacked'] = True
for i in range(len(series)):
if len(total) <= i:
total.append(0)
if series[i] is not None:
original = series[i]
series[i] += total[i]
total[i] += original
elif self.areaMode == 'first':
self.data[0].options['stacked'] = True
elif self.areaMode == 'all':
for series in self.data:
if 'drawAsInfinite' not in series.options:
series.options['stacked'] = True
# apply alpha channel and create separate stroke series
if self.params.get('areaAlpha'):
try:
alpha = float(self.params['areaAlpha'])
except ValueError:
alpha = 0.5
strokeSeries = []
for series in self.data:
if 'stacked' in series.options:
series.options['alpha'] = alpha
newSeries = TimeSeries(
series.name, series.start, series.end,
series.step * series.valuesPerPoint,
[x for x in series])
newSeries.xStep = series.xStep
newSeries.color = series.color
if 'secondYAxis' in series.options:
newSeries.options['secondYAxis'] = True
strokeSeries.append(newSeries)
self.data += strokeSeries
# setup the clip region
self.ctx.set_line_width(1.0)
self.ctx.rectangle(self.area['xmin'], self.area['ymin'],
self.area['xmax'] - self.area['xmin'],
self.area['ymax'] - self.area['ymin'])
self.ctx.clip()
self.ctx.set_line_width(originalWidth)
# save clip to restore once stacked areas are drawn
self.ctx.save()
clipRestored = False
for series in self.data:
if 'stacked' not in series.options:
# stacked areas are always drawn first. if this series is not
# stacked, we finished stacking. reset the clip region so
# lines can show up on top of the stacked areas.
if not clipRestored:
clipRestored = True
self.ctx.restore()
if 'lineWidth' in series.options:
self.ctx.set_line_width(series.options['lineWidth'])
if 'dashed' in series.options:
self.ctx.set_dash([series.options['dashed']], 1)
else:
self.ctx.set_dash([], 0)
            # Shift the beginning of the drawing area to the start of the
            # series if the graph itself covers a larger time range
missingPoints = (series.start - self.startTime) / series.step
startShift = series.xStep * (missingPoints / series.valuesPerPoint)
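            # For example, a series starting 300s after self.startTime with
            # step=60 and valuesPerPoint=1 is missing 5 points, so its line
            # begins about 5 * xStep pixels right of the plot's left edge.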
x = float(self.area['xmin']) + startShift + (self.lineWidth / 2.0)
y = float(self.area['ymin'])
startX = x
if series.options.get('invisible'):
self.setColor(series.color, 0, True)
else:
self.setColor(series.color,
series.options.get('alpha') or 1.0)
# The number of preceding datapoints that had a None value.
consecutiveNones = 0
for index, value in enumerate(series):
if value != value: # convert NaN to None
value = None
if value is None and self.params.get('drawNullAsZero'):
value = 0.0
if value is None:
if consecutiveNones == 0:
self.ctx.line_to(x, y)
if 'stacked' in series.options:
# Close off and fill area before unknown interval
if self.secondYAxis:
if 'secondYAxis' in series.options:
self.fillAreaAndClip(
x, y, startX,
self.getYCoord(0, "right"))
else:
self.fillAreaAndClip(
x, y, startX,
self.getYCoord(0, "left"))
else:
self.fillAreaAndClip(x, y, startX,
self.getYCoord(0))
x += series.xStep
consecutiveNones += 1
else:
if self.secondYAxis:
if 'secondYAxis' in series.options:
y = self.getYCoord(value, "right")
else:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if 'drawAsInfinite' in series.options and value > 0:
self.ctx.move_to(x, self.area['ymax'])
self.ctx.line_to(x, self.area['ymin'])
self.ctx.stroke()
x += series.xStep
continue
if consecutiveNones > 0:
startX = x
if self.lineMode == 'staircase':
if consecutiveNones > 0:
self.ctx.move_to(x, y)
else:
self.ctx.line_to(x, y)
x += series.xStep
self.ctx.line_to(x, y)
elif self.lineMode == 'slope':
if consecutiveNones > 0:
self.ctx.move_to(x, y)
self.ctx.line_to(x, y)
x += series.xStep
elif self.lineMode == 'connected':
                        # If the gap is larger than the connectedLimit, or if
                        # this is the first non-None datapoint in the series,
                        # start drawing from that datapoint.
if (
consecutiveNones > self.connectedLimit or
consecutiveNones == index
):
self.ctx.move_to(x, y)
self.ctx.line_to(x, y)
x += series.xStep
consecutiveNones = 0
if 'stacked' in series.options:
if self.lineMode == 'staircase':
xPos = x
else:
xPos = x-series.xStep
if self.secondYAxis:
if 'secondYAxis' in series.options:
areaYFrom = self.getYCoord(0, "right")
else:
areaYFrom = self.getYCoord(0, "left")
else:
areaYFrom = self.getYCoord(0)
self.fillAreaAndClip(xPos, y, startX, areaYFrom)
else:
self.ctx.stroke()
# return to the original line width
self.ctx.set_line_width(originalWidth)
if 'dashed' in series.options:
# if we changed the dash setting before, change it back now
if dash:
self.ctx.set_dash(dash, 1)
else:
self.ctx.set_dash([], 0)
def fillAreaAndClip(self, x, y, startX=None, areaYFrom=None):
startX = (startX or self.area['xmin'])
areaYFrom = (areaYFrom or self.area['ymax'])
pattern = self.ctx.copy_path()
# fill
self.ctx.line_to(x, areaYFrom) # bottom endX
self.ctx.line_to(startX, areaYFrom) # bottom startX
self.ctx.close_path()
if self.areaMode == 'all':
self.ctx.fill_preserve()
else:
self.ctx.fill()
# clip above y axis
self.ctx.append_path(pattern)
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymin']) # top right
self.ctx.line_to(self.area['xmin'], self.area['ymin']) # top left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # yZero startX
# clip below y axis
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymax']) # bottom right
self.ctx.line_to(self.area['xmin'], self.area['ymax']) # bottom left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # yZero startX
self.ctx.close_path()
self.ctx.clip()
def consolidateDataPoints(self):
numberOfPixels = self.graphWidth = (
self.area['xmax'] - self.area['xmin'] - (self.lineWidth + 1))
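        # For example, with roughly 800 drawable pixels, a 24h time range and
        # a 60s series step there are 1440 datapoints; with minXStep=1 the
        # ideal step 800/1440 is too small, so the series is consolidated to
        # 2 points per pixel and xStep becomes (800 * 2) / 1440, about 1.1
        # pixels per consolidated point.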
for series in self.data:
numberOfDataPoints = self.timeRange / series.step
minXStep = float(self.params.get('minXStep', 1.0))
divisor = self.timeRange / series.step or 1
bestXStep = numberOfPixels / divisor
if bestXStep < minXStep:
drawableDataPoints = int(numberOfPixels / minXStep)
pointsPerPixel = math.ceil(
float(numberOfDataPoints) / float(drawableDataPoints))
series.consolidate(pointsPerPixel)
series.xStep = (
numberOfPixels * pointsPerPixel) / numberOfDataPoints
else:
series.xStep = bestXStep
def setupYAxis(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValue, yMaxValue) = dataLimits(self.data, drawNullAsZero,
stacked)
if self.logBase:
yTics = _LogAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTics = _LinearAxisTics(yMinValue, yMaxValue,
unitSystem=self.params.get('yUnitSystem'))
yTics.applySettings(axisMin=self.params.get('yMin'),
axisMax=self.params.get('yMax'),
axisLimit=self.params.get('yLimit'))
if 'yStep' in self.params:
yTics.setStep(self.params['yStep'])
else:
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
yTics.chooseStep(divisors=yDivisors, binary=binary)
yTics.chooseLimits()
# Copy the values we need back out of the yTics object:
self.yStep = yTics.step
self.yBottom = yTics.bottom
self.yTop = yTics.top
self.ySpan = yTics.span
if not self.params.get('hideAxes', False):
# Create and measure the Y-labels
self.yLabelValues = yTics.getLabelValues()
self.yLabels = [yTics.makeLabel(value)
for value in self.yLabelValues]
self.yLabelWidth = max([
self.getExtents(label)['width'] for label in self.yLabels])
if not self.params.get('hideYAxis'):
if self.params.get('yAxisSide') == 'left':
# Scoot the graph over to the left just enough to fit the
# y-labels:
xMin = self.margin + (self.yLabelWidth * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
else:
                    # Scoot the graph over to the right just enough to fit
                    # the y-labels:
xMin = 0
xMax = self.margin - (self.yLabelWidth * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
else:
self.yLabelValues = []
self.yLabels = []
self.yLabelWidth = 0.0
def setupTwoYAxes(self):
drawNullAsZero = self.params.get('drawNullAsZero')
stacked = (self.areaMode == 'stacked')
(yMinValueL, yMaxValueL) = dataLimits(self.dataLeft, drawNullAsZero,
stacked)
(yMinValueR, yMaxValueR) = dataLimits(self.dataRight, drawNullAsZero,
stacked)
# TODO: Allow separate bases for L & R Axes.
if self.logBase:
yTicsL = _LogAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
yTicsR = _LogAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'),
base=self.logBase)
else:
yTicsL = _LinearAxisTics(yMinValueL, yMaxValueL,
unitSystem=self.params.get('yUnitSystem'))
yTicsR = _LinearAxisTics(yMinValueR, yMaxValueR,
unitSystem=self.params.get('yUnitSystem'))
yTicsL.applySettings(axisMin=self.params.get('yMinLeft'),
axisMax=self.params.get('yMaxLeft'),
axisLimit=self.params.get('yLimitLeft'))
yTicsR.applySettings(axisMin=self.params.get('yMinRight'),
axisMax=self.params.get('yMaxRight'),
axisLimit=self.params.get('yLimitRight'))
yDivisors = str(self.params.get('yDivisors', '4,5,6'))
yDivisors = [int(d) for d in yDivisors.split(',')]
binary = self.params.get('yUnitSystem') == 'binary'
if 'yStepLeft' in self.params:
yTicsL.setStep(self.params['yStepLeft'])
else:
yTicsL.chooseStep(divisors=yDivisors, binary=binary)
if 'yStepRight' in self.params:
yTicsR.setStep(self.params['yStepRight'])
else:
yTicsR.chooseStep(divisors=yDivisors, binary=binary)
yTicsL.chooseLimits()
yTicsR.chooseLimits()
# Copy the values we need back out of the yTics objects:
self.yStepL = yTicsL.step
self.yBottomL = yTicsL.bottom
self.yTopL = yTicsL.top
self.ySpanL = yTicsL.span
self.yStepR = yTicsR.step
self.yBottomR = yTicsR.bottom
self.yTopR = yTicsR.top
self.ySpanR = yTicsR.span
# Create and measure the Y-labels
self.yLabelValuesL = yTicsL.getLabelValues()
self.yLabelValuesR = yTicsR.getLabelValues()
self.yLabelsL = [yTicsL.makeLabel(value)
for value in self.yLabelValuesL]
self.yLabelsR = [yTicsR.makeLabel(value)
for value in self.yLabelValuesR]
self.yLabelWidthL = max([
self.getExtents(label)['width'] for label in self.yLabelsL])
self.yLabelWidthR = max([
self.getExtents(label)['width'] for label in self.yLabelsR])
# scoot the graph over to the left just enough to fit the y-labels
# xMin = self.margin + self.margin + (self.yLabelWidthL * 1.02)
xMin = self.margin + (self.yLabelWidthL * 1.02)
if self.area['xmin'] < xMin:
self.area['xmin'] = xMin
# scoot the graph over to the right just enough to fit the y-labels
xMax = self.width - (self.yLabelWidthR * 1.02)
if self.area['xmax'] >= xMax:
self.area['xmax'] = xMax
def setupXAxis(self):
from ..app import app
if self.userTimeZone:
tzinfo = pytz.timezone(self.userTimeZone)
else:
tzinfo = pytz.timezone(app.config['TIME_ZONE'])
self.start_dt = datetime.fromtimestamp(self.startTime, tzinfo)
self.end_dt = datetime.fromtimestamp(self.endTime, tzinfo)
secondsPerPixel = float(self.timeRange) / float(self.graphWidth)
# pixels per second
self.xScaleFactor = float(self.graphWidth) / float(self.timeRange)
potential = [
c for c in xAxisConfigs if c['seconds'] <= secondsPerPixel and
c.get('maxInterval', self.timeRange + 1) >= self.timeRange]
if potential:
self.xConf = potential[-1]
else:
self.xConf = xAxisConfigs[-1]
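        # For example, a 24-hour range on a plot roughly 600 pixels wide
        # gives about 144 seconds per pixel, which selects the seconds=100
        # entry above: labels every 4 hours with the "%a %H:%M" format.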
self.xLabelStep = self.xConf['labelUnit'] * self.xConf['labelStep']
self.xMinorGridStep = (
self.xConf['minorGridUnit'] * self.xConf['minorGridStep'])
self.xMajorGridStep = (
self.xConf['majorGridUnit'] * self.xConf['majorGridStep'])
def drawLabels(self):
# Draw the Y-labels
if not self.params.get('hideYAxis'):
if not self.secondYAxis:
for value, label in zip(self.yLabelValues, self.yLabels):
if self.params.get('yAxisSide') == 'left':
x = self.area['xmin'] - (self.yLabelWidth * 0.02)
else:
# Inverted for right side Y Axis
x = self.area['xmax'] + (self.yLabelWidth * 0.02)
y = self.getYCoord(value)
if y is None:
value = None
elif y < 0:
y = 0
if self.params.get('yAxisSide') == 'left':
self.drawText(label, x, y, align='right',
valign='middle')
else:
# Inverted for right side Y Axis
self.drawText(label, x, y, align='left',
valign='middle')
else: # Draws a right side and a Left side axis
for valueL, labelL in zip(self.yLabelValuesL, self.yLabelsL):
xL = self.area['xmin'] - (self.yLabelWidthL * 0.02)
yL = self.getYCoord(valueL, "left")
if yL is None:
value = None
elif yL < 0:
yL = 0
self.drawText(labelL, xL, yL, align='right',
valign='middle')
# Right Side
for valueR, labelR in zip(self.yLabelValuesR, self.yLabelsR):
# Inverted for right side Y Axis
xR = self.area['xmax'] + (self.yLabelWidthR * 0.02) + 3
yR = self.getYCoord(valueR, "right")
if yR is None:
valueR = None
elif yR < 0:
yR = 0
# Inverted for right side Y Axis
self.drawText(labelR, xR, yR, align='left',
valign='middle')
if not self.params.get('hideXAxis'):
dt, x_label_delta = find_x_times(self.start_dt,
self.xConf['labelUnit'],
self.xConf['labelStep'])
# Draw the X-labels
xFormat = self.params.get('xFormat', self.xConf['format'])
while dt < self.end_dt:
label = dt.strftime(xFormat)
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
y = self.area['ymax'] + self.getExtents()['maxAscent']
self.drawText(label, x, y, align='center', valign='top')
dt += x_label_delta
def drawGridLines(self):
# Not sure how to handle this for 2 y-axes
# Just using the left side info for the grid.
# Horizontal grid lines
leftSide = self.area['xmin']
rightSide = self.area['xmax']
labels = []
if self.secondYAxis:
labels = self.yLabelValuesL
else:
labels = self.yLabelValues
for i, value in enumerate(labels):
self.ctx.set_line_width(0.4)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# draw minor gridlines if this isn't the last label
if self.minorY >= 1 and i < (len(labels) - 1):
# in case graphite supports inverted Y axis now or someday
valueLower, valueUpper = sorted((value, labels[i+1]))
# each minor gridline is 1/minorY apart from the nearby
# gridlines. we calculate that distance, for adding to the
# value in the loop.
distance = ((valueUpper - valueLower) / float(1 + self.minorY))
# starting from the initial valueLower, we add the minor
# distance for each minor gridline that we wish to draw, and
# then draw it.
for minor in range(self.minorY):
self.ctx.set_line_width(0.3)
self.setColor(
self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
                    # each minor gridline value is offset from the lower major
                    # gridline by a whole number of minor-grid distances
value = valueLower + ((1+minor) * distance)
if self.logBase:
yTopFactor = self.logBase * self.logBase
else:
yTopFactor = 1
if self.secondYAxis:
if value >= (yTopFactor * self.yTopL):
continue
else:
if value >= (yTopFactor * self.yTop):
continue
if self.secondYAxis:
y = self.getYCoord(value, "left")
else:
y = self.getYCoord(value)
if y is None or y < 0:
continue
self.ctx.move_to(leftSide, y)
self.ctx.line_to(rightSide, y)
self.ctx.stroke()
# Vertical grid lines
top = self.area['ymin']
bottom = self.area['ymax']
# First we do the minor grid lines (majors will paint over them)
self.ctx.set_line_width(0.25)
self.setColor(self.params.get('minorGridLineColor',
self.defaultMinorGridLineColor))
dt, x_minor_delta = find_x_times(
self.start_dt, self.xConf['minorGridUnit'],
self.xConf['minorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_minor_delta
# Now we do the major grid lines
self.ctx.set_line_width(0.33)
self.setColor(self.params.get('majorGridLineColor',
self.defaultMajorGridLineColor))
dt, x_major_delta = find_x_times(self.start_dt,
self.xConf['majorGridUnit'],
self.xConf['majorGridStep'])
while dt < self.end_dt:
x = self.area['xmin'] + (
to_seconds(dt - self.start_dt) * self.xScaleFactor)
if x < self.area['xmax']:
self.ctx.move_to(x, bottom)
self.ctx.line_to(x, top)
self.ctx.stroke()
dt += x_major_delta
# Draw side borders for our graph area
self.ctx.set_line_width(0.5)
self.ctx.move_to(self.area['xmax'], bottom)
self.ctx.line_to(self.area['xmax'], top)
self.ctx.move_to(self.area['xmin'], bottom)
self.ctx.line_to(self.area['xmin'], top)
self.ctx.stroke()
class PieGraph(Graph):
customizable = Graph.customizable + (
'title', 'valueLabels', 'valueLabelsMin', 'hideLegend', 'pieLabels',
'areaAlpha', 'valueLabelsColor',
)
validValueLabels = ('none', 'number', 'percent')
def drawGraph(self, **params):
self.pieLabels = params.get('pieLabels', 'horizontal')
self.total = sum([t[1] for t in self.data])
if self.params.get('areaAlpha'):
try:
self.alpha = float(self.params['areaAlpha'])
except ValueError:
self.alpha = 1.0
else:
self.alpha = 1.0
self.slices = []
for name, value in self.data:
self.slices.append({
'name': name,
'value': value,
'percent': value / self.total,
'color': next(self.colors),
'alpha': self.alpha,
})
titleSize = self.defaultFontParams['size'] + math.floor(
math.log(self.defaultFontParams['size']))
self.setFont(size=titleSize)
self.setColor(self.foregroundColor)
if params.get('title'):
self.drawTitle(params['title'])
self.setFont()
if not params.get('hideLegend', False):
elements = [
(slice['name'], slice['color'], None) for slice in self.slices]
self.drawLegend(elements)
self.drawSlices()
if params.get('valueLabelsColor'):
self.valueLabelsColor = params.get('valueLabelsColor')
else:
self.valueLabelsColor = 'black'
self.valueLabelsMin = float(params.get('valueLabelsMin', 5))
self.valueLabels = params.get('valueLabels', 'percent')
assert self.valueLabels in self.validValueLabels, (
"valueLabels=%s must be one of %s" % (
self.valueLabels, self.validValueLabels))
if self.valueLabels != 'none':
self.drawLabels()
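    # drawSlices: starting from the top of the circle (theta = 3*pi/2), each slice
    # sweeps an arc proportional to its share of the total, and its mid-angle is
    # remembered so drawLabels() can position the label inside the wedge later.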
def drawSlices(self):
theta = 3.0 * math.pi / 2.0
halfX = (self.area['xmax'] - self.area['xmin']) / 2.0
halfY = (self.area['ymax'] - self.area['ymin']) / 2.0
self.x0 = x0 = self.area['xmin'] + halfX
self.y0 = y0 = self.area['ymin'] + halfY
self.radius = radius = min(halfX, halfY) * 0.95
for slice in self.slices:
self.setColor(slice['color'], slice['alpha'])
self.ctx.move_to(x0, y0)
phi = theta + (2 * math.pi) * slice['percent']
self.ctx.arc(x0, y0, radius, theta, phi)
self.ctx.line_to(x0, y0)
self.ctx.fill()
slice['midAngle'] = (theta + phi) / 2.0
slice['midAngle'] %= 2.0 * math.pi
theta = phi
def drawLabels(self):
self.setFont()
self.setColor(self.valueLabelsColor)
for slice in self.slices:
if self.valueLabels == 'percent':
if slice['percent'] * 100.0 < self.valueLabelsMin:
continue
label = "%%%.2f" % (slice['percent'] * 100.0)
elif self.valueLabels == 'number':
if slice['value'] < self.valueLabelsMin:
continue
if (
slice['value'] < 10 and
slice['value'] != int(slice['value'])
):
label = "%.2f" % slice['value']
else:
label = force_text(int(slice['value']))
theta = slice['midAngle']
x = self.x0 + (self.radius / 2.0 * math.cos(theta))
y = self.y0 + (self.radius / 2.0 * math.sin(theta))
if self.pieLabels == 'rotated':
if theta > (math.pi / 2.0) and theta <= (3.0 * math.pi / 2.0):
theta -= math.pi
self.drawText(label, x, y, align='center', valign='middle',
rotate=math.degrees(theta))
else:
self.drawText(label, x, y, align='center', valign='middle')
GraphTypes = {
'line': LineGraph,
'pie': PieGraph,
}
# Convenience functions
def safeArgs(args):
"""Iterate over valid, finite values in an iterable.
Skip any items that are None, NaN, or infinite.
"""
return (arg for arg in args
if arg is not None and not math.isnan(arg) and not math.isinf(arg))
def safeMin(args):
args = list(safeArgs(args))
if args:
return min(args)
def safeMax(args):
args = list(safeArgs(args))
if args:
return max(args)
def safeSum(values):
return sum(safeArgs(values))
def dataLimits(data, drawNullAsZero=False, stacked=False):
"""Return the range of values in data as (yMinValue, yMaxValue).
data is an array of TimeSeries objects.
"""
missingValues = any(None in series for series in data)
finiteData = [series for series in data
if not series.options.get('drawAsInfinite')]
yMinValue = safeMin(safeMin(series) for series in finiteData)
if yMinValue is None:
# This can only happen if there are no valid, non-infinite data.
return (0.0, 1.0)
if yMinValue > 0.0 and drawNullAsZero and missingValues:
yMinValue = 0.0
if stacked:
length = safeMin(len(series) for series in finiteData)
sumSeries = []
for i in range(0, length):
sumSeries.append(safeSum(series[i] for series in finiteData))
yMaxValue = safeMax(sumSeries)
else:
yMaxValue = safeMax(safeMax(series) for series in finiteData)
if yMaxValue < 0.0 and drawNullAsZero and missingValues:
yMaxValue = 0.0
return (yMinValue, yMaxValue)
def sort_stacked(series_list):
stacked = [s for s in series_list if 'stacked' in s.options]
not_stacked = [s for s in series_list if 'stacked' not in s.options]
return stacked + not_stacked
def condition(value, size, step):
if step is None:
return abs(value) >= size
else:
return abs(value) >= size and step >= size
def format_units(v, step=None, system="si", units=None):
"""Format the given value in standardized units.
``system`` is either 'binary' or 'si'
For more info, see:
http://en.wikipedia.org/wiki/SI_prefix
http://en.wikipedia.org/wiki/Binary_prefix
"""
if v is None:
return 0, ''
for prefix, size in UnitSystems[system]:
if condition(v, size, step):
v2 = v / size
if v2 - math.floor(v2) < 0.00000000001 and v > 1:
v2 = float(math.floor(v2))
if units:
prefix = "%s%s" % (prefix, units)
return v2, prefix
if v - math.floor(v) < 0.00000000001 and v > 1:
v = float(math.floor(v))
if units:
prefix = units
else:
prefix = ''
return v, prefix
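# find_x_times returns (dt, x_delta): dt is start_dt truncated to the start of its
# grid interval for the given unit and then advanced until it is >= start_dt, and
# x_delta is the spacing between successive x-axis gridlines.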
def find_x_times(start_dt, unit, step):
if not isinstance(start_dt, datetime):
raise ValueError("Invalid start_dt: %s" % start_dt)
if not isinstance(step, int) or not step > 0:
if not isinstance(step, float) or unit != DAY or not step > 0.0:
raise ValueError("Invalid step value: %s" % step)
if unit == SEC:
dt = start_dt.replace(
second=start_dt.second - (start_dt.second % step))
x_delta = timedelta(seconds=step)
elif unit == MIN:
dt = start_dt.replace(
second=0, minute=start_dt.minute - (start_dt.minute % step))
x_delta = timedelta(minutes=step)
elif unit == HOUR:
dt = start_dt.replace(
second=0, minute=0, hour=start_dt.hour - (start_dt.hour % step))
x_delta = timedelta(hours=step)
elif unit == DAY:
dt = start_dt.replace(second=0, minute=0, hour=0)
x_delta = timedelta(days=step)
else:
raise ValueError("Invalid unit: %s" % unit)
while dt < start_dt:
dt += x_delta
return (dt, x_delta)
| apache-2.0 | -1,222,734,711,232,685,800 | 36.067367 | 79 | 0.520098 | false |
tommartensen/arion-backend | arionBackend/serializers/json.py | 1 | 2209 | """
This module contains all json serializers in the project.
"""
from json import loads as json_loads
class JSONSerializer(object):
"""
This class contains methods for serializing model objects into JSON objects.
"""
@staticmethod
def serialize_hierarchy_overview(hierarchy):
"""
Serializes a hierarchy to JSON format with basic information.
:param hierarchy: given hierarchy to serialize.
:return: Dictionary with hierarchy information
"""
return {
"id": hierarchy.id,
"name": hierarchy.name,
"timestamp": hierarchy.timestamp
}
@staticmethod
def serialize_hierarchy_complete(hierarchy):
"""
Serializes a hierarchy to JSON format with all information.
:param hierarchy: given hierarchy to serialize.
:return: Dictionary with hierarchy information
"""
return {
"id": hierarchy.id,
"name": hierarchy.name,
"timestamp": hierarchy.timestamp,
"hierarchy": json_loads(hierarchy.graph_representation),
}
@staticmethod
def serialize_query(query):
"""
Serializes a query into JSON format.
:param query: the query to serialize
:return: Dictionary with query information
"""
return {
"id": query.id,
"query": query.query_string,
"eqmnRepresentation": json_loads(query.eqmn_representation),
"outputEventType": query.output_event_type.to_json(),
"inputEventTypes": [event_type.to_json() for event_type in query.input_event_types.all()]
}
@staticmethod
def serialize_basic_event_type(event_type):
"""
Serializes an event type into JSON format.
:param event_type: the event type to serialize
:return: Dictionary with event type information
"""
return {
"id": event_type.id,
"name": event_type.name,
"isBasicEventType": event_type.is_basic_event_type()
}
@staticmethod
def serialize_complete_event_type(event_type):
"""
Serializes an event type into JSON format with enhanced information.
:param event_type: the event type to serialize
:return: Dictionary with enhanced event type information
"""
return {
"id": event_type.id,
"name": event_type.name,
"isBasicEventType": event_type.is_basic_event_type(),
"inputQueries": [query.id for query in event_type.output_type.all()]
}
| mit | 2,168,103,708,467,919,400 | 26.962025 | 92 | 0.715256 | false |
viroulep/RebirthItemTracker | src/game_objects/state.py | 1 | 5925 | """This module handles anything related to the item tracker's state"""
import logging
import json
from game_objects.item import Item, ItemInfo
from game_objects.floor import Floor
from game_objects.serializable import Serializable
class TrackerState(Serializable):
"""This class represents a tracker state, and handle the logic to
modify it while keeping it coherent
"""
serialize = [('seed', basestring), ('floor_list', list),
('item_list', list), ('bosses', list), ('tracker_version', basestring), ('game_version', basestring)]
def __init__(self, seed, tracker_version, game_version):
self.reset(seed, game_version)
self.tracker_version = tracker_version
def reset(self, seed, game_version):
"""
        Reset a run to a given seed
This should be enough to enable the GC to clean everything from the previous run
"""
# When the tracker state has been restarted, put this to True
# The view can then put it to false once it's been rendered
self.modified = True
self.seed = seed
self.game_version = game_version
self.floor_list = []
self.item_list = []
self.bosses = []
self.player_stats = {}
self.player_transforms = {}
for stat in ItemInfo.stat_list:
self.player_stats[stat] = 0.0
for transform in ItemInfo.transform_list:
self.player_transforms[transform] = set()
def add_floor(self, floor):
""" Add a floor to the current run """
self.floor_list.append(floor)
self.modified = True
@property
def last_floor(self):
"""
Get current floor
If no floor is in the floor list, create a default one
"""
if len(self.floor_list) == 0:
self.add_floor(Floor("f1"))
return self.floor_list[-1]
def add_item(self, item):
"""
Add an item to the current run, and update player's stats accordingly
Return a tuple (boolean, list).
The boolean is true if the item has been added, false otherwise.
"""
# Ignore repeated pickups of space bar items
if not (item.info.space and item in self.item_list):
self.item_list.append(item)
self.__add_stats_for_item(item)
self.modified = True
return True
else:
return False
@property
def last_item(self):
"""
Get last item picked up
Can return None !
"""
if len(self.item_list) > 0:
return self.item_list[-1]
else:
return None
def contains_item(self, item_id):
""" Looks for the given item_id in our item_list """
return len([x for x in self.item_list if x.item_id == item_id]) >= 1
def reroll(self):
""" Tag every (non-spacebar) items as rerolled """
[item.rerolled() for item in self.item_list]
# Add curse to last floor
def add_curse(self, curse):
""" Add a curse to current floor """
self.last_floor.add_curse(curse)
def add_boss(self, bossid):
""" Add boss to seen boss """
if bossid not in self.bosses:
self.bosses.append(bossid)
nbosses = len(self.bosses)
if 11 <= nbosses <= 13:
suffix = 'th'
else:
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(nbosses % 10, 'th')
logging.getLogger("tracker").debug("Defeated %s%s boss %s",
len(self.bosses),
suffix,
bossid)
@property
def last_boss(self):
"""
Get last boss encountered
Can return None !
"""
if len(self.bosses) > 0:
return self.bosses[-1]
else:
return None
def drawn(self):
""" Tag this state as rendered """
self.modified = False
@staticmethod
def from_valid_json(json_dic, *args):
""" Create a state from a type-checked dic """
state = TrackerState(json_dic['seed'], json_dic['tracker_version'], json_dic['game_version'])
# The order is important, we want a list of legal floors the item can
# be picked up on before parsing items
for floor_dic in json_dic['floor_list']:
floor = Floor.from_json(floor_dic)
if not floor:
return None
state.add_floor(floor)
for bossstr in json_dic['bosses']:
# TODO create a serializable boss class that would create
# a boss object with description from a bossid
# In any case it's sufficient to (de)serialize only bossids
if not isinstance(bossstr, basestring):
return None
state.add_boss(bossstr)
for item_dic in json_dic['item_list']:
item = Item.from_json(item_dic, state.floor_list)
if not item:
return None
state.add_item(item)
return state
def __add_stats_for_item(self, item):
"""
Update player's stats with the given item.
"""
item_info = item.info
for stat in ItemInfo.stat_list:
if not item_info[stat]:
continue
change = float(item_info[stat])
self.player_stats[stat] += change
for transform in ItemInfo.transform_list:
if not item_info[transform]:
continue
self.player_transforms[transform].add(item)
class TrackerStateEncoder(json.JSONEncoder):
""" An encoder to provide to the json.load method, which handle game objects """
def default(self, obj):
if isinstance(obj, Serializable):
return obj.to_json()
return obj.__dict__
| bsd-2-clause | -7,840,911,215,102,460,000 | 33.447674 | 118 | 0.560169 | false |
tommo/gii | lib/gii/DeviceManager/DeviceManager.py | 1 | 2811 | import os
import stat
from gii.core import *
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import QEventLoop, QEvent, QObject
from gii.qt.IconCache import getIcon
from gii.qt.controls.Window import MainWindow
from gii.qt.controls.Menu import MenuManager
from gii.qt.QtEditorModule import QtEditorModule
from gii.SearchView import requestSearchView, registerSearchEnumerator
import Device
##----------------------------------------------------------------##
# def getIOSDeviceName( dev ):
# name = u''
# try:
# name = dev.get_value(name=u'DeviceName')
# except:
# pass
# print( u'%s - "%s"' % ( dev.get_deviceid(), name.decode(u'utf-8') ) )
##----------------------------------------------------------------##
signals.register( 'device.connected' )
signals.register( 'device.disconnected' )
signals.register( 'device.activated' )
signals.register( 'device.deactivated' )
##----------------------------------------------------------------##
class DeviceManager( EditorModule ):
def __init__( self ):
pass
def getName( self ):
return 'device_manager'
def getDependency( self ):
return []
def onLoad( self ):
self.deviceTypes = {}
self.containers = {}
self.devices = {}
self.activeDevice = None
registerSearchEnumerator( deviceSearchEnumerator )
#load device history
signals.connect( 'project.done_deploy', self.onDoneDeploy )
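	# Handles 'connected'/'disconnected' events for a device: keeps self.devices up to
	# date and activates the first device that connects while no device is active.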
def onDeviceEvent( self, ev, device ):
if ev == 'connected':
signals.emit( 'device.connected', device )
self.devices[ device ] = True
device.setActive( False )
if not self.activeDevice:
self.setActiveDevice( device )
elif ev == 'disconnected':
signals.emit( 'device.disconnected', device )
self.devices[ device ] = False
if device == self.activeDevice:
self.activeDevice = None
def setActiveDevice( self, device ):
if self.activeDevice:
self.activeDevice.setActive( False )
signals.emit( 'device.deactivated', self.activeDevice )
self.activeDevice = device
if device:
self.activeDevice.setActive( True )
signals.emit( 'device.activated', device )
def onDoneDeploy( self, context ):
if not self.devices: return
activeDevice = self.activeDevice or self.devices[0]
print u'deploy on device:'
r = repr( activeDevice )
print r
activeDevice.deploy( context )
print 'deploy done!'
DeviceManager().register()
##----------------------------------------------------------------##
def deviceSearchEnumerator( typeId, context, option ):
if not context in [ 'device' ]: return
result = []
dm = app.getModule( 'device_manager' )
for device in dm.enumerateDevice():
entry = ( device, device.getName(), device.getType(), None )
result.append( entry )
return result
##----------------------------------------------------------------##
| mit | -5,803,916,313,980,087,000 | 27.393939 | 76 | 0.617574 | false |
google/struct2tensor | struct2tensor/expression_impl/__init__.py | 1 | 1826 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import all modules in expression_impl.
The modules in this file should be accessed like the following:
```
import struct2tensor as s2t
from struct2tensor import expression_impl
s2t.expression_impl.apply_schema
```
"""
from struct2tensor.expression_impl import apply_schema
from struct2tensor.expression_impl import broadcast
from struct2tensor.expression_impl import depth_limit
from struct2tensor.expression_impl import filter_expression
from struct2tensor.expression_impl import index
from struct2tensor.expression_impl import map_prensor
from struct2tensor.expression_impl import map_prensor_to_prensor
from struct2tensor.expression_impl import map_values
from struct2tensor.expression_impl import parquet
from struct2tensor.expression_impl import placeholder
from struct2tensor.expression_impl import project
from struct2tensor.expression_impl import promote
from struct2tensor.expression_impl import promote_and_broadcast
from struct2tensor.expression_impl import proto
from struct2tensor.expression_impl import reroot
from struct2tensor.expression_impl import size
from struct2tensor.expression_impl import slice_expression
| apache-2.0 | -8,786,078,613,776,037,000 | 41.465116 | 80 | 0.787514 | false |
floringrigoriu/Algorthitms | Leetcode2021/Monthly/January/Jan23.py | 1 | 1312 | # https://leetcode.com/explore/challenge/card/january-leetcoding-challenge-2021/582/week-4-january-22nd-january-28th/3614/
# A matrix diagonal is a diagonal line of cells starting from some cell in either the topmost row or leftmost column and
# going in the bottom-right direction until reaching the matrix's end.
# For example, the matrix diagonal starting from mat[2][0], where mat is a 6 x 3 matrix, includes cells mat[2][0], mat[3][1], and mat[4][2].
# Given an m x n matrix mat of integers, sort each matrix diagonal in ascending order and return the resulting matrix.
from typing import List

class Solution:
def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
if not mat or not mat[0]:
return mat
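        # Each diagonal is identified by d = column - row; walk every diagonal,
        # collect its values, sort them ascending, and write them back in place.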
for d in range (1- len(mat), len(mat[0])):
delta = (-1)* min(d,0)
y= delta;
x = d+delta
diag = []
while y<len(mat) and x< len(mat[0]):
diag.append(mat[y][x])
y = y+1
x = x+1
diag.sort()
y= delta
x = d+delta
while y<len(mat) and x< len(mat[0]):
mat[y][x]=diag[y-delta]
y = y+1
x = x+1
return mat
s = Solution()
print(s.diagonalSort([[3,3,1,1],[2,2,1,2],[1,1,1,2]]))
| gpl-2.0 | -4,631,008,891,928,721,000 | 36.485714 | 141 | 0.560213 | false |
alexholcombe/twoWords | twoWords.py | 1 | 49945 | #Alex Holcombe [email protected]
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function #use python3 style print
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
wordEccentricity=3 #degrees of visual angle away from the fixation point
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A NEW MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True.
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
refreshRate = 60.; #100
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 24
wordsUnparsed="the, and, for, you, say, but, his, not, she, can, who, get, her, all, one, out, see, him, now, how, its, our, two, way" #24 most common words
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
wordList = wordsUnparsed.split(",") #split into list
for i in range(len(wordList)):
wordList[i] = wordList[i].replace(" ", "") #delete spaces
#Later on, this list will be randomly permuted for each trial
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=True #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=0
framesSaved=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Dual-RSVP experiment OR staircase to find thresh noise level for performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
SOAms = 133 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133
#Minimum SOAms should be 84 because any shorter, I can't always notice the second ring when lag1. 71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 80 #23.6 in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 1 #default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
dimGreyForDlgBox = 'DimGrey'
from distutils.version import LooseVersion
if LooseVersion(psychopy.__version__) < LooseVersion("1.84.2"):
dimGreyForDlgBox = [-1.,1.,-1.] #color names stopped working along the way, for unknown reason
myDlg.addText('Note: to abort press ESC at a trial\'s response screen', color=dimGreyForDlgBox) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information entered in dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
if doStaircase:
if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
print('staircaseTrials entered by user=',staircaseTrials)
logging.info('staircaseTrials entered by user=',staircaseTrials)
if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
logging.info('prefaceStaircaseTrialsN entered by user=',prefaceStaircaseTrialsN)
else: #not doing staircase
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition =',trialsPerCondition)
defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow() #reopen stim window. Had to close test window to allow for dialogue boxes
#set up output data file, log file, copy of program code, and logging
infix = '' #part of the filenames
if doStaircase:
infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messages, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
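#calcAndPredrawStimuli builds a TextStim for each of the first numWordsInStream words, once for the left stream
#and once for the right, and returns the two index sequences that give the order the words appear in each stream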
def calcAndPredrawStimuli(wordList,thisTrial): #Called before each trial
if len(wordList) < numWordsInStream:
print('Error! Your word list must have at least ',numWordsInStream,'strings')
idxsIntoWordList = np.arange( len(wordList) ) #create a list of indexes of the entire word list: 0,1,2,3,4,5,...23
print('wordList=',wordList)
    for i in range(0,numWordsInStream): #create the stimuli for the first numWordsInStream words of the list; these are the words used on this trial
word = wordList[ i ] # #[ idxsIntoWordList[i] ]
#flipHoriz, flipVert textStim http://www.psychopy.org/api/visual/textstim.html
#Create one bucket of words for the left stream
textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
#Create a bucket of words for the right stream
textStimulusStream2 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
textStimulusStream1.setPos([-wordEccentricity,0]) #left
textStimuliStream1.append(textStimulusStream1) #add to list of text stimuli that comprise stream 1
textStimulusStream2.setPos([wordEccentricity,0]) #right
textStimuliStream2.append(textStimulusStream2) #add to list of text stimuli that comprise stream 2
#If you are Joel or someone else who needs to mess with the stream conditional on the cue position, this is probably where we are going to do it
#pseudoHomophonePos = thisTrial['cuePos'] -1
#Use these buckets by pulling out the drawn words in the order you want them. For now, just create the order you want.
    idxsStream1 = idxsIntoWordList #first (left) RSVP stream; note this is the same array object as idxsIntoWordList
    np.random.shuffle(idxsIntoWordList) #0,1,2,3,4,5,... -> randomly permuted 3,2,5,...  (this also permutes idxsStream1, which refers to the same array)
    idxsStream2 = copy.deepcopy(idxsIntoWordList) #make an independent copy for the right stream
    np.random.shuffle(idxsStream2) #permute the right stream separately, so the two streams show the words in different orders
return idxsStream1, idxsStream2
#create click sound for keyboard
try:
click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix/4,fixSizePix/4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
#Construct the fixation point.
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=4,units='pix',autoLog=autoLogging)
#Construct the holders for the experiment text that will appear on screen
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS, This implements the full factorial design!
cuePositions = np.array([10,11,12,13,14])
for cuePos in cuePositions:
for rightResponseFirst in [False,True]:
for bothWordsFlipped in [False,True]:
stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst,
'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
#if it's not really a letter, return @
if number < 0 or number > 25:
return ('@')
else: #it's probably a letter
try:
return chr( ord('A')+number )
except:
return('@')
def letterToNumber(letter): #A = 0, Z = 25
#if it's not really a letter, return -999
#HOW CAN I GENERICALLY TEST FOR LENGTH. EVEN IN CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?
try:
#if len(letter) > 1:
# return (-999)
if letter < 'A' or letter > 'Z':
return (-999)
else: #it's a letter
return ord(letter)-ord('A')
except:
return (-999)
def wordToIdx(word,wordList):
#if it's not in the list of stimuli, return -999
try:
#http://stackoverflow.com/questions/7102050/how-can-i-get-a-python-generator-to-return-none-rather-than-stopiteration
firstMatchIdx = next((i for i, val in enumerate(wordList) if val.upper()==word), None) #return i (index) unless no matches, in which case return None
#print('Looked for ',word,' in ',wordList,'\nfirstMatchIdx =',firstMatchIdx)
return firstMatchIdx
except:
print('Unexpected error in wordToIdx with word=',word)
return (None)
#print header for data file
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
numRespsWanted = 2
dataFile.write('rightResponseFirst\t')
for i in range(numRespsWanted):
dataFile.write('cuePos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
dataFile.write('answer'+str(i)+'\t')
dataFile.write('response'+str(i)+'\t')
dataFile.write('correct'+str(i)+'\t')
dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
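#oneFrameOfStim draws one video frame of the trial. Each word occupies SOAframes = letterDurFrames + ISIframes frames;
#stimN = floor(n/SOAframes) selects which word of each sequence to show. During the ISI, and when the cue is not on,
#the word and cue are still drawn but in the background color, so the drawing load is the same on every frame.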
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
noise,proportnNoise,allFieldCoords,numNoiseDots ):
#defining a function to draw each frame of stim.
#seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
SOAframes = letterDurFrames+ISIframes
cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
stimN = int( np.floor(n/SOAframes) )
frameOfThisLetter = n % SOAframes #every SOAframes, new letter
showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
#print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
if seq2 is not None:
thisStim2Idx = seq2[stimN]
#so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
cue.setLineColor( bgColor )
if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
if n>=cueFrame and n<cueFrame+cueDurFrames:
cue.setLineColor( cueColor )
if showLetter:
textStimuliStream1[thisStimIdx].setColor( letterColor )
textStimuliStream2[thisStim2Idx].setColor( letterColor )
else:
textStimuliStream1[thisStimIdx].setColor( bgColor )
textStimuliStream2[thisStim2Idx].setColor( bgColor )
textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
textStimuliStream1[thisStimIdx].draw()
textStimuliStream2[thisStim2Idx].draw()
cue.draw()
    refreshNoise = False #Not recommended because it takes longer than a frame, apparently even just the shuffle (or perhaps the setXYs step)
if proportnNoise>0 and refreshNoise:
if frameOfThisLetter ==0:
np.random.shuffle(allFieldCoords)
dotCoords = allFieldCoords[0:numNoiseDots]
noise.setXYs(dotCoords)
if proportnNoise>0:
noise.draw()
return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
cue = visual.Circle(myWin,
radius=cueRadius,#Martini used circles with diameter of 12 deg
lineColorSpace = 'rgb',
lineColor=bgColor,
lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
units = 'deg',
fillColorSpace = 'rgb',
fillColor=None, #beware, with convex shapes fill colors don't work
pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
interpolate=True,
autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
#check for timing problems and log them
#ts is a list of the times of the clock after each frame
interframeIntervs = np.diff(ts)*1000
#print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong >0 and (not demo):
longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
if demo:
longFramesStr += 'not printing them all because in demo mode'
else:
longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
if longFramesStr != None:
logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
if not demo:
flankingAlso=list()
for idx in idxsInterframeLong: #also print timing of one before and one after long frame
if idx-1>=0:
flankingAlso.append(idx-1)
else: flankingAlso.append(np.NaN)
flankingAlso.append(idx)
if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
else: flankingAlso.append(np.NaN)
flankingAlso = np.array(flankingAlso)
            flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
flankingAlso = flankingAlso.astype(np.integer) #cast as integers, so can use as subscripts
logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) ) #because this is not an essential error message, as previous one already indicates error
#As INFO, at least it won't fill up the console when console set to WARNING or higher
return numCasesInterframeLong
#end timing check
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
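#do_RSVP_stim presents one trial: a brief fixation interval, then the full dual-stream RSVP sequence. It returns the
#cue positions, the correct-answer indices for the left and right streams, and the time of each frame flip (used
#afterwards by timingCheckAndLog to detect skipped frames).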
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
#relies on global variables:
# textStimuli, logging, bgColor
# thisTrial should have 'cuePos'
global framesSaved #because change this variable. Can only change a global variable if you declare it
cuesPos = [] #will contain the positions in the stream of all the cues (targets)
cuesPos.append(thisTrial['cuePos'])
cuesPos = np.array(cuesPos)
noise = None; allFieldCoords=None; numNoiseDots=0
if proportnNoise > 0: #gtenerating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
(noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
cue.setLineColor(bgColor)
preDrawStimToGreasePipeline.extend([cue])
for stim in preDrawStimToGreasePipeline:
stim.draw()
myWin.flip(); myWin.flip()
#end preparation of stimuli
core.wait(.1);
trialClock.reset()
fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int(   (np.random.rand(1)/2.+fixatnPeriodMin)   *refreshRate)  #random duration between 0.3 and 0.8 s (the loop below adds 20 more frames)
ts = list(); #to store time of each drawing, to check whether skipped frames
for i in range(fixatnPeriodFrames+20): #prestim fixation interval
#if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
# fixation.draw()
#else: fixationBlank.draw()
fixationPoint.draw()
myWin.flip() #end fixation interval
#myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
t0 = trialClock.getTime()
for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
fixationPoint.draw()
if exportImages:
myWin.getMovieFrame(buffer='back') #for later saving
framesSaved +=1
myWin.flip()
t=trialClock.getTime()-t0; ts.append(t);
#end of big stimulus loop
myWin.setRecordFrameIntervals(False);
if task=='T1':
respPromptStim.setText('What was circled?',log=False)
else: respPromptStim.setText('Error: unexpected task',log=False)
postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
#print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList[correctAnswerIdxsStream1[0]]=',wordList[correctAnswerIdxsStream1[0]])
return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
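#handleAndScoreResponse scores one typed word against the cued position of one stream: 'correct' requires an exact
#match with the cued word, and 'approxCorrect' credits a word that appeared within 3 serial positions of the cue.
#It also writes the per-response fields (answer, response, correct, responsePosRelative) to dataFile.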
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
#Handle response, calculate whether correct, ########################################
#responses are actual characters
#correctAnswer is index into stimSequence
#autopilot is global variable
if autopilot or passThisTrial:
response = responseAutopilot
#print('handleAndScoreResponse correctAnswerIdxs=',correctAnswerIdxs,'\nstimSequence=',stimSequence, '\nwords=',wordList)
correct = 0
approxCorrect = 0
posOfResponse = -999
responsePosRelative = -999
idx = correctAnswerIdx
correctAnswer = wordList[idx].upper()
responseString= ''.join(['%s' % char for char in response])
responseString= responseString.upper()
#print('correctAnswer=',correctAnswer ,' responseString=',responseString)
if correctAnswer == responseString:
correct = 1
#print('correct=',correct)
responseWordIdx = wordToIdx(responseString,wordList)
if responseWordIdx is None: #response is not in the wordList
posOfResponse = -999
logging.warn('Response was not present in the stimulus stream')
else:
posOfResponse= np.where( responseWordIdx==stimSequence )
        posOfResponse= posOfResponse[0] #np.where returns a tuple of arrays; take the first (and only) one, the array of positions where the response occurred in the sequence
if len(posOfResponse) > 1:
logging.error('Expected response to have occurred in only one position in stream')
posOfResponse = posOfResponse[0] #first element of list (should be only one element long
responsePosRelative = posOfResponse - cuePos
        approxCorrect = abs(responsePosRelative)<= 3 #Vul efficacy measure of getting it right to within plus or minus 3 positions
#print('wordToIdx(',responseString,',',wordList,')=',responseWordIdx,' stimSequence=',stimSequence,'\nposOfResponse = ',posOfResponse) #debugON
#print response stuff to dataFile
#header was answerPos0, answer0, response0, correct0, responsePosRelative0
print(cuePos,'\t', end='', file=dataFile)
print(correctAnswer, '\t', end='', file=dataFile) #answer0
print(responseString, '\t', end='', file=dataFile) #response0
print(correct, '\t', end='',file=dataFile) #correct0
print(responsePosRelative, '\t', end='',file=dataFile) #responsePosRelative0
return correct,approxCorrect,responsePosRelative
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
highA = sound.Sound('G',octave=5, sampleRate=6000, secs=.3, bits=8)
low = sound.Sound('F',octave=3, sampleRate=6000, secs=.3, bits=8)
highA.setVolume(0.9)
low.setVolume(1.0)
if correct:
highA.play()
elif passThisTrial:
high= sound.Sound('G',octave=4, sampleRate=2000, secs=.08, bits=8)
for i in range(2):
high.play(); low.play();
else: #incorrect
low.play()
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
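#Two possible phases follow: a staircase (QUEST) phase that adjusts the noise level to estimate the threshold for the
#criterion level of performance, or, if not doStaircase, the main constant-stimuli experiment loop further below.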
if doStaircase:
#create the staircase handler
useQuest = True
if useQuest:
staircase = data.QuestHandler(startVal = 95,
startValSd = 80,
stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
nTrials = staircaseTrials,
#extraInfo = thisInfo,
pThreshold = threshCriterion, #0.25,
gamma = 1./26,
delta=0.02, #lapse rate, I suppose for Weibull function fit
method = 'quantile', #uses the median of the posterior as the final answer
stepType = 'log', #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
minVal=1, maxVal = 100
)
print('created QUEST staircase')
else:
stepSizesLinear = [.2,.2,.1,.1,.05,.05]
stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
staircase = data.StairHandler(startVal = 0.1,
stepType = 'log', #if log, what do I want to multiply it by
stepSizes = stepSizesLog, #step size to use after each reversal
minVal=0, maxVal=1,
nUp=1, nDown=3, #will home in on the 80% threshold
nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
nTrials=1)
print('created conventional staircase')
if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, int( ceil( prefaceStaircaseTrialsN/float(len(prefaceStaircaseNoise)) ) ) ) #float division, and an integer repeat count for np.tile
prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
print(phasesMsg); logging.info(phasesMsg)
#staircaseStarterNoise PHASE OF EXPERIMENT
corrEachTrial = list() #only needed for easyStaircaseStarterNoise
staircaseTrialN = -1; mainStaircaseGoing = False
while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
staircaseTrialN += 1
noisePercent = prefaceStaircaseNoise[staircaseTrialN]
else:
if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
mainStaircaseGoing = True
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
try: #advance the staircase
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
staircaseTrialN += 1
except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        #do_RSVP_stim needs a trial dict; build a minimal one for the staircase phase (assumption: streams unflipped, cue position drawn at random)
        thisTrial = {'cuePos': np.random.choice(cuePositions), 'rightResponseFirst': False,
                                'leftStreamFlip': False, 'rightStreamFlip': False}
        idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList,thisTrial)
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts  = \
            do_RSVP_stim(thisTrial, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,responsesAutopilot = \
                stringResponse.collectStringResponse(len(wordList[0]),0,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                                                                               requireAcceptance,autopilot,responseDebug=True) #argument list aligned with the main-loop call below; x=0 (assumption) centers the response prompt
if not expStop:
if mainStaircaseGoing:
print('staircase\t', end='', file=dataFile)
else:
print('staircase_preface\t', end='', file=dataFile)
#header start 'trialnum\tsubject\ttask\t'
print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            #score the staircase response against the left stream's cued word (assumption: only the left stream is scored during the staircase phase)
            correct,T1approxCorrect,responsePosRelative= handleAndScoreResponse(
                                                            passThisTrial,responses,responsesAutopilot,task,idxsStream1,cuesPos[0],correctAnswerIdxsStream1[0] )
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
core.wait(.06)
if feedback:
play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
corrEachTrial.append(T1approxCorrect)
if mainStaircaseGoing:
staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
#print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
#ENDING STAIRCASE PHASE
if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
print('framesSaved after staircase=',framesSaved) #debugON
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = ('prefaceStaircase phase' if expStop else '')
msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
logging.info(msg); print(msg)
printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
#print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
threshNoise = round(staircase.quantile(),3)
if descendingPsycho:
threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. if get all trials wrong, posterior peaks at a very negative number
msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
logging.info(msg); print(msg)
myWin.close()
#Fit and plot data
fit = None
try:
intensityForCurveFitting = staircase.intensities
if descendingPsycho:
intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
except:
print("Fit failed.")
plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
#save figure to file
pylab.savefig(fileName+'.pdf')
print('The plot has been saved, as '+fileName+'.pdf')
pylab.show() #must call this to actually show plot
else: #not staircase
noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2f}%".format(defaultNoiseLevel)
print(phasesMsg); logging.info(phasesMsg)
nDoneMain =0
while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
if nDoneMain==0:
msg='Starting main (non-staircase) part of experiment'
logging.info(msg); print(msg)
thisTrial = trials.next() #get a proper (non-staircase) trial
sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordList,thisTrial)
cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts = \
do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
#call for each response
expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
numCharsInResponse = len(wordList[0])
dL = [None]*numRespsWanted #dummy list for null values
expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
responseOrder = range(numRespsWanted)
if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
responseOrder.reverse()
for i in responseOrder:
x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
requireAcceptance,autopilot,responseDebug=True)
expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
if not expStop:
print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
print(nDoneMain,'\t', end='', file=dataFile)
print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
i = 0
eachCorrect = np.ones(numRespsWanted)*-999; eachApproxCorrect = np.ones(numRespsWanted)*-999
for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
if i==0:
sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1;
else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;
correct,approxCorrect,responsePosRelative = (
handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
eachCorrect[i] = correct
eachApproxCorrect[i] = approxCorrect
print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
print('correct=',correct,' approxCorrect=',approxCorrect,' eachCorrect=',eachCorrect, ' responsePosRelative=', responsePosRelative)
numTrialsCorrect += eachCorrect.all() #so count -1 as 0
numTrialsApproxCorrect += eachApproxCorrect.all()
numTrialsEachCorrect += eachCorrect #list numRespsWanted long
numTrialsEachApproxCorrect += eachApproxCorrect #list numRespsWanted long
if exportImages: #catches one frame of response
myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
framesSaved +=1; core.wait(.1)
myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
expStop=True
core.wait(.1)
if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
nDoneMain+=1
dataFile.flush(); logging.flush()
print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
( trials.nTotal*pctCompletedBreak/100. ) ==1): #dont modulus 0 because then will do it for last trial
nextText.setText('Press "SPACE" to continue!')
nextText.draw()
progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
NextRemindCountText.setText(progressMsg)
NextRemindCountText.draw()
myWin.flip() # myWin.flip(clearBuffer=True)
waiting=True
while waiting:
if autopilot: break
elif expStop == True:break
for key in event.getKeys(): #check if pressed abort-type key
if key in ['space','ESCAPE']:
waiting=False
if key in ['ESCAPE']:
expStop = True
myWin.clearBuffer()
core.wait(.2); time.sleep(.2)
#end main trials loop
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
msg = 'Of ' + str(nDoneMain)+' trials, on '+str(numTrialsCorrect*1.0/nDoneMain*100.)+'% of all trials all targets reported exactly correct'
print(msg); logging.info(msg)
msg= 'All targets approximately correct in '+ str( round(numTrialsApproxCorrect*1.0/nDoneMain*100,1)) + '% of trials'
print(msg); logging.info(msg)
for i in range(numRespsWanted):
msg = 'stream'+str(i)+': '+str( round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2) ) + '% correct'
print(msg); logging.info(msg)
msg = 'stream' + str(i) + ': '+ str( round(numTrialsEachApproxCorrect[i]*1.0/nDoneMain*100,2) ) +'% approximately correct'
print(msg); logging.info(msg)
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
if quitFinder:
applescript="\'tell application \"Finder\" to launch\'" #turn Finder back on
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
| mit | -25,841,017,926,910,716 | 57.966942 | 209 | 0.683372 | false |
cloudnautique/rancher-compose | tests/integration/cattletest/core/test_compose.py | 1 | 57541 | from common_fixtures import * # NOQA
import subprocess
from subprocess import Popen
from os import path
import os
import sys
import pytest
import cattle
import ConfigParser
PROJECTS = []
CERT = '''-----BEGIN CERTIFICATE-----
MIIDJjCCAg4CCQDLCSjwGXM72TANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJB
VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
cyBQdHkgTHRkMQ4wDAYDVQQDEwVhbGVuYTAeFw0xNTA3MjMwMzUzMDdaFw0xNjA3
MjIwMzUzMDdaMFUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEw
HwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDjAMBgNVBAMTBWFsZW5h
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxdVIDGlAySQmighbfNqb
TtqetENPXjNNq1JasIjGGZdOsmFvNciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg
1FECgW7oo6DOET74swUywtq/2IOeik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFm
fP5gDgthrWBWlEPTPY1tmPjI2Hepu2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqT
uo6M2QCgSX3E1kXLnipRT6jUh0HokhFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKX
EVP1Tlw0y1ext2ppS1NR9Sg46GP4+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4
LQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA45V0bnGPhIIkb54Gzjt9jyPJxPVTW
mwTCP+0jtfLxAor5tFuCERVs8+cLw1wASfu4vH/yHJ/N/CW92yYmtqoGLuTsywJt
u1+amECJaLyq0pZ5EjHqLjeys9yW728IifDxbQDX0cj7bBjYYzzUXp0DB/dtWb/U
KdBmT1zYeKWmSxkXDFFSpL/SGKoqx3YLTdcIbgNHwKNMfTgD+wTZ/fvk0CLxye4P
n/1ZWdSeZPAgjkha5MTUw3o1hjo/0H0ekI4erZFrZnG2N3lDaqDPR8djR+x7Gv6E
vloANkUoc1pvzvxKoz2HIHUKf+xFT50xppx6wsQZ01pNMSNF0qgc1vvH
-----END CERTIFICATE-----
'''
KEY = '''-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAxdVIDGlAySQmighbfNqbTtqetENPXjNNq1JasIjGGZdOsmFv
NciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg1FECgW7oo6DOET74swUywtq/2IOe
ik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFmfP5gDgthrWBWlEPTPY1tmPjI2Hep
u2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqTuo6M2QCgSX3E1kXLnipRT6jUh0Ho
khFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKXEVP1Tlw0y1ext2ppS1NR9Sg46GP4
+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4LQIDAQABAoIBAEKeWL29L9DL+KJg
wBYiM0xxeCHxzKdHFW+Msvdhh3wUpK6S+vUclxb3NHA96RnhU8EH3jeMokDADkTr
Us1eiy2T/gkCBRscymeqUetO49IUAahyYg/nU1X7pg7eQmNkSnHmvQhE3UDjQNdJ
zJYkrROIQWZZVNIib+VLlbXTi0WIYcoukS+Jy2lfABLZbYVFMOEOv5IfRvXTjcgc
jiHUbamYM9ADR/mtupFTShyVV2UBoI8cuWSPJnWNHZ39TN61owNoVycxfagBlheO
Jb07cY0DSSx9968RYRzX9YGMUCpnoleWG5Qg29ySaLDJWqpEkNXdeJlJ+0RzErFr
TrnlXMECgYEA6OTUpfRHu8m1yhqF9HK0+aiOPVLBOkFc55Ja/dBaSApaYtcU5ZYe
IlCgGRM1+3G3bzwrwunbAdGVKdd+SiXLY5+p08HW0sFSgebdkRtcTmbq1Gvns+Fx
ZUX9QBxZq7jiQjHde68y1kpSqJfjeHktZ1voueZ0JUZwx9c7YDC/+V0CgYEA2XX1
W9f7b4Om740opDwgSLIEgIpBqSrSoJQQNzcOAWbY2CTY5xUqM9WbitlgbJ9Bo0Zo
jyHmsp3CLGz8onv7wlR67WJSqriedIBJLQD2DnmQpb3j61rNLruhcxTC5phtBheN
0ZQrO0SmfCjevLefc3jmB0Uu9qfvkoZoJPXAfRECgYEAvxbK+CPYG9fkhiB/GtRn
c5V+qAhXrUHmRceLS0iCWyvLf9/0MHCc5xD6W7isiVSD6wwW6AXTgcmCN2OuJo6e
NG7T/IDGkAS5ewZ/c8lcUqQVOBgVdD2dOjhUFB9u3/yCAUhC73IQJ02yRszhgn8C
5xS9fpL9Z3xFm2MZP9KgIa0CgYBksg1ygPmp8pF7fabjHgBpCR2yk9LBzdWIi+dS
Wgj/NyuUMsPJhXBsXi5PRkczJS+Utoa2OKGF9i0yuyjk6Hp0yv+9KnlTGngtRDYe
Q8Ksgzgqt1px4jL+v92L14JEmzJozsFZ2b2HDUv2VEqHopOQOdxyY2PSzYLPG7Pf
4XhHsQKBgEfRPtokHpt+dJ6RhdUTEQAoT2jDVGhZLaYbtGh5Jtf2F5mhQR3UlvVi
FH/0iMK8IRo8XhFw0lrmZvY0rC0ycFGewvdW5oSvZvStATObGRMHUYNdbMEAMu86
dkOGpBSMzSXoZ2d0rKcetwRWZqUadDJnakNfZkjIY64sbd5Vo4ev
-----END RSA PRIVATE KEY-----
'''
class Compose(object):
def __init__(self, client, compose_bin):
self.compose_bin = compose_bin
self.client = client
def check_retcode(self, input, check_retcode, *args, **kw):
p = self.call(*args, **kw)
output = p.communicate(input=input)
retcode = p.wait()
assert check_retcode == retcode
return output
def check_call(self, input, *args):
p = self.call(*args)
output = p.communicate(input=input)
retcode = p.wait()
assert 0 == retcode
return output
def call(self, *args, **kw):
env = {
'RANCHER_CLIENT_DEBUG': 'true',
'RANCHER_ACCESS_KEY': self.client._access_key,
'RANCHER_SECRET_KEY': self.client._secret_key,
'RANCHER_URL': self.client._url,
}
cmd = [self.compose_bin]
cmd.extend(args)
kw_args = {
'env': env,
'stdin': subprocess.PIPE,
'stdout': sys.stdout,
'stderr': sys.stderr,
'cwd': _base(),
}
kw_args.update(kw)
return Popen(cmd, **kw_args)
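# Illustrative sketch (not part of the original suite) of how the tests below
# drive the Compose helper; the stack name and YAML used here are made up.
def _example_compose_usage(compose):
    template = '''
web:
  image: nginx
'''
    # '-f -' makes rancher-compose read the YAML from stdin; '-p' names the stack.
    compose.check_call(template, '--verbose', '-f', '-', '-p', 'example-stack',
                       'create')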
@pytest.fixture(scope='session')
def client(admin_user_client, request):
try:
return cattle.from_env(url=os.environ['RANCHER_URL'],
access_key=os.environ['RANCHER_ACCESS_KEY'],
secret_key=os.environ['RANCHER_SECRET_KEY'])
except KeyError:
pass
try:
config = ConfigParser.ConfigParser()
config.read(path.join(_base(), '../../tox.ini'))
return cattle.from_env(url=config.get('rancher', 'url'),
access_key=config.get('rancher', 'access-key'),
secret_key=config.get('rancher', 'secret-key'))
except ConfigParser.NoOptionError:
pass
return new_context(admin_user_client, request).client
def _file(f):
return path.join(_base(), '../../../../{}'.format(f))
def _base():
return path.dirname(__file__)
@pytest.fixture(scope='session')
def compose_bin():
c = _file('bin/rancher-compose')
assert path.exists(c)
return c
def _clean_all(client):
for p in PROJECTS:
client.delete(p)
@pytest.fixture(scope='session')
def compose(client, compose_bin, request):
return new_compose(client, compose_bin, request)
def new_compose(client, compose_bin, request):
request.addfinalizer(lambda: _clean_all(client))
return Compose(client, compose_bin)
def create_project(compose, operation='create', project_name=None, file=None,
input=None):
if project_name is None:
project_name = random_str()
if file is not None:
compose.check_call(None, '--verbose', '-f', file, '-p', project_name,
operation)
elif input is not None:
compose.check_call(input, '--verbose', '-f', '-', '-p', project_name,
operation)
PROJECTS.append(project_name)
return project_name
@pytest.mark.skipif('True')
def test_build(client, compose):
project_name = create_project(compose, file='assets/build/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'fromfile'
assert service.launchConfig.build.dockerfile == 'subdir/Dockerfile'
assert service.launchConfig.build.remote is None
assert service.launchConfig.build.context.startswith('https://')
def test_args(client, compose):
project_name = create_project(compose, file='assets/full.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
service = find_one(project.services)
assert service.name == 'web'
assert service.launchConfig.command == ['/bin/sh', '-c']
assert service.launchConfig.imageUuid == 'docker:nginx'
assert set(service.launchConfig.ports) == {'80:81/tcp', '123/tcp'}
assert service.launchConfig.dataVolumes == ['/tmp/foo', '/tmp/x:/tmp/y']
assert service.launchConfig.environment == {'foo': 'bar', 'a': 'b'}
assert service.launchConfig.dns == ['8.8.8.8', '1.1.1.1']
assert service.launchConfig.capAdd == ['ALL', 'SYS_ADMIN']
assert service.launchConfig.capDrop == ['NET_ADMIN', 'SYS_ADMIN']
assert service.launchConfig.dnsSearch == ['foo.com', 'bar.com']
assert service.launchConfig.entryPoint == ['/bin/foo', 'bar']
assert service.launchConfig.workingDir == '/somewhere'
assert service.launchConfig.user == 'somebody'
assert service.launchConfig.hostname == 'myhostname'
assert service.launchConfig.domainName == 'example.com'
assert service.launchConfig.memory == 100
assert service.launchConfig.memorySwap == 101
assert service.launchConfig.privileged
assert service.launchConfig.stdinOpen
assert service.launchConfig.tty
assert 'name' not in service.launchConfig
assert service.launchConfig.cpuShares == 42
assert service.launchConfig.cpuSet == '1,2'
assert service.launchConfig.devices == ['/dev/sda:/dev/a:rwm',
'/dev/sdb:/dev/c:ro']
s = 'io.rancher.service.selector.'
assert service.launchConfig.labels['io.rancher.service.hash'] is not None
del service.launchConfig.labels['io.rancher.service.hash']
assert service.launchConfig.labels == {'a': 'b',
s + 'link': 'bar in (a,b)',
s + 'container': 'foo',
'c': 'd'}
assert service.selectorLink == 'bar in (a,b)'
assert service.selectorContainer == 'foo'
assert service.launchConfig.securityOpt == ['label:foo', 'label:bar']
assert service.launchConfig.pidMode == 'host'
assert service.launchConfig.logConfig == {
'driver': 'syslog',
'config': {
'tag': 'foo',
}
}
assert service.launchConfig.extraHosts == ['host:1.1.1.1', 'host:2.2.2.2']
assert service.launchConfig.networkMode == 'host'
assert service.launchConfig.volumeDriver == 'foo'
assert service.launchConfig.build == {
'dockerfile': 'something/other',
'remote': 'github.com/ibuildthecloud/tiny-build',
}
# Not supported
# assert service.launchConfig.externalLinks == ['foo', 'bar']
def test_git_build(client, compose):
template = '''
nginx:
build: github.com/ibuildthecloud/tiny-build
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.build == {
'remote': 'github.com/ibuildthecloud/tiny-build',
}
assert service.launchConfig.imageUuid is not None
prefix = 'docker:{}_nginx_'.format(project_name)
assert service.launchConfig.imageUuid.startswith(prefix)
def test_circular_sidekick(client, compose):
template = '''
primary:
stdin_open: true
image: busybox
command: cat
labels:
io.rancher.sidekicks: secondary
volumes_from:
- secondary
secondary:
stdin_open: true
image: busybox
command: cat
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.dataVolumesFromLaunchConfigs == ['secondary']
secondary = filter(lambda x: x.name == 'secondary',
service.secondaryLaunchConfigs)
assert len(secondary) == 1
def test_delete(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.state == 'inactive'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
service = client.wait_success(service)
assert service.state == 'active'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'rm', '--force')
service = client.wait_success(service)
assert service.state == 'removed'
def test_delete_while_stopped(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.state == 'inactive'
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'rm', 'web')
service = client.wait_success(service)
assert service.state == 'removed'
def test_network_bridge(client, compose):
template = '''
web:
net: bridge
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'bridge'
def test_network_none(client, compose):
template = '''
web:
net: none
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'none'
def test_network_container(compose, client):
template = '''
foo:
labels:
io.rancher.sidekicks: web
image: nginx
web:
net: container:foo
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
assert service.secondaryLaunchConfigs[0].networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'foo'
def test_network_managed(client, compose):
template = '''
web:
net: managed
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
def test_network_default(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.networkMode == 'managed'
def test_env_file(client, compose):
project_name = create_project(compose, file='assets/base.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
second = _get_service(project.services(), 'base')
assert second.launchConfig.environment == {
'bar': 'baz',
'd': 'e',
'env': '2',
'foo': 'bar',
'a': 'b',
}
def test_extends(client, compose):
project_name = create_project(compose, file='assets/base.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
base = _get_service(project.services(), 'base')
local = _get_service(project.services(), 'local')
other_base = _get_service(project.services(), 'other-base')
assert base.launchConfig.imageUuid == 'docker:second'
assert local.launchConfig.imageUuid == 'docker:local'
assert local.launchConfig.ports == ['80/tcp']
assert local.launchConfig.environment == {'key': 'value'}
assert other_base.launchConfig.ports == ['80/tcp', '81/tcp']
assert other_base.launchConfig.imageUuid == 'docker:other'
assert other_base.launchConfig.environment == {'key': 'value',
'key2': 'value2'}
def test_extends_1556(client, compose):
project_name = create_project(compose,
file='assets/extends/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert project.name == project_name
web = _get_service(project.services(), 'web')
db = _get_service(project.services(), 'db')
assert web.launchConfig.imageUuid == 'docker:ubuntu:14.04'
assert db.launchConfig.imageUuid == 'docker:ubuntu:14.04'
web = find_one(db.consumedservices)
assert web.name == 'web'
def test_extends_1556_2(compose):
with pytest.raises(AssertionError):
create_project(compose, file='assets/extends_2/docker-compose.yml')
def test_lb_private(client, compose):
template = '''
lb:
expose:
- 111:222
- 222:333/tcp
image: rancher/load-balancer-service
ports:
- 80
links:
- web
- web2
web:
image: nginx
web2:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
lb = _get_service(project.services(), 'lb')
assert lb.launchConfig.expose == ['111:222', '222:333/tcp']
def test_lb_basic(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80
links:
- web
- web2
web:
image: nginx
web2:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
web2 = _get_service(project.services(), 'web2')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 2
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == []
elif map.consumedServiceId == web2.id:
assert map.ports == []
else:
assert False
assert lb.type == 'loadBalancerService'
assert lb.launchConfig.ports == ['80']
def test_lb_default_port_http(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 7900:80/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
assert lb.launchConfig.ports == ['7900:80/tcp']
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
assert lb.launchConfig.ports == ['7900:80/tcp']
def test_lb_default_port_with_mapped_tcp(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
assert lb.launchConfig.ports == ['80:8080/tcp']
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
assert lb.launchConfig.ports == ['80:8080/tcp']
def test_lb_default_port_with_tcp(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80/tcp
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.consumedServiceId == web.id
assert map.ports == []
    assert lb.launchConfig.ports == ['80/tcp']
def test_lb_path_space_target(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080
labels:
io.rancher.loadbalancer.target.web: "hostname/path:6000,
7000"
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 1
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == ['hostname/path:6000',
'7000']
else:
assert False
assert lb.type == 'loadBalancerService'
def test_lb_path_name(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80:8080
labels:
io.rancher.loadbalancer.target.web: hostname/path:6000,hostname:7000
io.rancher.loadbalancer.target.web2: 9000
links:
- web
- web2
- web3
web:
image: nginx
web2:
image: nginx
web3:
image: nginx'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 4
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
web2 = _get_service(project.services(), 'web2')
    web3 = _get_service(project.services(), 'web3')
maps = client.list_service_consume_map(serviceId=lb.id)
assert len(maps) == 3
for map in maps:
if map.consumedServiceId == web.id:
assert map.ports == ['hostname/path:6000',
'hostname:7000']
elif map.consumedServiceId == web2.id:
assert map.ports == ['9000']
elif map.consumedServiceId == web3.id:
assert map.ports == []
assert lb.launchConfig.ports == ['80:8080']
assert lb.type == 'loadBalancerService'
def test_lb_path_name_minimal(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 84
links:
- web
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
web = _get_service(project.services(), 'web')
map = find_one(client.list_service_consume_map, serviceId=lb.id)
assert map.ports == []
assert map.consumedServiceId == web.id
assert lb.type == 'loadBalancerService'
assert lb.launchConfig.ports == ['84']
def test_lb_full_config(client, compose):
project_name = create_project(compose, file='assets/lb/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
_get_service(project.services(), 'web')
assert lb.type == 'loadBalancerService'
assert lb.loadBalancerConfig.appCookieStickinessPolicy.cookie == 'foo'
assert lb.loadBalancerConfig.appCookieStickinessPolicy.maxLength == 1024
assert 'prefix' not in lb.loadBalancerConfig.appCookieStickinessPolicy
assert lb.loadBalancerConfig.appCookieStickinessPolicy.requestLearn
assert lb.loadBalancerConfig.appCookieStickinessPolicy.mode == \
'path_parameters'
assert lb.loadBalancerConfig.haproxyConfig['global'] == 'foo bar\n'
assert lb.loadBalancerConfig.haproxyConfig.defaults == 'def 1\n'
def test_links(client, compose):
template = '''
web:
image: nginx
db:
image: mysql
links:
- web
other:
image: foo
links:
- web
- db
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web = _get_service(project.services(), 'web')
db = _get_service(project.services(), 'db')
other = _get_service(project.services(), 'other')
assert len(web.consumedservices()) == 0
db_consumed = db.consumedservices()
assert len(db_consumed) == 1
assert db_consumed[0].name == 'web'
other_consumed = other.consumedservices()
assert len(other_consumed) == 2
names = {i.name for i in other_consumed}
assert names == {'web', 'db'}
def test_volumes_from(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: db
image: nginx
db:
image: mysql
volumes_from:
- web
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.secondaryLaunchConfigs[0].dataVolumesFromLaunchConfigs == \
['web']
def test_sidekick_simple(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: log
image: nginx
log:
image: mysql
log2:
image: bar
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
services = project.services()
service = _get_service(services, 'web')
log2 = _get_service(services, 'log2')
assert len(services) == 2
assert service.name == 'web'
assert service.launchConfig.imageUuid == 'docker:nginx'
assert service.launchConfig.networkMode == 'managed'
assert len(service.secondaryLaunchConfigs) == 1
assert service.secondaryLaunchConfigs[0].name == 'log'
assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
assert service.secondaryLaunchConfigs[0].networkMode == 'managed'
assert log2.name == 'log2'
assert log2.launchConfig.imageUuid == 'docker:bar'
def test_sidekick_container_network(client, compose):
template = '''
web:
labels:
io.rancher.sidekicks: log
image: nginx
log:
net: container:web
image: mysql
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.launchConfig.imageUuid == 'docker:nginx'
assert len(service.secondaryLaunchConfigs) == 1
assert service.secondaryLaunchConfigs[0].name == 'log'
assert service.secondaryLaunchConfigs[0].imageUuid == 'docker:mysql'
assert service.secondaryLaunchConfigs[0].networkMode == 'container'
assert service.secondaryLaunchConfigs[0].networkLaunchConfig == 'web'
def test_not_external_service_hostname(client, compose):
template = '''
web:
hostname: foo
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'service'
assert service.launchConfig.hostname == 'foo'
def test_external_service_hostname(client, compose):
project_name = create_project(compose, file='assets/hostname/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'externalService'
assert service.hostname == 'example.com'
def test_external_ip(client, compose):
project_name = create_project(compose, file='assets/externalip/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.type == 'externalService'
assert service.externalIpAddresses == ['1.1.1.1', '2.2.2.2']
assert service.healthCheck.healthyThreshold == 2
def test_service_inplace_rollback(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'active'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-r',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
assert s2.launchConfig.imageUuid == 'docker:nginx'
def test_service_inplace_upgrade_inactive(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'inactive'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
def test_service_inplace_upgrade(client, compose):
project_name = random_str()
template = '''
web:
image: nginx
'''
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
assert s.state == 'active'
template = '''
web:
image: nginx:1.9.5
'''
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-u',
'-d')
s2 = find_one(project.services)
assert s.launchConfig.labels['io.rancher.service.hash'] != \
s2.launchConfig.labels['io.rancher.service.hash']
assert s2.launchConfig.imageUuid == 'docker:nginx:1.9.5'
assert s2.state == 'upgraded'
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-c',
'-d')
s2 = find_one(project.services)
assert s2.state == 'active'
def test_service_hash_with_rancher(client, compose):
project_name = create_project(compose,
file='assets/hash-no-rancher/test.yml')
project = find_one(client.list_environment, name=project_name)
s = find_one(project.services)
project_name = create_project(compose,
file='assets/hash-with-rancher/test.yml')
project = find_one(client.list_environment, name=project_name)
s2 = find_one(project.services)
assert s.metadata['io.rancher.service.hash'] is not None
assert s2.metadata['io.rancher.service.hash'] is not None
assert s.metadata['io.rancher.service.hash'] != \
s2.metadata['io.rancher.service.hash']
def test_service_hash_no_change(client, compose):
template = '''
web1:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web = find_one(project.services)
assert web.metadata['io.rancher.service.hash'] is not None
assert web.launchConfig.labels['io.rancher.service.hash'] is not None
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
web2 = find_one(project.services)
assert web.metadata['io.rancher.service.hash'] == \
web2.metadata['io.rancher.service.hash']
assert web.launchConfig.labels['io.rancher.service.hash'] == \
web2.launchConfig.labels['io.rancher.service.hash']
def test_dns_service(client, compose):
template = '''
web1:
image: nginx
web2:
image: nginx
web:
image: rancher/dns-service
links:
- web1
- web2
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
services = project.services()
assert len(services) == 3
web = _get_service(services, 'web')
assert web.type == 'dnsService'
consumed = web.consumedservices()
assert len(consumed) == 2
names = {x.name for x in consumed}
assert names == {'web1', 'web2'}
def test_up_relink(client, compose):
template = '''
lb:
image: rancher/load-balancer-service
ports:
- 80
links:
- web
labels:
a: b
c: d
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
lb = _get_service(project.services(), 'lb')
consumed = lb.consumedservices()
assert len(consumed) == 1
assert consumed[0].name == 'web'
del lb.launchConfig.labels['io.rancher.service.hash']
assert lb.launchConfig.labels == {
'a': 'b',
'c': 'd',
}
template2 = '''
lb:
image: nginx
ports:
- 80
links:
- web2
web2:
image: nginx
'''
compose.check_call(template2, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
def check():
x = lb.consumedservices()
if len(x) == 1:
return x
consumed = wait_for(check, timeout=5)
assert len(consumed) == 1
assert consumed[0].name == 'web2'
def test_service_upgrade_from_nil(client, compose):
template = '''
foo:
image: nginx
web2:
image: nginx
'''
project_name = create_project(compose, input=template)
upgrade = '''
foo:
image: nginx
web:
image: nginx
web2:
image: nginx
'''
compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2')
def test_service_upgrade_no_global_on_src(client, compose):
template = '''
web:
image: nginx
labels:
io.rancher.scheduler.global: true
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
upgrade = '''
web2:
image: nginx
'''
out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    assert out.find('Upgrade is not supported for global services') != -1
assert len(project.services()) == 1
def test_service_upgrade_no_global_on_dest(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
upgrade = '''
web2:
image: nginx
labels:
io.rancher.scheduler.global: true
'''
out, err = compose.check_retcode(upgrade, 1, '-p', project_name, '-f',
'-', 'upgrade', 'web', 'web2',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    assert out.find('Upgrade is not supported for global services') != -1
def test_service_map_syntax(client, compose):
template = '''
foo:
image: nginx
links:
web: alias
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
maps = client.list_serviceConsumeMap(serviceId=foo.id)
assert len(maps) == 1
assert maps[0].name == 'alias'
def test_cross_stack_link(client, compose):
template = '''
dest:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
dest = _get_service(project.services(), 'dest')
template = '''
src:
external_links:
- {}/dest
image: nginx
'''.format(project_name)
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
src = _get_service(project.services(), 'src')
services = src.consumedservices()
assert len(services) == 1
assert services[0].id == dest.id
def test_up_deletes_links(client, compose):
template = '''
dest:
image: busybox
command: cat
stdin_open: true
tty: true
src:
image: busybox
command: cat
stdin_open: true
tty: true
links:
- dest
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
src = _get_service(project.services(), 'src')
services = src.consumedservices()
assert len(services) == 1
template = '''
src:
image: nginx
'''
compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
services = src.consumedservices()
assert len(services) == 0
def test_upgrade_no_source(client, compose):
project_name = random_str()
compose.check_retcode(None, 1, '-p', project_name, '-f',
'assets/upgrade-ignore-scale/docker-compose.yml',
'upgrade', '--interval', '1000',
'--scale=2', 'from', 'to')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
def test_upgrade_ignore_scale(client, compose):
project_name = create_project(compose, file='assets/upgrade-ignore-scale/'
'docker-compose-source.yml')
compose.check_call(None, '--verbose', '-f', 'assets/upgrade-ignore-scale/'
'docker-compose-source.yml',
'-p', project_name, 'up', '-d')
project = find_one(client.list_environment, name=project_name)
compose.check_call(None, '-p', project_name, '-f',
'assets/upgrade-ignore-scale/docker-compose.yml',
'upgrade', '--pull', '--interval', '1000',
'--scale=2', 'from', 'to')
f = _get_service(project.services(), 'from')
to = _get_service(project.services(), 'to')
assert to.scale <= 2
f = client.wait_success(f)
to = client.wait_success(to)
assert f.scale == 0
assert to.scale == 2
assert to.state == 'active'
def test_service_link_with_space(client, compose):
template = '''
foo:
image: nginx
links:
- "web: alias"
web:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
maps = client.list_serviceConsumeMap(serviceId=foo.id)
assert len(maps) == 1
assert maps[0].name == 'alias'
def test_circle_simple(client, compose):
template = '''
foo:
image: nginx
links:
- web
web:
image: nginx
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
web = _get_service(project.services(), 'web')
s = find_one(foo.consumedservices)
assert s.name == 'web'
s = find_one(web.consumedservices)
assert s.name == 'foo'
def test_one_circle(client, compose):
template = '''
foo:
image: nginx
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
s = find_one(foo.consumedservices)
assert s.name == 'foo'
def test_circle_madness(client, compose):
template = '''
foo:
image: nginx
links:
- foo
- foo2
- foo3
foo2:
image: nginx
links:
- foo
- foo2
- foo3
foo3:
image: nginx
links:
- foo
- foo2
- foo3
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'up', '-d')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
foo2 = _get_service(project.services(), 'foo2')
foo3 = _get_service(project.services(), 'foo3')
assert len(foo.consumedservices()) == 3
assert len(foo2.consumedservices()) == 3
assert len(foo3.consumedservices()) == 3
def test_variables(client, compose):
project_name = random_str()
compose.check_call(None, '--env-file', 'assets/env-file/env-file',
'--verbose', '-f', 'assets/env-file/docker-compose.yml',
'-p', project_name, 'create')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.launchConfig.imageUuid == 'docker:B'
assert service.launchConfig.labels['var'] == 'B'
assert service.metadata.var == 'E'
assert service.metadata.var2 == ''
def test_metadata_on_service(client, compose):
project_name = create_project(compose, file='assets/metadata/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.metadata.test1[0] == 'one'
assert service.metadata.test1[1] == 'two'
assert service.metadata.test2.name == "t2name"
assert service.metadata.test2.value == "t2value"
assert service.metadata.test3
assert service.metadata.test4[0].test5.name == "t5name"
assert service.metadata.test4[1].test6.name == "t6name"
assert service.metadata.test4[1].test6.value == "t6value"
assert service.metadata.test7.test7nest.test7nestofnest[0].test7dot1.name \
== "test7dot1name"
assert service.metadata.test7.test7nest.test7nestofnest[1].test7dot2.name \
== "test7dot2name"
assert service.metadata.test8[0].test8a[0].name == "test8a"
assert service.metadata.test8[0].test8a[0].value == "test8avalue"
assert service.metadata.test8[0].test8a[1].name == "test8ab"
assert service.metadata.test8[0].test8a[1].value == "test8abvalue"
assert service.metadata.test8[1].test8b[0].name == "test8ba"
assert service.metadata.test8[1].test8b[0].value == "test8bavalue"
def test_healthchecks(client, compose):
project_name = create_project(compose, file='assets/health/test.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert service.name == 'web'
assert service.launchConfig.healthCheck.port == 80
assert service.launchConfig.healthCheck.interval == 2000
assert service.launchConfig.healthCheck.unhealthyThreshold == 3
assert service.launchConfig.healthCheck.requestLine == \
"OPTIONS /ping HTTP/1.1\r\nHost:\\ www.example.com"
def _get_service(services, name):
service = None
for i in services:
if i.name == name:
service = i
break
assert service is not None
return service
def test_restart_no(client, compose):
template = '''
web:
image: nginx
restart: no
'''
project_name = create_project(compose, input=template)
find_one(client.list_environment, name=project_name)
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
p = find_one(client.list_environment, name=project_name)
find_one(p.services)
def test_stack_case(client, compose):
template = '''
web:
image: nginx
'''
project_name = create_project(compose, input=template)
find_one(client.list_environment, name=project_name)
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
compose.check_call(template, '--verbose', '-f', '-', '-p',
project_name.upper(), 'up', '-d')
find_one(client.list_environment, name=project_name)
@pytest.mark.skipif('True')
def test_certs(new_context, compose_bin, request):
client = new_context.client
compose = new_compose(client, compose_bin, request)
cert = client.create_certificate(name='cert1',
cert=CERT,
certChain=CERT,
key=KEY)
cert2 = client.create_certificate(name='cert2',
cert=CERT,
certChain=CERT,
key=KEY)
cert = client.wait_success(cert)
cert2 = client.wait_success(cert2)
assert cert.state == 'active'
assert cert2.state == 'active'
project_name = create_project(compose,
file='assets/ssl/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
assert lb.defaultCertificateId == cert.id
assert lb.certificateIds == [cert.id, cert2.id]
def test_cert_not_found(new_context, compose_bin, request):
compose = new_compose(new_context.client, compose_bin, request)
compose.check_retcode(None, 1, '-p', random_str(), '-f',
'assets/ssl/docker-compose.yml', 'create')
def test_cert_removed(new_context, compose_bin, request):
client = new_context.client
compose = new_compose(client, compose_bin, request)
cert = client.create_certificate(name='cert1',
cert=CERT,
certChain=CERT,
key=KEY)
cert2 = client.create_certificate(name='cert2',
cert=CERT,
certChain=CERT,
key=KEY)
cert = client.wait_success(cert)
cert2 = client.wait_success(cert2)
assert cert.state == 'active'
assert cert2.state == 'active'
cert2 = client.wait_success(cert2.remove())
wait_for(
lambda: len(client.list_certificate()) == 1
)
cert3 = client.create_certificate(name='cert2',
cert=CERT,
certChain=CERT,
key=KEY)
cert3 = client.wait_success(cert3)
assert cert3.state == 'active'
project_name = create_project(compose,
file='assets/ssl/docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
lb = _get_service(project.services(), 'lb')
assert lb.defaultCertificateId == cert.id
assert lb.certificateIds == [cert.id, cert3.id]
def test_project_name(client, compose):
project_name = 'FooBar23-' + random_str()
stack = client.create_environment(name=project_name)
stack = client.wait_success(stack)
assert stack.state == 'active'
template = '''
web:
image: nginx
'''
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
assert len(project.services()) == 1
def test_project_name_case_insensitive(client, compose):
project_name = 'FooBar23-' + random_str()
stack = client.create_environment(name=project_name)
stack = client.wait_success(stack)
assert stack.state == 'active'
template = '''
web:
image: nginx
'''
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
project_name = project_name.replace('FooBar', 'fOoBaR')
assert project_name.startswith('fOoBaR')
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'create')
assert len(project.services()) == 1
def test_project_name_with_dots(client, compose):
project_name = 'something-with-dashes-v0-2-6'
bad_project_name = 'something-with-dashes-v0.2.6'
ret = client.list_environment(name=project_name)
assert len(ret) == 0
compose.check_call(None, '--verbose', '-f',
'assets/{}/docker-compose.yml'.format(bad_project_name),
'create')
ret = client.list_environment(name=project_name)
assert len(ret) == 1
def test_create_then_up_on_circle(client, compose):
template = '''
etcd-lb:
image: rancher/load-balancer-service
links:
- etcd0
- etcd1
- etcd2
etcd0:
stdin_open: true
image: busybox
command: cat
links:
- etcd1
- etcd2
etcd1:
stdin_open: true
image: busybox
command: cat
links:
- etcd0
- etcd2
etcd2:
stdin_open: true
image: busybox
command: cat
links:
- etcd0
- etcd1
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
etcd_lb = _get_service(project.services(), 'etcd-lb')
etcd0 = _get_service(project.services(), 'etcd0')
etcd1 = _get_service(project.services(), 'etcd1')
etcd2 = _get_service(project.services(), 'etcd2')
assert len(etcd_lb.consumedservices()) == 3
assert len(etcd0.consumedservices()) == 2
assert len(etcd1.consumedservices()) == 2
assert len(etcd2.consumedservices()) == 2
assert len(etcd_lb.consumedservices()) == 3
compose.check_call(template, '-f', '-', '-p', project_name, 'up', '-d')
assert len(etcd_lb.consumedservices()) == 3
assert len(etcd0.consumedservices()) == 2
assert len(etcd1.consumedservices()) == 2
assert len(etcd2.consumedservices()) == 2
def test_expose_port_ignore(client, compose):
template = '''
foo:
image: nginx
expose:
- 1234
links:
- foo
'''
project_name = random_str()
compose.check_call(template, '-p', project_name, '-f',
'-', 'create')
project = find_one(client.list_environment, name=project_name)
foo = _get_service(project.services(), 'foo')
assert 'ports' not in foo.launchConfig
def test_create_no_update_links(client, compose):
template = '''
foo:
image: nginx
links:
- foo2
foo2:
image: tianon/true
foo3:
image: tianon/true
'''
project_name = random_str()
compose.check_call(template, '--verbose', '-f', '-', '-p', project_name,
'up', '-d')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 3
foo = _get_service(project.services(), 'foo')
foo2 = find_one(foo.consumedservices)
assert foo2.name == 'foo2'
template2 = '''
foo:
image: tianon/true
links:
- foo3
foo2:
image: tianon/true
foo3:
image: tianon/true
'''
compose.check_call(template2, '-p', project_name, '-f', '-', 'create')
foo2 = find_one(foo.consumedservices)
assert foo2.name == 'foo2'
def test_pull_sidekick(client, compose):
template = '''
foo:
labels:
io.rancher.sidekicks: foo2
image: nginx
foo2:
image: tianon/true
'''
project_name = random_str()
out, err = compose.check_retcode(template, 0, '-p', project_name, '-f',
'-', 'pull', stdout=subprocess.PIPE)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 0
assert 'nginx' in out
assert 'tianon/true' in out
def test_service_schema(client, compose):
project_name = create_project(compose, file='assets/service-schema/'
'docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
service = find_one(project.services)
assert 'kubernetesReplicationController' in service.serviceSchemas
assert 'kubernetesService' in service.serviceSchemas
def test_no_update_selector_link(client, compose):
template = '''
parent:
labels:
io.rancher.service.selector.link: foo=bar
image: tianon/true
child:
labels:
foo: bar
image: tianon/true
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 2
parent = _get_service(project.services(), 'parent')
find_one(parent.consumedservices)
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d',
'parent')
parent = _get_service(project.services(), 'parent')
find_one(parent.consumedservices)
def test_sidekick_build_remote(client, compose):
template = '''
parent:
labels:
io.rancher.sidekicks: child
build: http://parent
dockerfile: parent-file
child:
build: http://child
dockerfile: child-file
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
parent = _get_service(project.services(), 'parent')
assert parent.launchConfig.build.remote == 'http://parent'
assert parent.launchConfig.build.dockerfile == 'parent-file'
assert len(parent.secondaryLaunchConfigs) == 1
assert parent.secondaryLaunchConfigs[0].build.remote == 'http://child'
assert parent.secondaryLaunchConfigs[0].build.dockerfile == 'child-file'
def test_sidekick_healthcheck(client, compose):
project_name = create_project(compose, file='assets/sidekick-health/'
'docker-compose.yml')
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
parent = _get_service(project.services(), 'parent')
assert parent.launchConfig.healthCheck.port == 80
assert parent.secondaryLaunchConfigs[0].healthCheck.port == 81
def test_force_upgrade_primary(client, compose):
template = '''
parent:
labels:
io.rancher.sidekicks: child
image: nginx
child:
image: nginx
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
assert len(project.services()) == 1
compose.check_call(template, '-p', project_name, '-f', '-', 'up', '-d')
parent = _get_service(project.services(), 'parent')
instances = parent.instances()
child_prefix = project_name + '_child'
child_id = [x.id for x in instances if x.name.startswith(child_prefix)]
assert len(instances) == 2
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'--force-upgrade', '-d', 'parent')
new_instances = parent.instances()
    new_child_id = [x.id for x in new_instances if x.name.startswith(child_prefix)]
assert child_id == new_child_id
ids = {x.id for x in instances}.union({x.id for x in new_instances})
assert len(ids) == 3
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'-c', '-d')
compose.check_call(template, '-p', project_name, '-f', '-', 'up',
'--force-upgrade', '-d')
ids = ids.union({x.id for x in parent.instances()})
assert len(ids) == 5
def test_virtual_machine(client, compose):
template = '''
vm:
type: virtualMachine
image: nginx
vcpu: 2
memory: 1024
userdata: |
#cloud-config
foo
disks:
- name: foo
size: 1g
opts:
foo: bar
- name: foo2
size: 2g
'''
project_name = create_project(compose, input=template)
project = find_one(client.list_environment, name=project_name)
vm = find_one(project.services)
assert vm.launchConfig.kind == 'virtualMachine'
assert vm.launchConfig.vcpu == 2
assert vm.launchConfig.userdata == '#cloud-config\nfoo\n'
assert vm.launchConfig.memoryMb == 1024
assert vm.launchConfig.disks[0] == {'name': 'foo', 'size': '1g',
'opts': {'foo': 'bar'}}
assert vm.launchConfig.disks[1] == {'name': 'foo2', 'size': '2g'}
| apache-2.0 | -6,945,807,674,935,528,000 | 29.252892 | 79 | 0.620705 | false |
jianghuaw/nova | nova/api/openstack/compute/extended_volumes.py | 1 | 3796 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
from nova.api.openstack import api_version_request
from nova.api.openstack import wsgi
from nova import context
from nova import objects
from nova.policies import extended_volumes as ev_policies
class ExtendedVolumesController(wsgi.Controller):
def _extend_server(self, context, server, req, bdms):
volumes_attached = []
for bdm in bdms:
if bdm.get('volume_id'):
volume_attached = {'id': bdm['volume_id']}
if api_version_request.is_supported(req, min_version='2.3'):
volume_attached['delete_on_termination'] = (
bdm['delete_on_termination'])
volumes_attached.append(volume_attached)
# NOTE(mriedem): The os-extended-volumes prefix should not be used for
# new attributes after v2.1. They are only in v2.1 for backward compat
# with v2.0.
key = "os-extended-volumes:volumes_attached"
server[key] = volumes_attached
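        # With microversion >= 2.3 each attached-volume entry carries both keys,
        # e.g. {'id': '<volume uuid>', 'delete_on_termination': False}; older
        # microversions expose only 'id'.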
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
server = resp_obj.obj['server']
bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
context, [server['id']])
instance_bdms = self._get_instance_bdms(bdms, server)
self._extend_server(context, server, req, instance_bdms)
@staticmethod
def _get_instance_bdms_in_multiple_cells(ctxt, servers):
instance_uuids = [server['id'] for server in servers]
inst_maps = objects.InstanceMappingList.get_by_instance_uuids(
ctxt, instance_uuids)
cell_mappings = {}
for inst_map in inst_maps:
if (inst_map.cell_mapping is not None and
inst_map.cell_mapping.uuid not in cell_mappings):
cell_mappings.update(
{inst_map.cell_mapping.uuid: inst_map.cell_mapping})
bdms = {}
for cell_mapping in cell_mappings.values():
with context.target_cell(ctxt, cell_mapping) as cctxt:
bdms.update(
objects.BlockDeviceMappingList.bdms_by_instance_uuid(
cctxt, instance_uuids))
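        # The merged dict maps each instance uuid to its list of block device
        # mappings, aggregated across every cell the instances live in.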
return bdms
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
servers = list(resp_obj.obj['servers'])
bdms = self._get_instance_bdms_in_multiple_cells(context, servers)
for server in servers:
instance_bdms = self._get_instance_bdms(bdms, server)
self._extend_server(context, server, req, instance_bdms)
def _get_instance_bdms(self, bdms, server):
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in the 'detail' or 'show' method.
# If that instance has since been deleted, it won't be in the
# 'bdms' dictionary though, so use 'get' to avoid KeyErrors.
return bdms.get(server['id'], [])
| apache-2.0 | 4,793,651,557,385,615,000 | 43.139535 | 78 | 0.629874 | false |
sailfish-sdk/sailfish-qtcreator | share/qtcreator/debugger/stdtypes.py | 1 | 37164 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
from dumper import *
def qform__std__array():
return arrayForms()
def qdump__std__array(d, value):
size = value.type[1]
d.putItemCount(size)
if d.isExpanded():
d.putPlotData(value.address(), size, value.type[0])
def qform__std____1__array():
return arrayForms()
def qdump__std____1__array(d, value):
qdump__std__array(d, value)
def qdump__std__function(d, value):
(ptr, dummy1, manager, invoker) = value.split('pppp')
if manager:
if ptr > 2:
d.putSymbolValue(ptr)
else:
d.putEmptyValue()
d.putBetterType(value.type)
else:
d.putValue('(null)')
d.putPlainChildren(value)
def qdump__std__complex(d, value):
innerType = value.type[0]
(real, imag) = value.split('{%s}{%s}' % (innerType.name, innerType.name))
d.putValue("(%s, %s)" % (real.display(), imag.display()))
d.putNumChild(2)
if d.isExpanded():
with Children(d, 2, childType=innerType):
d.putSubItem("real", real)
d.putSubItem("imag", imag)
def qdump__std____1__complex(d, value):
qdump__std__complex(d, value)
def qdump__std__deque(d, value):
if d.isQnxTarget():
qdump__std__deque__QNX(d, value)
return
if d.isMsvcTarget():
qdump__std__deque__MSVC(d, value)
return
innerType = value.type[0]
innerSize = innerType.size()
bufsize = 1
if innerSize < 512:
bufsize = 512 // innerSize
(mapptr, mapsize, startCur, startFirst, startLast, startNode,
finishCur, finishFirst, finishLast, finishNode) = value.split("pppppppppp")
size = bufsize * ((finishNode - startNode) // d.ptrSize() - 1)
size += (finishCur - finishFirst) // innerSize
size += (startLast - startCur) // innerSize
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if d.isExpanded():
with Children(d, size, maxNumChild=2000, childType=innerType):
pcur = startCur
plast = startLast
pnode = startNode
for i in d.childRange():
d.putSubItem(i, d.createValue(pcur, innerType))
pcur += innerSize
if pcur == plast:
newnode = pnode + d.ptrSize()
pfirst = d.extractPointer(newnode)
plast = pfirst + bufsize * d.ptrSize()
pcur = pfirst
pnode = newnode
def qdump__std____1__deque(d, value):
mptr, mfirst, mbegin, mend, start, size = value.split("pppptt")
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if d.isExpanded():
innerType = value.type[0]
innerSize = innerType.size()
ptrSize = d.ptrSize()
bufsize = (4096 // innerSize) if innerSize < 256 else 16
with Children(d, size, maxNumChild=2000, childType=innerType):
for i in d.childRange():
k, j = divmod(start + i, bufsize)
base = d.extractPointer(mfirst + k * ptrSize)
d.putSubItem(i, d.createValue(base + j * innerSize, innerType))
def qdump__std__deque__QNX(d, value):
innerType = value.type[0]
innerSize = innerType.size()
if innerSize <= 1:
bufsize = 16
elif innerSize <= 2:
bufsize = 8
elif innerSize <= 4:
bufsize = 4
elif innerSize <= 8:
bufsize = 2
else:
bufsize = 1
try:
val = value['_Mypair']['_Myval2']
except:
val = value
myoff = val['_Myoff'].integer()
mysize = val['_Mysize'].integer()
mapsize = val['_Mapsize'].integer()
d.check(0 <= mapsize and mapsize <= 1000 * 1000 * 1000)
d.putItemCount(mysize)
if d.isExpanded():
with Children(d, mysize, maxNumChild=2000, childType=innerType):
map = val['_Map']
for i in d.childRange():
block = myoff / bufsize
offset = myoff - (block * bufsize)
if mapsize <= block:
block -= mapsize
d.putSubItem(i, map[block][offset])
myoff += 1;
def qdump__std__deque__MSVC(d, value):
innerType = value.type[0]
innerSize = innerType.size()
if innerSize <= 1:
bufsize = 16
elif innerSize <= 2:
bufsize = 8
elif innerSize <= 4:
bufsize = 4
elif innerSize <= 8:
bufsize = 2
else:
bufsize = 1
(proxy, map, mapsize, myoff, mysize) = value.split("ppppp")
d.check(0 <= mapsize and mapsize <= 1000 * 1000 * 1000)
d.putItemCount(mysize)
if d.isExpanded():
with Children(d, mysize, maxNumChild=2000, childType=innerType):
for i in d.childRange():
if myoff >= bufsize * mapsize:
myoff = 0
buf = map + ((myoff // bufsize) * d.ptrSize())
address = d.extractPointer(buf) + ((myoff % bufsize) * innerSize)
d.putSubItem(i, d.createValue(address, innerType))
myoff += 1
def qdump__std____debug__deque(d, value):
qdump__std__deque(d, value)
def qdump__std__list(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value)
return
if value.type.size() == 3 * d.ptrSize():
# C++11 only.
(dummy1, dummy2, size) = value.split("ppp")
d.putItemCount(size)
else:
# Need to count manually.
p = d.extractPointer(value)
head = value.address()
size = 0
while head != p and size < 1001:
size += 1
p = d.extractPointer(p)
d.putItemCount(size, 1000)
if d.isExpanded():
p = d.extractPointer(value)
innerType = value.type[0]
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
d.putSubItem(i, d.createValue(p + 2 * d.ptrSize(), innerType))
p = d.extractPointer(p)
def qdump__std__list__QNX(d, value):
(proxy, head, size) = value.split("ppp")
d.putItemCount(size, 1000)
if d.isExpanded():
p = d.extractPointer(head)
innerType = value.type[0]
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
d.putSubItem(i, d.createValue(p + 2 * d.ptrSize(), innerType))
p = d.extractPointer(p)
def qdump__std____debug__list(d, value):
qdump__std__list(d, value)
def qdump__std____cxx11__list(d, value):
qdump__std__list(d, value)
def qdump__std____1__list(d, value):
if value.type.size() == 3 * d.ptrSize():
# C++11 only.
(dummy1, dummy2, size) = value.split("ppp")
d.putItemCount(size)
else:
# Need to count manually.
p = d.extractPointer(value)
head = value.address()
size = 0
while head != p and size < 1001:
size += 1
p = d.extractPointer(p)
d.putItemCount(size, 1000)
if d.isExpanded():
(prev, p) = value.split("pp")
innerType = value.type[0]
typeCode = "pp{%s}" % innerType.name
with Children(d, size, maxNumChild=1000, childType=innerType):
for i in d.childRange():
(prev, p, val) = d.split(typeCode, p)
d.putSubItem(i, val)
def qform__std__map():
return mapForms()
def qdump__std__map(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump_std__map__helper(d, value)
return
# stuff is actually (color, pad) with 'I@', but we can save cycles/
(compare, stuff, parent, left, right, size) = value.split('pppppp')
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
with Children(d, size, maxNumChild=1000):
node = value["_M_t"]["_M_impl"]["_M_header"]["_M_left"]
nodeSize = node.dereference().type.size()
typeCode = "@{%s}@{%s}" % (keyType.name, valueType.name)
for i in d.childRange():
(pad1, key, pad2, value) = d.split(typeCode, node.pointer() + nodeSize)
d.putPairItem(i, (key, value))
if node["_M_right"].pointer() == 0:
parent = node["_M_parent"]
while True:
if node.pointer() != parent["_M_right"].pointer():
break
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while True:
if node["_M_left"].pointer() == 0:
break
node = node["_M_left"]
def qdump_std__map__helper(d, value):
(proxy, head, size) = value.split("ppp")
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
pairType = value.type[3][0]
def helper(node):
(left, parent, right, color, isnil, pad, pair) = d.split("pppcc@{%s}" % (pairType.name), node)
if left != head:
for res in helper(left):
yield res
yield pair.split("{%s}@{%s}" % (keyType.name, valueType.name))[::2]
if right != head:
for res in helper(right):
yield res
(smallest, root) = d.split("pp", head)
with Children(d, size, maxNumChild=1000):
for (pair, i) in zip(helper(root), d.childRange()):
d.putPairItem(i, pair)
def qdump__std____debug__map(d, value):
qdump__std__map(d, value)
def qdump__std____debug__set(d, value):
qdump__std__set(d, value)
def qdump__std__multiset(d, value):
qdump__std__set(d, value)
def qdump__std____cxx1998__map(d, value):
qdump__std__map(d, value)
def qform__std__multimap():
return mapForms()
def qdump__std__multimap(d, value):
return qdump__std__map(d, value)
def qdumpHelper__std__tree__iterator(d, value, isSet=False):
if value.type.name.endswith("::iterator"):
treeTypeName = value.type.name[:-len("::iterator")]
elif value.type.name.endswith("::const_iterator"):
treeTypeName = value.type.name[:-len("::const_iterator")]
treeType = d.lookupType(treeTypeName)
keyType = treeType[0]
valueType = treeType[1]
node = value["_M_node"].dereference() # std::_Rb_tree_node_base
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
if isSet:
typecode = 'pppp@{%s}' % keyType.name
(color, parent, left, right, pad1, key) = d.split(typecode, node)
d.putSubItem("value", key)
else:
typecode = 'pppp@{%s}@{%s}' % (keyType.name, valueType.name)
(color, parent, left, right, pad1, key, pad2, value) = d.split(typecode, node)
d.putSubItem("first", key)
d.putSubItem("second", value)
with SubItem(d, "[node]"):
d.putNumChild(1)
d.putEmptyValue()
d.putType(" ")
if d.isExpanded():
with Children(d):
#d.putSubItem("color", color)
nodeType = node.type.pointer()
d.putSubItem("left", d.createValue(left, nodeType))
d.putSubItem("right", d.createValue(right, nodeType))
d.putSubItem("parent", d.createValue(parent, nodeType))
def qdump__std___Rb_tree_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std___Rb_tree_const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std__map__iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump____gnu_debug___Safe_iterator(d, value):
d.putItem(value["_M_current"])
def qdump__std__map__const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value)
def qdump__std__set__iterator(d, value):
qdumpHelper__std__tree__iterator(d, value, True)
def qdump__std__set__const_iterator(d, value):
qdumpHelper__std__tree__iterator(d, value, True)
def qdump__std____cxx1998__set(d, value):
qdump__std__set(d, value)
def qdumpHelper__std__tree__iterator_MSVC(d, value):
d.putNumChild(1)
d.putEmptyValue()
if d.isExpanded():
with Children(d):
childType = value.type[0][0][0]
(proxy, nextIter, node) = value.split("ppp")
(left, parent, right, color, isnil, pad, child) = \
d.split("pppcc@{%s}" % (childType.name), node)
if (childType.name.startswith("std::pair")):
# workaround that values created via split have no members
keyType = childType[0].name
valueType = childType[1].name
d.putPairItem(None, child.split("{%s}@{%s}" % (keyType, valueType))[::2])
else:
d.putSubItem("value", child)
def qdump__std___Tree_const_iterator(d, value):
qdumpHelper__std__tree__iterator_MSVC(d, value)
def qdump__std___Tree_iterator(d, value):
qdumpHelper__std__tree__iterator_MSVC(d, value)
def qdump__std__set(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__set__QNX(d, value)
return
impl = value["_M_t"]["_M_impl"]
size = impl["_M_node_count"].integer()
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
valueType = value.type[0]
node = impl["_M_header"]["_M_left"]
nodeSize = node.dereference().type.size()
typeCode = "@{%s}" % valueType.name
with Children(d, size, maxNumChild=1000, childType=valueType):
for i in d.childRange():
(pad, val) = d.split(typeCode, node.pointer() + nodeSize)
d.putSubItem(i, val)
if node["_M_right"].pointer() == 0:
parent = node["_M_parent"]
while node == parent["_M_right"]:
node = parent
parent = parent["_M_parent"]
if node["_M_right"] != parent:
node = parent
else:
node = node["_M_right"]
while node["_M_left"].pointer() != 0:
node = node["_M_left"]
def qdump__std__set__QNX(d, value):
(proxy, head, size) = value.split("ppp")
d.check(0 <= size and size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
childType=value.type[0]
def helper(node):
(left, parent, right, color, isnil, pad, value) = d.split("pppcc@{%s}" % childType.name, node)
if left != head:
for res in helper(left):
yield res
yield value
if right != head:
for res in helper(right):
yield res
(smallest, root) = d.split("pp", head)
with Children(d, size, maxNumChild=1000):
for (item, i) in zip(helper(root), d.childRange()):
d.putSubItem(i, item)
def std1TreeMin(d, node):
#_NodePtr __tree_min(_NodePtr __x):
# while (__x->__left_ != nullptr)
# __x = __x->__left_;
# return __x;
#
left = node['__left_']
if left.pointer():
node = left
return node
def std1TreeIsLeftChild(d, node):
# bool __tree_is_left_child(_NodePtr __x):
# return __x == __x->__parent_->__left_;
#
other = node['__parent_']['__left_']
return node.pointer() == other.pointer()
def std1TreeNext(d, node):
#_NodePtr __tree_next(_NodePtr __x):
# if (__x->__right_ != nullptr)
# return __tree_min(__x->__right_);
# while (!__tree_is_left_child(__x))
# __x = __x->__parent_;
# return __x->__parent_;
#
right = node['__right_']
if right.pointer():
return std1TreeMin(d, right)
while not std1TreeIsLeftChild(d, node):
node = node['__parent_']
return node['__parent_']
def qdump__std____1__set(d, value):
tree = value["__tree_"]
base3 = tree["__pair3_"].address()
size = d.extractUInt(base3)
d.check(size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
# type of node is std::__1::__tree_node<Foo, void *>::value_type
valueType = value.type[0]
d.putFields(tree)
node = tree["__begin_node_"]
nodeType = node.type
with Children(d, size):
for i in d.childRange():
with SubItem(d, i):
d.putItem(node['__value_'])
d.putBetterType(valueType)
node = std1TreeNext(d, node).cast(nodeType)
def qdump__std____1__multiset(d, value):
qdump__std____1__set(d, value)
def qform__std____1__map():
return mapForms()
def qdump__std____1__map(d, value):
tree = value["__tree_"]
base3 = tree["__pair3_"].address()
size = d.extractUInt(base3)
d.check(size <= 100*1000*1000)
d.putItemCount(size)
if d.isExpanded():
# type of node is std::__1::__tree_node<Foo, Bar>::value_type
valueType = value.type[0]
node = tree["__begin_node_"]
nodeType = node.type
with Children(d, size, maxNumChild=1000):
node = tree["__begin_node_"]
for i in d.childRange():
# There's possibly also:
#pair = node['__value_']['__nc']
pair = node['__value_']['__cc']
d.putPairItem(i, pair)
node = std1TreeNext(d, node).cast(nodeType)
def qform__std____1__multimap():
return mapForms()
def qdump__std____1__multimap(d, value):
qdump__std____1__map(d, value)
def qdump__std____1__map__iterator(d, value):
d.putEmptyValue()
if d.isExpanded():
with Children(d):
node = value['__i_']['__ptr_'].dereference()['__value_']['__cc']
d.putSubItem('first', node['first'])
d.putSubItem('second', node['second'])
def qdump__std____1__map__const_iterator(d, value):
qdump__std____1__map__iterator(d, value)
def qdump__std____1__set__iterator(d, value):
d.putEmptyValue()
d.putNumChild(1)
if value.type.name.endswith("::iterator"):
treeTypeName = value.type.name[:-len("::iterator")]
elif value.type.name.endswith("::const_iterator"):
treeTypeName = value.type.name[:-len("::const_iterator")]
treeType = d.lookupType(treeTypeName)
keyType = treeType[0]
if d.isExpanded():
with Children(d):
node = value['__ptr_'].dereference()['__value_']
node = node.cast(keyType)
d.putSubItem('value', node)
def qdump__std____1__set_const_iterator(d, value):
qdump__std____1__set__iterator(d, value)
def qdump__std__stack(d, value):
d.putItem(value["c"])
d.putBetterType(value.type)
def qdump__std____debug__stack(d, value):
qdump__std__stack(d, value)
def qdump__std____1__stack(d, value):
d.putItem(value["c"])
d.putBetterType(value.type)
def qform__std__string():
return [Latin1StringFormat, SeparateLatin1StringFormat,
Utf8StringFormat, SeparateUtf8StringFormat ]
def qdump__std__string(d, value):
qdumpHelper_std__string(d, value, d.createType("char"), d.currentItemFormat())
def qdumpHelper_std__string(d, value, charType, format):
if d.isQnxTarget():
qdumpHelper__std__string__QNX(d, value, charType, format)
return
if d.isMsvcTarget():
qdumpHelper__std__string__MSVC(d, value, charType, format)
return
data = value.extractPointer()
# We can't lookup the std::string::_Rep type without crashing LLDB,
# so hard-code assumption on member position
# struct { size_type _M_length, size_type _M_capacity, int _M_refcount; }
(size, alloc, refcount) = d.split("ppp", data - 3 * d.ptrSize())
refcount = refcount & 0xffffffff
d.check(refcount >= -1) # Can be -1 according to docs.
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, charType, format)
def qdumpHelper__std__string__QNX(d, value, charType, format):
size = value['_Mysize']
alloc = value['_Myres']
_BUF_SIZE = int(16 / charType.size())
if _BUF_SIZE <= alloc: #(_BUF_SIZE <= _Myres ? _Bx._Ptr : _Bx._Buf);
data = value['_Bx']['_Ptr']
else:
data = value['_Bx']['_Buf']
sizePtr = data.cast(d.charType().pointer())
refcount = int(sizePtr[-1])
d.check(refcount >= -1) # Can be -1 accoring to docs.
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(sizePtr, size, charType, format)
def qdumpHelper__std__string__MSVC(d, value, charType, format):
(proxy, buffer, size, alloc) = value.split("p16spp");
_BUF_SIZE = int(16 / charType.size());
d.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
if _BUF_SIZE <= alloc:
(proxy, data) = value.split("pp");
else:
data = value.address() + d.ptrSize()
d.putCharArrayHelper(data, size, charType, format)
def qdump__std____1__string(d, value):
firstByte = value.split('b')[0]
if int(firstByte & 1) == 0:
# Short/internal.
size = int(firstByte / 2)
data = value.address() + 1
else:
# Long/external.
(dummy, size, data) = value.split('ppp')
d.putCharArrayHelper(data, size, d.charType(), d.currentItemFormat())
d.putType("std::string")
def qdump__std____1__wstring(d, value):
firstByte = value.split('b')[0]
if int(firstByte & 1) == 0:
# Short/internal.
size = int(firstByte / 2)
data = value.address() + 4
else:
# Long/external.
(dummy, size, data) = value.split('ppp')
d.putCharArrayHelper(data, size, d.createType('wchar_t'))
d.putType("std::wstring")
def qdump__std____weak_ptr(d, value):
return qdump__std__shared_ptr(d, value)
def qdump__std__weak_ptr(d, value):
return qdump__std__shared_ptr(d, value)
def qdump__std____1__weak_ptr(d, value):
return qdump__std____1__shared_ptr(d, value)
def qdump__std__shared_ptr(d, value):
if d.isMsvcTarget():
i = value["_Ptr"]
else:
i = value["_M_ptr"]
if i.pointer() == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(i.dereference())
d.putBetterType(value.type)
def qdump__std____1__shared_ptr(d, value):
i = value["__ptr_"]
if i.pointer() == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(i.dereference())
d.putBetterType(value.type)
def qdump__std__unique_ptr(d, value):
p = d.extractPointer(value)
if p == 0:
d.putValue("(null)")
d.putNumChild(0)
else:
d.putItem(d.createValue(p, value.type[0]))
d.putBetterType(value.type)
def qdump__std____1__unique_ptr(d, value):
qdump__std__unique_ptr(d, value)
def qdump__std__pair(d, value):
typeCode = '{%s}@{%s}' % (value.type[0].name, value.type[1].name)
first, pad, second = value.split(typeCode)
with Children(d):
key = d.putSubItem('first', first)
value = d.putSubItem('second', second)
d.putField('key', key.value)
if key.encoding is not None:
d.putField('keyencoded', key.encoding)
d.putValue(value.value, value.encoding)
def qform__std__unordered_map():
return mapForms()
def qform__std____debug__unordered_map():
return mapForms()
def qdump__std__unordered_map(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value["_List"])
return
try:
# gcc ~= 4.7
size = value["_M_element_count"].integer()
start = value["_M_before_begin"]["_M_nxt"]
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
except:
try:
# gcc 4.9.1
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_before_begin"]["_M_nxt"]
except:
# gcc 4.6.2
size = value["_M_element_count"].integer()
start = value["_M_buckets"].dereference()
# FIXME: Pointer-aligned?
d.putItemCount(size)
# We don't know where the data is
d.putNumChild(0)
return
d.putItemCount(size)
if d.isExpanded():
keyType = value.type[0]
valueType = value.type[1]
typeCode = 'p@{%s}@{%s}' % (value.type[0].name, value.type[1].name)
p = start.pointer()
with Children(d, size):
for i in d.childRange():
p, pad, key, pad, val = d.split(typeCode, p)
d.putPairItem(i, (key, val))
def qdump__std____debug__unordered_map(d, value):
qdump__std__unordered_map(d, value)
def qform__std__unordered_multimap():
return qform__std__unordered_map()
def qform__std____debug__unordered_multimap():
return qform__std____debug__unordered_map()
def qdump__std__unordered_multimap(d, value):
qdump__std__unordered_map(d, value)
def qdump__std____debug__unordered_multimap(d, value):
qdump__std__unordered_multimap(d, value)
def qdump__std__unordered_set(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdump__std__list__QNX(d, value["_List"])
return
try:
# gcc ~= 4.7
size = value["_M_element_count"].integer()
start = value["_M_before_begin"]["_M_nxt"]
offset = 0
except:
try:
# libc++ (Mac)
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_bbegin"]["_M_node"]["_M_nxt"]
offset = 0
except:
try:
# gcc 4.6.2
size = value["_M_element_count"].integer()
start = value["_M_buckets"].dereference()
offset = d.ptrSize()
except:
# gcc 4.9.1
size = value["_M_h"]["_M_element_count"].integer()
start = value["_M_h"]["_M_before_begin"]["_M_nxt"]
offset = 0
d.putItemCount(size)
if d.isExpanded():
p = start.pointer()
valueType = value.type[0]
with Children(d, size, childType=valueType):
ptrSize = d.ptrSize()
for i in d.childRange():
d.putSubItem(i, d.createValue(p + ptrSize - offset, valueType))
p = d.extractPointer(p + offset)
def qform__std____1__unordered_map():
return mapForms()
def qdump__std____1__unordered_map(d, value):
size = value["__table_"]["__p2_"]["__first_"].integer()
d.putItemCount(size)
if d.isExpanded():
# There seem to be several versions of the implementation.
def valueCCorNot(val):
try:
return val["__cc"]
except:
return val
node = value["__table_"]["__p1_"]["__first_"]["__next_"]
with Children(d, size):
for i in d.childRange():
d.putPairItem(i, valueCCorNot(node["__value_"]))
node = node["__next_"]
def qdump__std____1__unordered_set(d, value):
size = int(value["__table_"]["__p2_"]["__first_"])
d.putItemCount(size)
if d.isExpanded():
node = value["__table_"]["__p1_"]["__first_"]["__next_"]
with Children(d, size, childType=value.type[0], maxNumChild=1000):
for i in d.childRange():
d.putSubItem(i, node["__value_"])
node = node["__next_"]
def qdump__std____debug__unordered_set(d, value):
qdump__std__unordered_set(d, value)
def qdump__std__unordered_multiset(d, value):
qdump__std__unordered_set(d, value)
def qdump__std____debug__unordered_multiset(d, value):
qdump__std__unordered_multiset(d, value)
def qform__std__valarray():
return arrayForms()
def qdump__std__valarray(d, value):
if d.isMsvcTarget():
(data, size) = value.split('pp')
else:
(size, data) = value.split('pp')
d.putItemCount(size)
d.putPlotData(data, size, value.type[0])
def qform__std____1__valarray():
return arrayForms()
def qdump__std____1__valarray(d, value):
innerType = value.type[0]
(begin, end) = value.split('pp')
size = int((end - begin) / innerType.size())
d.putItemCount(size)
d.putPlotData(begin, size, innerType)
def qform__std__vector():
return arrayForms()
def qedit__std__vector(d, value, data):
import gdb
values = data.split(',')
n = len(values)
innerType = value.type[0].name
cmd = "set $d = (%s*)calloc(sizeof(%s)*%s,1)" % (innerType, innerType, n)
gdb.execute(cmd)
cmd = "set {void*[3]}%s = {$d, $d+%s, $d+%s}" % (value.address(), n, n)
gdb.execute(cmd)
cmd = "set (%s[%d])*$d={%s}" % (innerType, n, data)
gdb.execute(cmd)
def qdump__std__vector(d, value):
if d.isQnxTarget() or d.isMsvcTarget():
qdumpHelper__std__vector__QNX(d, value)
else:
qdumpHelper__std__vector(d, value, False)
def qdumpHelper__std__vector(d, value, isLibCpp):
innerType = value.type[0]
isBool = innerType.name == 'bool'
if isBool:
if isLibCpp:
(start, size) = value.split("pp") # start is 'unsigned long *'
alloc = size
else:
(start, soffset, pad, finish, foffset, pad, alloc) = value.split("pI@pI@p")
size = (finish - start) * 8 + foffset - soffset # 8 is CHAR_BIT.
else:
(start, finish, alloc) = value.split("ppp")
size = int((finish - start) / innerType.size())
d.check(finish <= alloc)
if size > 0:
d.checkPointer(start)
d.checkPointer(finish)
d.checkPointer(alloc)
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
if isBool:
if d.isExpanded():
with Children(d, size, maxNumChild=10000, childType=innerType):
for i in d.childRange():
q = start + int(i / 8)
with SubItem(d, i):
d.putValue((int(d.extractPointer(q)) >> (i % 8)) & 1)
d.putType("bool")
d.putNumChild(0)
else:
d.putPlotData(start, size, innerType)
def qdumpHelper__std__vector__QNX(d, value):
innerType = value.type[0]
isBool = innerType.name == 'bool'
if isBool:
(proxy1, proxy2, start, last, end, size) = value.split("pppppi")
else:
(proxy, start, last, end) = value.split("pppp")
size = (last - start) // innerType.size()
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.check(last <= end)
if size > 0:
d.checkPointer(start)
d.checkPointer(last)
d.checkPointer(end)
d.putItemCount(size)
if d.isExpanded():
if isBool:
with Children(d, size, maxNumChild=10000, childType=innerType):
for i in d.childRange():
q = start + int(i / 8)
with SubItem(d, i):
d.putValue((d.extractPointer(q) >> (i % 8)) & 1)
d.putType("bool")
d.putNumChild(0)
else:
d.putPlotData(start, size, innerType)
def qform__std____1__vector():
return arrayForms()
def qdump__std____1__vector(d, value):
qdumpHelper__std__vector(d, value, True)
def qform__std____debug__vector():
return arrayForms()
def qdump__std____debug__vector(d, value):
qdump__std__vector(d, value)
def qedit__std__string(d, value, data):
d.call('void', value, 'assign', '"%s"' % data.replace('"', '\\"'))
def qedit__string(d, expr, value):
qedit__std__string(d, expr, value)
def qedit__std____cxx11__string(d, expr, value):
qedit__std__string(d, expr, value)
def qedit__std__wstring(d, value, data):
d.call('void', value, 'assign', 'L"%s"' % data.replace('"', '\\"'))
def qedit__wstring(d, expr, value):
qedit__std__wstring(d, expr, value)
def qedit__std____cxx11__wstring(d, expr, value):
qedit__std__wstring(d, expr, value)
def qdump__string(d, value):
qdump__std__string(d, value)
def qform__std__wstring():
return [SimpleFormat, SeparateFormat]
def qdump__std__wstring(d, value):
qdumpHelper_std__string(d, value, d.createType('wchar_t'), d.currentItemFormat())
def qdump__std__basic_string(d, value):
innerType = value.type[0]
qdumpHelper_std__string(d, value, innerType, d.currentItemFormat())
def qdump__std____cxx11__basic_string(d, value):
innerType = value.type[0]
(data, size) = value.split("pI")
d.check(0 <= size) #and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, innerType, d.currentItemFormat())
def qform__std____cxx11__string(d, value):
qform__std__string(d, value)
def qdump__std____cxx11__string(d, value):
(data, size) = value.split("pI")
d.check(0 <= size) #and size <= alloc and alloc <= 100*1000*1000)
d.putCharArrayHelper(data, size, d.charType(), d.currentItemFormat())
# Needed only to trigger the form report above.
def qform__std____cxx11__string():
return qform__std__string()
def qform__std____cxx11__wstring():
return qform__std__wstring()
def qdump__std____1__basic_string(d, value):
innerType = value.type[0].name
if innerType == "char":
qdump__std____1__string(d, value)
elif innerType == "wchar_t":
qdump__std____1__wstring(d, value)
else:
warn("UNKNOWN INNER TYPE %s" % innerType)
def qdump__wstring(d, value):
qdump__std__wstring(d, value)
def qdump__std____1__once_flag(d, value):
qdump__std__once_flag(d, value)
def qdump__std__once_flag(d, value):
d.putValue(value.split("i")[0])
d.putBetterType(value.type)
d.putPlainChildren(value)
def qdump____gnu_cxx__hash_set(d, value):
ht = value["_M_ht"]
size = ht["_M_num_elements"].integer()
d.check(0 <= size and size <= 1000 * 1000 * 1000)
d.putItemCount(size)
innerType = value.type[0]
d.putType("__gnu__cxx::hash_set<%s>" % innerType.name)
if d.isExpanded():
with Children(d, size, maxNumChild=1000, childType=innerType):
buckets = ht["_M_buckets"]["_M_impl"]
bucketStart = buckets["_M_start"]
bucketFinish = buckets["_M_finish"]
p = bucketStart
itemCount = 0
for i in xrange((bucketFinish.pointer() - bucketStart.pointer()) // d.ptrSize()):
if p.dereference().pointer():
cur = p.dereference()
while cur.pointer():
d.putSubItem(itemCount, cur["_M_val"])
cur = cur["_M_next"]
itemCount += 1
p = p + 1
def qdump__uint8_t(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__int8_t(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__std__byte(d, value):
d.putNumChild(0)
d.putValue(value.integer())
def qdump__std__optional(d, value):
innerType = value.type[0]
(initialized, pad, payload) = d.split('b@{%s}' % innerType.name, value)
if initialized:
d.putItem(payload)
d.putBetterType(value.type)
else:
d.putSpecialValue("uninitialized")
d.putNumChild(0)
def qdump__std__experimental__optional(d, value):
qdump__std__optional(d, value)
| gpl-3.0 | -7,329,428,618,475,966,000 | 32.360862 | 106 | 0.551367 | false |
stscieisenhamer/ginga | ginga/rv/plugins/Drawing.py | 1 | 15096 | #
# Drawing.py -- Drawing plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga import colors
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga.util import dp
draw_colors = colors.get_colors()
default_drawtype = 'circle'
default_drawcolor = 'lightblue'
fillkinds = ('circle', 'rectangle', 'polygon', 'triangle', 'righttriangle',
'square', 'ellipse', 'box')
class Drawing(GingaPlugin.LocalPlugin):
"""Local plugin to draw shapes on canvas."""
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Drawing, self).__init__(fv, fitsimage)
self.layertag = 'drawing-canvas'
self.dc = fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('point', color='cyan')
canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('edit-event', self.edit_cb)
canvas.set_callback('edit-select', self.edit_select_cb)
canvas.set_surface(self.fitsimage)
# So we can draw and edit with the cursor
canvas.register_for_cursor_drawing(self.fitsimage)
self.canvas = canvas
self.drawtypes = list(canvas.get_drawtypes())
self.drawcolors = draw_colors
self.linestyles = ['solid', 'dash']
self.coordtypes = ['data', 'wcs', 'cartesian', 'canvas']
# contains all parameters to be passed to the constructor
self.draw_args = []
self.draw_kwdargs = {}
# cache of all canvas item parameters
self.drawparams_cache = {}
# holds object being edited
self.edit_obj = None
# For mask creation from drawn objects
self._drawn_tags = []
self._mask_prefix = 'drawing'
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container)
self.orientation = orientation
vbox.set_border_width(4)
vbox.set_spacing(2)
msg_font = self.fv.get_font("sansFont", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(msg_font)
self.tw = tw
fr = Widgets.Expander("Instructions")
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("Drawing")
captions = (("Draw type:", 'label', "Draw type", 'combobox'),
("Coord type:", 'label', "Coord type", 'combobox'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
combobox = b.draw_type
for name in self.drawtypes:
combobox.append_text(name)
index = self.drawtypes.index(default_drawtype)
combobox.set_index(index)
combobox.add_callback('activated', lambda w, idx: self.set_drawparams_cb())
combobox = b.coord_type
for name in self.coordtypes:
combobox.append_text(name)
index = 0
combobox.set_index(index)
combobox.add_callback('activated', lambda w, idx: self.set_drawparams_cb())
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Draw")
btn1.set_state(mode == 'draw')
btn1.add_callback('activated', lambda w, val: self.set_mode_cb('draw', val))
btn1.set_tooltip("Choose this to draw")
self.w.btn_draw = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Edit", group=btn1)
btn2.set_state(mode == 'edit')
btn2.add_callback('activated', lambda w, val: self.set_mode_cb('edit', val))
btn2.set_tooltip("Choose this to edit")
self.w.btn_edit = btn2
hbox.add_widget(btn2)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
fr = Widgets.Frame("Attributes")
vbox2 = Widgets.VBox()
self.w.attrlbl = Widgets.Label()
vbox2.add_widget(self.w.attrlbl, stretch=0)
self.w.drawvbox = Widgets.VBox()
vbox2.add_widget(self.w.drawvbox, stretch=1)
fr.set_widget(vbox2)
vbox.add_widget(fr, stretch=0)
captions = (("Rotate By:", 'label', 'Rotate By', 'entry',
"Scale By:", 'label', 'Scale By', 'entry'),
("Delete Obj", 'button', "sp1", 'spacer',
"Create mask", 'button', "Clear canvas", 'button'),
)
w, b = Widgets.build_info(captions)
self.w.update(b)
b.delete_obj.add_callback('activated', lambda w: self.delete_object())
b.delete_obj.set_tooltip("Delete selected object in edit mode")
b.delete_obj.set_enabled(False)
b.scale_by.add_callback('activated', self.scale_object)
b.scale_by.set_text('0.9')
b.scale_by.set_tooltip("Scale selected object in edit mode")
b.scale_by.set_enabled(False)
b.rotate_by.add_callback('activated', self.rotate_object)
b.rotate_by.set_text('90.0')
b.rotate_by.set_tooltip("Rotate selected object in edit mode")
b.rotate_by.set_enabled(False)
b.create_mask.add_callback('activated', lambda w: self.create_mask())
b.create_mask.set_tooltip("Create boolean mask from drawing")
b.clear_canvas.add_callback('activated', lambda w: self.clear_canvas())
b.clear_canvas.set_tooltip("Delete all drawing objects")
vbox.add_widget(w, stretch=0)
spacer = Widgets.Label('')
vbox.add_widget(spacer, stretch=1)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.toggle_create_button()
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
def instructions(self):
self.tw.set_text(
"""Draw a figure with the cursor.
For polygons/paths press 'v' to create a vertex, 'z' to remove last vertex.""")
def start(self):
self.instructions()
self.set_drawparams_cb()
# insert layer if it is not already
p_canvas = self.fitsimage.get_canvas()
try:
obj = p_canvas.get_object_by_tag(self.layertag)
except KeyError:
# Add canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
self.resume()
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
self.fv.show_status("Draw a figure with the right mouse button")
def stop(self):
# remove the canvas from the image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except:
pass
# don't leave us stuck in edit mode
self.canvas.set_draw_mode('draw')
self.canvas.ui_setActive(False)
self.fv.show_status("")
def redo(self):
pass
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
self._drawn_tags.append(tag)
self.toggle_create_button()
self.logger.info("drew a %s" % (obj.kind))
return True
def set_drawparams_cb(self):
## if self.canvas.get_draw_mode() != 'draw':
## # if we are in edit mode then don't initialize draw gui
## return
index = self.w.draw_type.get_index()
kind = self.drawtypes[index]
index = self.w.coord_type.get_index()
coord = self.coordtypes[index]
# remove old params
self.w.drawvbox.remove_all()
# Create new drawing class of the right kind
drawClass = self.canvas.get_draw_class(kind)
self.w.attrlbl.set_text("New Object: %s" % (kind))
# Build up a set of control widgets for the parameters
# of the canvas object to be drawn
paramlst = drawClass.get_params_metadata()
params = self.drawparams_cache.setdefault(kind, Bunch.Bunch())
self.draw_params = ParamSet.ParamSet(self.logger, params)
w = self.draw_params.build_params(paramlst,
orientation=self.orientation)
self.draw_params.add_callback('changed', self.draw_params_changed_cb)
self.w.drawvbox.add_widget(w, stretch=1)
# disable edit-only controls
self.w.delete_obj.set_enabled(False)
self.w.scale_by.set_enabled(False)
self.w.rotate_by.set_enabled(False)
args, kwdargs = self.draw_params.get_params()
#self.logger.debug("changing params to: %s" % str(kwdargs))
if kind != 'compass':
kwdargs['coord'] = coord
self.canvas.set_drawtype(kind, **kwdargs)
def draw_params_changed_cb(self, paramObj, params):
index = self.w.draw_type.get_index()
kind = self.drawtypes[index]
args, kwdargs = self.draw_params.get_params()
#self.logger.debug("changing params to: %s" % str(kwdargs))
self.canvas.set_drawtype(kind, **kwdargs)
def edit_cb(self, fitsimage, obj):
# <-- obj has been edited
#self.logger.debug("edit event on canvas: obj=%s" % (obj))
if obj != self.edit_obj:
# edit object is new. Update visual parameters
self.edit_select_cb(fitsimage, obj)
else:
# edit object has been modified. Sync visual parameters
self.draw_params.params_to_widgets()
def edit_params_changed_cb(self, paramObj, obj):
self.draw_params.widgets_to_params()
if hasattr(obj, 'coord'):
tomap = self.fitsimage.get_coordmap(obj.coord)
if obj.crdmap != tomap:
#self.logger.debug("coordmap has changed to '%s'--converting mapper" % (
# str(tomap)))
# user changed type of mapper; convert coordinates to
# new mapper and update widgets
obj.convert_mapper(tomap)
paramObj.params_to_widgets()
obj.sync_state()
# TODO: change whence to 0 if allowing editing of images
whence = 2
self.canvas.redraw(whence=whence)
def edit_initialize(self, fitsimage, obj):
# remove old params
self.w.drawvbox.remove_all()
self.edit_obj = obj
if (obj is not None) and self.canvas.is_selected(obj):
self.w.attrlbl.set_text("Editing a %s" % (obj.kind))
drawClass = obj.__class__
# Build up a set of control widgets for the parameters
# of the canvas object to be drawn
paramlst = drawClass.get_params_metadata()
self.draw_params = ParamSet.ParamSet(self.logger, obj)
w = self.draw_params.build_params(paramlst,
orientation=self.orientation)
self.draw_params.add_callback('changed', self.edit_params_changed_cb)
self.w.drawvbox.add_widget(w, stretch=1)
self.w.delete_obj.set_enabled(True)
self.w.scale_by.set_enabled(True)
self.w.rotate_by.set_enabled(True)
else:
self.w.attrlbl.set_text("")
self.w.delete_obj.set_enabled(False)
self.w.scale_by.set_enabled(False)
self.w.rotate_by.set_enabled(False)
def edit_select_cb(self, fitsimage, obj):
self.logger.debug("editing selection status has changed for %s" % str(obj))
self.edit_initialize(fitsimage, obj)
def set_mode_cb(self, mode, tf):
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_initialize(self.fitsimage, None)
elif mode == 'draw':
self.set_drawparams_cb()
return True
def toggle_create_button(self):
"""Enable or disable Create Mask button based on drawn objects."""
if len(self._drawn_tags) > 0:
self.w.create_mask.set_enabled(True)
else:
self.w.create_mask.set_enabled(False)
def create_mask(self):
"""Create boolean mask from drawing.
All areas enclosed by all the shapes drawn will be set to 1 (True)
in the mask. Otherwise, the values will be set to 0 (False).
The mask will be inserted as a new image buffer, like ``Mosaic``.
"""
ntags = len(self._drawn_tags)
if ntags == 0:
return
old_image = self.fitsimage.get_image()
if old_image is None:
return
mask = None
obj_kinds = set()
# Create mask
for tag in self._drawn_tags:
obj = self.canvas.get_object_by_tag(tag)
try:
cur_mask = old_image.get_shape_mask(obj)
except Exception as e:
self.logger.error('Cannot create mask: {0}'.format(str(e)))
continue
if mask is not None:
mask |= cur_mask
else:
mask = cur_mask
obj_kinds.add(obj.kind)
# Might be useful to inherit header from displayed image (e.g., WCS)
# but the displayed image should not be modified.
# Bool needs to be converted to int so FITS writer would not crash.
image = dp.make_image(mask.astype('int16'), old_image, {},
pfx=self._mask_prefix)
imname = image.get('name')
# Insert new image
self.fv.gui_call(self.fv.add_image, imname, image, chname=self.chname)
# This sets timestamp
image.make_callback('modified')
# Add change log to ChangeHistory
s = 'Mask created from {0} drawings ({1})'.format(
ntags, ','.join(sorted(obj_kinds)))
iminfo = self.channel.get_image_info(imname)
iminfo.reason_modified = s
self.logger.info(s)
def clear_canvas(self):
self.canvas.clear_selected()
self.canvas.delete_all_objects()
self._drawn_tags = []
self.toggle_create_button()
def delete_object(self):
tag = self.canvas.lookup_object_tag(self.canvas._edit_obj)
self._drawn_tags.remove(tag)
self.toggle_create_button()
self.canvas.edit_delete()
self.canvas.redraw(whence=2)
def rotate_object(self, w):
delta = float(w.get_text())
self.canvas.edit_rotate(delta, self.fitsimage)
def scale_object(self, w):
delta = float(w.get_text())
self.canvas.edit_scale(delta, delta, self.fitsimage)
def __str__(self):
return 'drawing'
#END
| bsd-3-clause | 7,770,706,608,207,105,000 | 33.703448 | 88 | 0.588169 | false |
miguelfrde/stanford-cs231n | assignment2/cs231n/optim.py | 1 | 6261 | import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a
moving average of the gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
###########################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
###########################################################################
v = config['momentum']*v - config['learning_rate']*dw
next_w = w + v
###########################################################################
# END OF YOUR CODE #
###########################################################################
config['velocity'] = v
return next_w, config
def rmsprop(x, dx, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared
gradient values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(x))
next_x = None
###########################################################################
# TODO: Implement the RMSprop update formula, storing the next value of x #
# in the next_x variable. Don't forget to update cache value stored in #
# config['cache']. #
###########################################################################
config['cache'] = config['decay_rate']*config['cache'] + (1 - config['decay_rate']) * dx * dx
next_x = x - config['learning_rate']*dx / (np.sqrt(config['cache']) + config['epsilon'])
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_x, config
def adam(x, dx, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(x))
config.setdefault('v', np.zeros_like(x))
config.setdefault('t', 0)
next_x = None
###########################################################################
# TODO: Implement the Adam update formula, storing the next value of x in #
# the next_x variable. Don't forget to update the m, v, and t variables #
# stored in config. #
###########################################################################
config['t'] += 1
config['m'] = config['beta1']*config['m'] + (1 - config['beta1'])*dx
config['v'] = config['beta2']*config['v'] + (1 - config['beta2'])*dx*dx
mt = config['m'] / (1 - config['beta1']**config['t'])
vt = config['v'] / (1 - config['beta2']**config['t'])
next_x = x - config['learning_rate']*mt / (np.sqrt(vt) + config['epsilon'])
###########################################################################
# END OF YOUR CODE #
###########################################################################
return next_x, config
| mit | 2,448,093,510,012,221,400 | 40.463576 | 97 | 0.54001 | false |
cvandeplas/plaso | plaso/parsers/winreg_plugins/msie_zones.py | 1 | 11257 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the MSIE zone settings plugin."""
from plaso.events import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'Elizabeth Schweinsberg ([email protected])'
class MsieZoneSettingsPlugin(interface.KeyPlugin):
"""Windows Registry plugin for parsing the MSIE Zones settings."""
NAME = 'winreg_msie_zone'
DESCRIPTION = u'Parser for Internet Explorer zone settings Registry data.'
REG_TYPE = 'NTUSER'
REG_KEYS = [
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
URLS = ['http://support.microsoft.com/kb/182569']
ZONE_NAMES = {
'0': '0 (My Computer)',
'1': '1 (Local Intranet Zone)',
'2': '2 (Trusted sites Zone)',
'3': '3 (Internet Zone)',
'4': '4 (Restricted Sites Zone)',
'5': '5 (Custom)'
}
KNOWN_PERMISSIONS_VALUE_NAMES = [
'1001', '1004', '1200', '1201', '1400', '1402', '1405', '1406', '1407',
'1601', '1604', '1606', '1607', '1608', '1609', '1800', '1802', '1803',
'1804', '1809', '1A04', '2000', '2001', '2004', '2100', '2101', '2102',
'2200', '2201', '2300']
CONTROL_VALUES_PERMISSIONS = {
0x00000000: '0 (Allow)',
0x00000001: '1 (Prompt User)',
0x00000003: '3 (Not Allowed)',
0x00010000: '0x00010000 (Administrator approved)'
}
CONTROL_VALUES_SAFETY = {
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)'
}
CONTROL_VALUES_1A00 = {
0x00000000: ('0x00000000 (Automatic logon with current user name and '
'password)'),
0x00010000: '0x00010000 (Prompt for user name and password)',
0x00020000: '0x00020000 (Automatic logon only in Intranet zone)',
0x00030000: '0x00030000 (Anonymous logon)'
}
CONTROL_VALUES_1C00 = {
0x00000000: '0x00000000 (Disable Java)',
0x00010000: '0x00010000 (High safety)',
0x00020000: '0x00020000 (Medium safety)',
0x00030000: '0x00030000 (Low safety)',
0x00800000: '0x00800000 (Custom)'
}
FEATURE_CONTROLS = {
'1200': 'Run ActiveX controls and plug-ins',
'1400': 'Active scripting',
'1001': 'Download signed ActiveX controls',
'1004': 'Download unsigned ActiveX controls',
'1201': 'Initialize and script ActiveX controls not marked as safe',
'1206': 'Allow scripting of IE Web browser control',
'1207': 'Reserved',
'1208': 'Allow previously unused ActiveX controls to run without prompt',
'1209': 'Allow Scriptlets',
'120A': 'Override Per-Site (domain-based) ActiveX restrictions',
'120B': 'Override Per-Site (domain-based) ActiveX restrictions',
'1402': 'Scripting of Java applets',
'1405': 'Script ActiveX controls marked as safe for scripting',
'1406': 'Access data sources across domains',
'1407': 'Allow Programmatic clipboard access',
'1408': 'Reserved',
'1601': 'Submit non-encrypted form data',
'1604': 'Font download',
'1605': 'Run Java',
'1606': 'Userdata persistence',
'1607': 'Navigate sub-frames across different domains',
'1608': 'Allow META REFRESH',
'1609': 'Display mixed content',
'160A': 'Include local directory path when uploading files to a server',
'1800': 'Installation of desktop items',
'1802': 'Drag and drop or copy and paste files',
'1803': 'File Download',
'1804': 'Launching programs and files in an IFRAME',
'1805': 'Launching programs and files in webview',
'1806': 'Launching applications and unsafe files',
'1807': 'Reserved',
'1808': 'Reserved',
'1809': 'Use Pop-up Blocker',
'180A': 'Reserved',
'180B': 'Reserved',
'180C': 'Reserved',
'180D': 'Reserved',
'1A00': 'User Authentication: Logon',
'1A02': 'Allow persistent cookies that are stored on your computer',
'1A03': 'Allow per-session cookies (not stored)',
'1A04': 'Don\'t prompt for client cert selection when no certs exists',
'1A05': 'Allow 3rd party persistent cookies',
'1A06': 'Allow 3rd party session cookies',
'1A10': 'Privacy Settings',
'1C00': 'Java permissions',
'1E05': 'Software channel permissions',
'1F00': 'Reserved',
'2000': 'Binary and script behaviors',
'2001': '.NET: Run components signed with Authenticode',
'2004': '.NET: Run components not signed with Authenticode',
'2100': 'Open files based on content, not file extension',
'2101': 'Web sites in less privileged zone can navigate into this zone',
'2102': ('Allow script initiated windows without size/position '
'constraints'),
'2103': 'Allow status bar updates via script',
'2104': 'Allow websites to open windows without address or status bars',
'2105': 'Allow websites to prompt for information using scripted windows',
'2200': 'Automatic prompting for file downloads',
'2201': 'Automatic prompting for ActiveX controls',
'2300': 'Allow web pages to use restricted protocols for active content',
'2301': 'Use Phishing Filter',
'2400': '.NET: XAML browser applications',
'2401': '.NET: XPS documents',
'2402': '.NET: Loose XAML',
'2500': 'Turn on Protected Mode',
'2600': 'Enable .NET Framework setup',
'{AEBA21FA-782A-4A90-978D-B72164C80120}': 'First Party Cookie',
'{A8A88C49-5EB2-4990-A1A2-0876022C854F}': 'Third Party Cookie'
}
def GetEntries(
self, parser_context, file_entry=None, key=None, registry_type=None,
**unused_kwargs):
"""Retrieves information of the Internet Settings Zones values.
The MSIE Feature controls are stored in the Zone specific subkeys in:
Internet Settings\\Zones key
Internet Settings\\Lockdown_Zones key
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: optional file entry object (instance of dfvfs.FileEntry).
The default is None.
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
"""
text_dict = {}
if key.number_of_values == 0:
error_string = u'Key: {0:s} missing values.'.format(key.path)
parser_context.ProduceParseError(
self.NAME, error_string, file_entry=file_entry)
else:
for value in key.GetValues():
if not value.name:
value_name = '(default)'
else:
value_name = u'{0:s}'.format(value.name)
if value.DataIsString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, value.data)
elif value.DataIsInteger():
value_string = u'[{0:s}] {1:d}'.format(
value.data_type_string, value.data)
elif value.DataIsMultiString():
value_string = u'[{0:s}] {1:s}'.format(
value.data_type_string, u''.join(value.data))
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
text_dict[value_name] = value_string
# Generate at least one event object for the key.
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict, offset=key.offset,
registry_type=registry_type, urls=self.URLS)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
if key.number_of_subkeys == 0:
error_string = u'Key: {0:s} missing subkeys.'.format(key.path)
parser_context.ProduceParseError(
self.NAME, error_string, file_entry=file_entry)
return
for zone_key in key.GetSubkeys():
# TODO: these values are stored in the Description value of the
# zone key. This solution will break on zone values that are larger
# than 5.
path = u'{0:s}\\{1:s}'.format(key.path, self.ZONE_NAMES[zone_key.name])
text_dict = {}
# TODO: this plugin currently just dumps the values and does not
# distinguish between what is a feature control or not.
for value in zone_key.GetValues():
# Ignore the default value.
if not value.name:
continue
if value.DataIsString():
value_string = value.data
elif value.DataIsInteger():
if value.name in self.KNOWN_PERMISSIONS_VALUE_NAMES:
value_string = self.CONTROL_VALUES_PERMISSIONS[value.data]
elif value.name == '1A00':
value_string = self.CONTROL_VALUES_1A00[value.data]
elif value.name == '1C00':
value_string = self.CONTROL_VALUES_1C00[value.data]
elif value.name == '1E05':
value_string = self.CONTROL_VALUES_SAFETY[value.data]
else:
value_string = u'{0:d}'.format(value.data)
else:
value_string = u'[{0:s}]'.format(value.data_type_string)
if len(value.name) == 4 and value.name != 'Icon':
value_description = self.FEATURE_CONTROLS.get(value.name, 'UNKNOWN')
else:
value_description = self.FEATURE_CONTROLS.get(value.name, '')
if value_description:
feature_control = u'[{0:s}] {1:s}'.format(
value.name, value_description)
else:
feature_control = u'[{0:s}]'.format(value.name)
text_dict[feature_control] = value_string
event_object = windows_events.WindowsRegistryEvent(
zone_key.last_written_timestamp, path, text_dict,
offset=zone_key.offset, registry_type=registry_type,
urls=self.URLS)
parser_context.ProduceEvent(event_object, plugin_name=self.NAME)
class MsieZoneSettingsSoftwareZonesPlugin(MsieZoneSettingsPlugin):
"""Parses the Zones key in the Software hive."""
NAME = 'winreg_msie_zone_software'
REG_TYPE = 'SOFTWARE'
REG_KEYS = [
u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings\\Zones',
(u'\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Zones'),
(u'\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Internet Settings'
u'\\Lockdown_Zones')]
winreg.WinRegistryParser.RegisterPlugins([
MsieZoneSettingsPlugin, MsieZoneSettingsSoftwareZonesPlugin])
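# Illustrative sketch (not part of plaso): a standalone approximation of how the
# zone handling above turns a raw value name and its integer data into the
# '[name] description' -> permission string pair stored in text_dict. The lookup
# dictionaries are passed in explicitly, so nothing here depends on the plugin.
def _describe_zone_value(value_name, value_data, feature_controls, permissions):
  """Return a (feature_control, value_string) pair for a zone Registry value."""
  description = feature_controls.get(value_name, u'UNKNOWN')
  feature_control = u'[{0:s}] {1:s}'.format(value_name, description)
  value_string = permissions.get(value_data, u'{0:d}'.format(value_data))
  return feature_control, value_string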
| apache-2.0 | -8,224,367,203,974,624,000 | 38.36014 | 80 | 0.639424 | false |
datamade/yournextmp-popit | elections/bf_elections_2015/management/commands/bf_elections_2015_import_candidate.py | 1 | 9711 | # -*- coding: utf-8 -*-
import dateutil.parser
import csv
from os.path import dirname, join
import re
import string
import codecs
import requests
from django.core.management.base import BaseCommand
from candidates.utils import strip_accents
from candidates.views.version_data import get_change_metadata
from elections.models import Election
UNKNOWN_PARTY_ID = 'unknown'
USER_AGENT = (
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Ubuntu Chromium/38.0.2125.111 '
'Chrome/38.0.2125.111Safari/537.36'
)
def get_post_data(api, election_id, province):
from candidates.cache import get_post_cached
from candidates.election_specific import AREA_DATA, AREA_POST_DATA
ynr_election_data = Election.objects.get_by_slug(election_id)
area_key = (ynr_election_data.area_types.first().name,
ynr_election_data.area_generation)
areas_by_name = AREA_DATA.areas_by_name[area_key]
if province != 'Burkina Faso':
province = strip_accents(province).upper()
area = areas_by_name[province]
post_id = AREA_POST_DATA.get_post_id(
election_id, area['type'], area['id']
)
post_data = get_post_cached(api, post_id)['result']
return ynr_election_data, post_data
def get_existing_popit_person(vi_person_id):
from candidates.models import PopItPerson
from candidates.popit import get_search_url
# See if this person already exists by searching for the
# ID they were imported with:
query_format = \
'identifiers.identifier:"{id}" AND ' + \
'identifiers.scheme:"{scheme}"'
search_url = get_search_url(
'persons',
query_format.format(
id=vi_person_id, scheme='import-id'
),
embed='membership.organization'
)
results = requests.get(search_url).json()
total = results['total']
if total > 1:
message = "Multiple matches for CI ID {0}"
raise Exception(message.format(vi_person_id))
if total == 0:
return None
# Otherwise there was exactly one result:
return PopItPerson.create_from_dict(results['result'][0])
def get_party_data(party_name):
from candidates.popit import get_search_url
# See if this party already exists by searching for its name:
party_name = party_name.replace('/', '')
party_name = party_name.decode('utf-8')
query_format = \
'name:"{name}"'
search_url = get_search_url(
'organizations',
query_format.format(
name=party_name
)
)
print party_name
results = requests.get(search_url).json()
print results
total = results['total']
if total > 1:
message = "Multiple matches for party {0}"
raise Exception(message.format(party_name))
if total == 0:
return None
# Otherwise there was exactly one result:
return results['result'][0]
""" These functions taken from the csv docs -
https://docs.python.org/2/library/csv.html#examples"""
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
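def _example_read_unicode_csv(csv_path='candidates.csv'):
    # Illustrative helper only (the filename is hypothetical and the importer
    # below does not call this): unicode_csv_reader() expects already-decoded
    # unicode input, so pair it with codecs.open() exactly as the command does.
    with codecs.open(csv_path, 'r', encoding='windows-1252') as csv_file:
        return [row for row in unicode_csv_reader(csv_file)]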
class Command(BaseCommand):
help = "Import inital candidate data"
def handle(self, username=None, **options):
from slumber.exceptions import HttpClientError
from candidates.election_specific import PARTY_DATA, shorten_post_label
from candidates.models import PopItPerson
from candidates.popit import create_popit_api_object
election_data = {
'prv-2015': 'listedescandidatsauxelectionslegislativeslisteprovincialeanptic.csv',
'nat-2015': 'listedescandidatsauxelectionslegislativesanptic.csv'
}
field_map = {
'prv-2015': {
'region': 1,
'party': 4,
'list_order': 5,
'first_name': 7,
'last_name': 6,
'gender': 8,
'birth_date': 9,
'party_short': 3
},
'nat-2015': {
'region': 0,
'party': 2,
'list_order': 3,
'first_name': 5,
'last_name': 4,
'gender': 6,
'birth_date': 7,
'party_short': 2
}
}
api = create_popit_api_object()
party_id_missing = {}
party_name_to_id = {}
for party_id, party_name in PARTY_DATA.party_id_to_name.items():
party_name_to_id[party_name] = party_id
for election_id, filename in election_data.items():
csv_filename = join(
dirname(__file__), '..', '..', 'data', filename
)
fields = field_map[election_id]
with codecs.open(csv_filename, 'r', encoding='windows-1252') as f:
initial = True
for candidate in unicode_csv_reader(f):
# skip header line
if initial:
initial = False
continue
region = candidate[fields['region']]
party = candidate[fields['party']]
party_list_order = candidate[fields['list_order']]
first_name = string.capwords(candidate[fields['first_name']])
last_name = string.capwords(candidate[fields['last_name']])
gender = candidate[fields['gender']]
birth_date = None
if candidate[fields['birth_date']] is not None:
birth_date = str(dateutil.parser.parse(
candidate[fields['birth_date']], dayfirst=True
).date())
name = first_name + ' ' + last_name
id = '-'.join([
re.sub('[^\w]*', '', re.sub(r' ', '-', strip_accents(name.lower()))),
re.sub('[^\w]*', '', candidate[fields['party_short']].lower()),
birth_date
])
# national candidate
if region == 'PAYS':
region = 'Burkina Faso'
election_data, post_data = get_post_data(
api, election_id, region
)
# debug
# tmp = '%s %s %s (%s) - %s (%s)' % ( id, first_name, last_name, party, region, post_data['label'] )
# print tmp
person = get_existing_popit_person(id)
if person:
# print "Found an existing person:", person.get_absolute_url()
pass
else:
print "No existing person, creating a new one:", name
person = PopItPerson()
person.set_identifier('import-id', id)
person.family_name = last_name
person.given_name = first_name
person.name = name
person.gender = gender
if birth_date:
person.birth_date = str(birth_date)
else:
person.birth_date = None
standing_in_election = {
'post_id': post_data['id'],
'name': shorten_post_label(post_data['label']),
'party_list_position': party_list_order,
}
if 'area' in post_data:
standing_in_election['mapit_url'] = post_data['area']['identifier']
person.standing_in = {
election_data.slug: standing_in_election
}
change_metadata = get_change_metadata(
None,
'Imported candidate from CSV',
)
party_comp = re.sub(' +', ' ', party)
party_id = UNKNOWN_PARTY_ID
if party_comp in party_name_to_id.keys():
party_id = party_name_to_id[party_comp]
party = party_comp
else:
party_id = party_name_to_id['Unknown Party']
party = 'Unknown Party'
if party_id == UNKNOWN_PARTY_ID and party_comp not in party_id_missing.keys():
party_id_missing[party_comp] = 1
person.party_memberships = {
election_data.slug: {
'id': party_id,
'name': party,
'imported_name': party_comp
}
}
person.record_version(change_metadata)
try:
person.save_to_popit(api)
except HttpClientError as hce:
print "Got an HttpClientError:", hce.content
raise
if len(party_id_missing) > 0:
print "Unmatched party names:"
for name in party_id_missing.keys():
print name
| agpl-3.0 | -1,523,529,099,116,446,000 | 34.702206 | 120 | 0.510658 | false |
matthewghgriffiths/nestedbasinsampling | examples/LJ31/system.py | 1 | 3922 | import logging
from pele.potentials import LJ
from nestedbasinsampling import (
NoGUTSSampler, NestedOptimizerKalman, HardShellConstraint, random_structure,
RecordMinimization, CompareStructures, LOG_CONFIG, Database)
logger = logging.getLogger("LJ31.system")
logger = logging.getLogger("NBS.LJ_system")
default_sampler_kws = dict(
max_depth=7, remove_linear_momentum=True, remove_angular_momentum=True,
remove_initial_linear_momentum=False, remove_initial_angular_momentum=False)
default_nopt_kws = dict(
nsteps=2000, MC_steps=10, target_acc=0.4, nsave=30, tol=1e-2,
nwait=15, kalman_discount=100.)
default_struct_kws = dict(niter=100)
default_database_kws = dict()
class NBS_LJ(object):
"""
"""
def __init__(self, natoms, radius=None, stepsize=None,
sampler_kws=None, nopt_kws=None, stepsize_kw=None,
struct_kws=None, database_kws=None):
self.pot = LJ()
self.natoms = natoms
self.radius = float(natoms) ** (1. / 3) if radius is None else radius
self.constraint = HardShellConstraint(self.radius)
self.sampler_kws = default_sampler_kws.copy()
if sampler_kws is not None: self.sampler_kws.update(sampler_kws)
self.sampler = NoGUTSSampler(
self.pot, constraint=self.constraint, **self.sampler_kws)
self.nopt_kws = default_nopt_kws.copy()
if nopt_kws is not None: self.nopt_kws.update(nopt_kws)
self.struct_kws = default_struct_kws.copy()
if struct_kws is not None: self.struct_kws.update(struct_kws)
self.database_kws = default_database_kws.copy()
if database_kws is not None: self.database_kws.update(database_kws)
if 'compareMinima' not in self.database_kws:
self.database_kws['compareMinima'] = self.get_compare_structures()
if stepsize is None:
kws = {} if stepsize_kw is None else stepsize_kw
s = self.determine_stepsize(
target_acc=self.nopt_kws['target_acc'], **kws)
self.stepsize = s[-1]
else:
self.stepsize = stepsize
def determine_stepsize(self, coords=None, E=None, **kwargs):
if coords is None: coords = self.random_config()
if E is None: E = self.pot.getEnergy(coords)
s = self.sampler.determine_stepsize(coords, E, **kwargs)
return s
def random_config(self):
return random_structure(self.natoms, self.radius)
def nopt(self, coords=None, Ecut=None, stepsize=None):
if coords is None: coords = self.random_config()
if Ecut is None: Ecut = self.pot.getEnergy(coords)
if stepsize is None: stepsize = self.stepsize
opt = NestedOptimizerKalman(
coords, self.pot, sampler=self.sampler,
energy=Ecut, stepsize=stepsize, **self.nopt_kws)
return dict(opt.run())
def get_configuration(self):
coords = self.random_config()
Ecut = self.pot.getEnergy(coords)
stepsize = self.stepsize
return coords, Ecut, stepsize
def get_compare_structures(self):
return CompareStructures(**self.struct_kws)
def get_database(self, dbname=":memory:"):
db = Database(dbname, **self.database_kws)
db.add_property('sampler', self.sampler_kws, overwrite=False)
db.add_property('nopt', self.nopt_kws, overwrite=False)
db.add_property('struct', self.struct_kws, overwrite=False)
logger.info("Connecting to database: {:s}".format(dbname))
logger.info("params:\nsampler:\n{:s}\nnopt:\n{:s}".format(
str(self.sampler_kws), str(self.nopt_kws)))
return db
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, **LOG_CONFIG)
system = NBS_LJ(natoms=31, stepsize=0.1)
res = system.nopt()
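# Possible follow-up for this __main__ block (illustrative; the database
# filename is an assumption, not part of the original example):
#     db = system.get_database("lj31_example.db")
#     logger.info("nopt result keys: %s", sorted(res.keys()))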
| gpl-3.0 | 6,799,967,800,464,881,000 | 37.616162 | 80 | 0.629526 | false |
ubivar/ubivar-python | ubivar/error.py | 1 | 1837 | # Exceptions
class UbivarError(Exception):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None, headers=None):
super(UbivarError, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
except BaseException:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.request_id = self.headers.get('request-id', None)
def __unicode__(self):
if self.request_id is not None:
msg = self._message or "<empty message>"
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return self._message
def __str__(self):
return self.__unicode__()
class APIError(UbivarError):
pass
class APIConnectionError(UbivarError):
pass
class InvalidRequestError(UbivarError):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None, headers=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body,
headers)
self.param = param
class AuthenticationError(UbivarError):
pass
class PermissionError(UbivarError):
pass
class RateLimitError(UbivarError):
pass
class SignatureVerificationError(UbivarError):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(
message, http_body)
self.sig_header = sig_header
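# Minimal usage sketch (not part of the library): how a caller might branch on
# the exception hierarchy above. `do_request` stands in for any callable that
# issues an API request and is purely hypothetical.
def handle_ubivar_call(do_request):
    try:
        return do_request()
    except RateLimitError:
        return None  # rate limited; back off and retry later
    except (AuthenticationError, PermissionError):
        raise  # credentials or scope problem; surface immediately
    except InvalidRequestError as err:
        # err.param names the offending request parameter
        raise ValueError(u"bad parameter: {0}".format(err.param))
    except APIConnectionError:
        return None  # transient network problem; safe to retry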
| mit | -8,536,734,799,136,476,000 | 25.623188 | 70 | 0.598258 | false |
duncanwp/cis_plugins | Grosvenor_CDNC.py | 1 | 2421 | from cis.data_io.products import NetCDF_Gridded
import cis.data_io.gridded_data as gd
import logging
from cis.utils import demote_warnings
class Grosvenor_CDNC(NetCDF_Gridded):
"""
Plugin for reading Dan Grosvenor's MODIS CDNC files.
"""
@staticmethod
def load_multiple_files_callback(cube, field, filename):
# We need to remove these global attributes when reading multiple files so that the cubes can be properly merged
cube.attributes.pop('history', None)
cube.attributes.pop('CreationDate', None)
return cube
def get_file_signature(self):
return [r'.*\.nc']
def create_data_object(self, filenames, variable):
"""Reads the data for a variable.
:param filenames: list of names of files from which to read data
:param variable: (optional) name of variable; if None, the file(s) must contain data for only one cube
:return: iris.cube.Cube
"""
from cis.time_util import convert_cube_time_coord_to_standard_time
from cis.utils import single_warnings_only
from iris.coords import DimCoord
from numpy.ma import masked_invalid
import iris
# Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
# - partly because we open the files multiple times (to look for aux coords) and partly because iris
# will throw a warning every time it meets a variable with a non-CF dimension
with single_warnings_only():
cube = self._create_cube(filenames, variable)
# For this data we actually need to add the dim coords...
cubes = iris.load(filenames)
cube.add_dim_coord(DimCoord(cubes.extract('lon')[0].data, units='degrees_east',
standard_name='longitude'), 0)
cube.add_dim_coord(DimCoord(cubes.extract('lat')[0].data, units='degrees_north',
standard_name='latitude'), 1)
cube.add_dim_coord(DimCoord(cubes.extract('time')[0].data, units='days since 1970-01-01 00:00:00',
standard_name='time'), 2)
if cube.attributes['invalid_units'] == 'cm^{-3}':
cube.units = 'cm-3'
# Mask the NaNs
cube.data = masked_invalid(cube.data)
cube = convert_cube_time_coord_to_standard_time(cube)
return cube
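def _example_load_cdnc(filenames, variable='CDNC'):
    # Illustrative sketch only: both the file list and the variable name are
    # assumptions, not taken from the plugin. create_data_object() attaches the
    # lon/lat/time dimension coordinates and masks NaNs exactly as above.
    product = Grosvenor_CDNC()
    return product.create_data_object(filenames, variable)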
| lgpl-3.0 | -3,232,205,901,868,574,700 | 40.033898 | 120 | 0.634036 | false |
PaulWay/insights-core | insights/parsers/current_clocksource.py | 1 | 1559 | """
CurrentClockSource - file ``/sys/devices/system/clocksource/clocksource0/current_clocksource``
==============================================================================================
This is a relatively simple parser that reads the
``/sys/devices/system/clocksource/clocksource0/current_clocksource`` file.
As well as reporting the contents of the file in its ``data`` property, it
also provides three properties that are true if the clock source is set to
that value:
* **is_kvm** - the clock source file contains 'kvm-clock'
* **is_tsc** - the clock source file contains 'tsc'
* **is_vmi_timer** - the clock source file contains 'vmi-timer'
Examples:
>>> cs = shared[CurrentClockSource]
>>> cs.data
'tsc'
>>> cs.is_tsc
True
"""
from .. import Parser, parser
@parser("current_clocksource")
class CurrentClockSource(Parser):
"""
The CurrentClockSource parser class.
Attributes:
data (str): the content of the current_clocksource file.
"""
def parse_content(self, content):
self.data = list(content)[0]
@property
def is_kvm(self):
"""
bool: does the clock source contain 'kvm-clock'?
"""
return 'kvm-clock' in self.data
@property
def is_tsc(self):
"""
bool: does the clock source contain 'tsc'?
"""
return 'tsc' in self.data
@property
def is_vmi_timer(self):
"""
bool: does the clock source contain 'vmi-timer'?
"""
return 'vmi-timer' in self.data
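def _example_parse(raw_value="tsc"):
    """
    Illustrative only: feed the parser a fake file body. The ``context_wrap``
    test helper is an assumption borrowed from the parser test suite.
    """
    from insights.tests import context_wrap
    cs = CurrentClockSource(context_wrap(raw_value))
    return cs.data, cs.is_kvm, cs.is_tsc, cs.is_vmi_timer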
| apache-2.0 | -2,943,900,483,633,057,000 | 25.87931 | 94 | 0.592046 | false |
akuster/yali | yali/gui/ScrCheckCD.py | 1 | 4754 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QWidget, SIGNAL, QIcon, QPixmap
import pisi.ui
import yali.context as ctx
import yali.pisiiface
from yali.gui import ScreenWidget
from yali.gui.Ui.checkcdwidget import Ui_CheckCDWidget
from yali.gui.YaliDialog import Dialog
class Widget(QWidget, ScreenWidget):
name = "mediaCheck"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_CheckCDWidget()
self.ui.setupUi(self)
self.check_media_stop = True
self.connect(self.ui.checkButton, SIGNAL("clicked()"), self.slotCheckCD)
if ctx.consts.lang == "tr":
self.ui.progressBar.setFormat("%%p")
self.ui.validationSucceedBox.hide()
self.ui.validationFailBox.hide()
self.ui.progressBar.hide()
def shown(self):
pass
def slotCheckCD(self):
if self.check_media_stop:
self.check_media_stop = False
self.ui.progressBar.show()
icon = QIcon()
icon.addPixmap(QPixmap(":/gui/pics/dialog-error.png"), QIcon.Normal, QIcon.Off)
self.ui.checkButton.setIcon(icon)
self.ui.checkButton.setText("")
self.checkMedia()
else:
self.check_media_stop = True
self.ui.progressBar.show()
icon = QIcon()
icon.addPixmap(QPixmap(":/gui/pics/task-accepted.png"), QIcon.Normal, QIcon.Off)
self.ui.checkButton.setIcon(icon)
self.ui.checkButton.setText(_("Validate"))
def checkMedia(self):
ctx.mainScreen.disableNext()
ctx.mainScreen.disableBack()
ctx.interface.informationWindow.update(_("Starting validation..."))
class PisiUI(pisi.ui.UI):
def notify(self, event, **keywords):
pass
def display_progress(self, operation, percent, info, **keywords):
pass
yali.pisiiface.initialize(ui=PisiUI(), with_comar=False, nodestDir=True)
yali.pisiiface.addCdRepo()
ctx.mainScreen.processEvents()
pkg_names = yali.pisiiface.getAvailablePackages()
self.ui.progressBar.setMaximum(len(pkg_names))
self.ui.checkLabel.setText(_("Package validation is in progress. "
"Please wait until it is completed."))
cur = 0
flag = 0
for pkg_name in pkg_names:
cur += 1
ctx.logger.debug("Validating %s " % pkg_name)
ctx.interface.informationWindow.update(_("Validating %s") % pkg_name)
if self.check_media_stop:
continue
try:
yali.pisiiface.checkPackageHash(pkg_name)
self.ui.progressBar.setValue(cur)
except:
rc = ctx.interface.messageWindow(_("Warning"),
_("Validation of %s package failed."
"Please remaster your installation medium and"
"reboot.") % pkg_name,
type="custom", customIcon="warning",
customButtons=[_("Skip Validation"), _("Skip Package"), _("Reboot")],
default=0)
flag = 1
if not rc:
self.ui.validationBox.hide()
self.ui.validationFailBox.show()
ctx.mainScreen.enableNext()
break
elif rc == 1:
continue
else:
yali.util.reboot()
if not self.check_media_stop and flag == 0:
ctx.interface.informationWindow.update(_('<font color="#FFF"><b>Validation succeeded. You can proceed with the installation.</b></font>'))
self.ui.validationSucceedBox.show()
self.ui.validationBox.hide()
else:
ctx.interface.informationWindow.hide()
self.ui.progressBar.setValue(0)
yali.pisiiface.removeRepo(ctx.consts.cd_repo_name)
ctx.mainScreen.enableNext()
ctx.mainScreen.enableBack()
self.ui.checkLabel.setText(_("Package validation is finished."))
ctx.interface.informationWindow.hide()
| gpl-2.0 | -8,076,896,464,390,555,000 | 35.569231 | 150 | 0.562474 | false |
cscanlin/munger-builder | script_builder/views.py | 1 | 5537 | import os
import re
import csv
import json
import time
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.contrib.auth import authenticate, login
from django.conf import settings
from guardian.shortcuts import assign_perm, get_objects_for_user
from .models import DataField, FieldType, CSVDocument, MungerBuilder, PivotField
from .tasks import download_munger_async, download_test_data_async
INDEX_REDIRECT = HttpResponseRedirect('/script_builder/munger_builder_index')
def munger_builder_index(request):
user = get_user_or_create_anon(request)
anon_check(request)
munger_builder_list = get_objects_for_user(user, 'script_builder.change_mungerbuilder')
if len(munger_builder_list) == 0:
munger_builder_list = add_sample_munger(user)
context = {'munger_builder_list': munger_builder_list}
return render(request, 'script_builder/munger_builder_index.html', context)
def get_user_or_create_anon(request):
if not request.user.id:
timestamp = int(time.time())
credentials = {
'username': 'anon_{0}'.format(timestamp),
'password': timestamp,
}
user = User.objects.create_user(**credentials)
user.save()
assign_perm('script_builder.add_mungerbuilder', user)
assign_perm('script_builder.add_fieldtype', user)
assign_perm('script_builder.add_datafield', user)
assign_perm('script_builder.add_pivotfield', user)
group = Group.objects.get(name='Global Sample')
user.groups.add(group)
user.save()
anon_user = authenticate(**credentials)
login(request, anon_user)
else:
user = request.user
return user
def add_sample_munger(user):
mb = MungerBuilder.objects.create(
munger_name='Sample for {0}'.format(user.username),
input_path='test_data.csv',
is_sample=True,
)
mb.save()
mb.assign_perms(user)
sample_field_dict = {
'order_num': ['count'],
'product': None,
'sales_name': ['index'],
'region': ['column'],
'revenue': ['mean', 'sum'],
'shipping': ['median'],
}
for field_name, field_types in sample_field_dict.items():
data_field = DataField.objects.create(munger_builder=mb, current_name=field_name)
data_field.save()
data_field.assign_perms(user)
if field_types:
for type_name in field_types:
field_type = FieldType.objects.get(type_name=type_name)
PivotField.objects.create(data_field=data_field, field_type=field_type).save()
return get_objects_for_user(user, 'script_builder.change_mungerbuilder')
def new_munger_builder(request):
user = get_user_or_create_anon(request)
mb = MungerBuilder.objects.create(munger_name='New Munger - {0}'.format(user.username))
mb.save()
mb.assign_perms(user)
return HttpResponseRedirect('/script_builder/pivot_builder/{0}'.format(mb.id))
def pivot_builder(request, munger_builder_id):
anon_check(request)
mb = MungerBuilder.objects.get(pk=munger_builder_id)
if not mb.user_is_authorized():
return INDEX_REDIRECT
return render(request, 'script_builder/pivot_builder_react.html', context={'mb': mb})
def download_munger(request, munger_builder_id):
task = download_munger_async.delay(munger_builder_id)
return render_to_response('script_builder/poll_for_download.html',
{'task_id': task.id, 'mb_id': munger_builder_id})
def download_test_data(request, munger_builder_id):
task = download_test_data_async.delay(munger_builder_id)
return render_to_response('script_builder/poll_for_download.html',
{'task_id': task.id, 'mb_id': munger_builder_id})
def poll_for_download(request):
task_id = request.GET.get("task_id")
filename = request.GET.get("filename")
if filename == 'test_data.csv':
async_func = download_test_data_async
file_path = os.path.join(settings.STATIC_ROOT, filename)
else:
async_func = download_munger_async
file_path = os.path.join(settings.MEDIA_ROOT, 'user_munger_scripts', '{0}'.format(filename))
if request.is_ajax():
result = async_func.AsyncResult(task_id)
if result.ready():
return HttpResponse(json.dumps({"filename": result.get()}))
return HttpResponse(json.dumps({"filename": None}))
with open(file_path, 'r') as f:
response = HttpResponse(f, content_type='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
return response
# Helper Functions
def parse_text_fields(form, request, input_type):
if input_type == 'text':
return re.split('[,\t\n]', form.cleaned_data['fields_paste'])
if input_type == 'csv':
new_csv = CSVDocument(csv_file=request.FILES['csv_file'])
new_csv.save()
reader = csv.DictReader(request.FILES['csv_file'])
return reader.fieldnames
def anon_check(request):
if 'anon_' in request.user.username:
anon_message = """You are logged in as an anonymous user.
You may not be able to transfer any mungers to a permanent account in the future.
Register to save mungers."""
messages.warning(request, anon_message)
| mit | -3,080,153,474,421,255,000 | 34.954545 | 104 | 0.657576 | false |
CI-WATER/gsshapy | gsshapy/grid/nwm_to_gssha.py | 1 | 4942 | # -*- coding: utf-8 -*-
#
# nwm_to_gssha.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# License BSD 3-Clause
import logging
from datetime import timedelta
from os import mkdir, path, remove, rename
import xarray as xr
from .grid_to_gssha import GRIDtoGSSHA
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# MAIN CLASS
# ------------------------------------------------------------------------------
class NWMtoGSSHA(GRIDtoGSSHA):
"""This class converts the National Water Model output data to GSSHA formatted input.
This class inherits from :class:`GRIDtoGSSHA`.
Attributes:
gssha_project_folder(:obj:`str`): Path to the GSSHA project folder
gssha_project_file_name(:obj:`str`): Name of the GSSHA elevation grid file.
lsm_input_folder_path(:obj:`str`): Path to the input folder for the LSM files.
lsm_search_card(:obj:`str`): Glob search pattern for LSM files. Ex. "*.grib2".
lsm_lat_var(Optional[:obj:`str`]): Name of the latitude variable in the LSM netCDF files. Defaults to 'y'.
lsm_lon_var(Optional[:obj:`str`]): Name of the longitude variable in the LSM netCDF files. Defaults to 'x'.
lsm_time_var(Optional[:obj:`str`]): Name of the time variable in the LSM netCDF files. Defaults to 'time'.
lsm_lat_dim(Optional[:obj:`str`]): Name of the latitude dimension in the LSM netCDF files. Defaults to 'y'.
lsm_lon_dim(Optional[:obj:`str`]): Name of the longitude dimension in the LSM netCDF files. Defaults to 'x'.
lsm_time_dim(Optional[:obj:`str`]): Name of the time dimension in the LSM netCDF files. Defaults to 'time'.
output_timezone(Optional[:obj:`tzinfo`]): This is the timezone to output the dates for the data. Default is the GSSHA model timezone. This option does NOT currently work for NetCDF output.
Example::
from datetime import datetime
from gsshapy.grid import NWMtoGSSHA
n2g = NWMtoGSSHA(gssha_project_folder='E:\\GSSHA',
gssha_project_file_name='gssha.prj',
lsm_input_folder_path='E:\\GSSHA\\nwm-data',
lsm_search_card="*.grib")
# example rain gage
out_gage_file = 'E:\\GSSHA\\nwm_rain1.gag'
n2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
lsm_data_var="RAINRATE",
precip_type="RADAR")
# example data var map array
# WARNING: This is not complete
data_var_map_array = [
['precipitation_rate', 'RAINRATE'],
['pressure', 'PSFC'],
['relative_humidity', ['Q2D','T2D', 'PSFC']],
['wind_speed', ['U2D', 'V2D']],
['direct_radiation', 'SWDOWN'], # ???
['diffusive_radiation', 'SWDOWN'], # ???
['temperature', 'T2D'],
['cloud_cover', '????'],
]
n2g.lsm_data_to_arc_ascii(data_var_map_array)
"""
def __init__(self,
gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card="*.nc",
lsm_lat_var='y',
lsm_lon_var='x',
lsm_time_var='time',
lsm_lat_dim='y',
lsm_lon_dim='x',
lsm_time_dim='time',
output_timezone=None,
):
"""
Initializer function for the NWMtoGSSHA class
"""
super(NWMtoGSSHA, self).__init__(gssha_project_folder,
gssha_project_file_name,
lsm_input_folder_path,
lsm_search_card,
lsm_lat_var,
lsm_lon_var,
lsm_time_var,
lsm_lat_dim,
lsm_lon_dim,
lsm_time_dim,
output_timezone)
@property
def xd(self):
"""get xarray dataset file handle to LSM files"""
if self._xd is None:
path_to_lsm_files = path.join(self.lsm_input_folder_path,
self.lsm_search_card)
self._xd = super(NWMtoGSSHA, self).xd
self._xd.lsm.coords_projected = True
return self._xd
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type):
"""
This function loads data from LSM and converts to GSSHA format
"""
super(NWMtoGSSHA, self).\
_load_converted_gssha_data_from_lsm(gssha_var, lsm_var, load_type)
self.data.lsm.coords_projected = True
| bsd-3-clause | 1,060,683,975,107,513,100 | 41.973913 | 195 | 0.510522 | false |
eirmag/weboob | weboob/core/bcall.py | 1 | 7438 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon, Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
from copy import copy
from threading import Thread, Event, RLock, Timer
from weboob.capabilities.base import CapBaseObject
from weboob.tools.misc import get_backtrace
from weboob.tools.log import getLogger
__all__ = ['BackendsCall', 'CallErrors', 'IResultsCondition', 'ResultsConditionError']
class CallErrors(Exception):
def __init__(self, errors):
msg = 'Errors during backend calls:\n' + \
'\n'.join(['Module(%r): %r\n%r\n' % (backend, error, backtrace)
for backend, error, backtrace in errors])
Exception.__init__(self, msg)
self.errors = copy(errors)
def __iter__(self):
return self.errors.__iter__()
class IResultsCondition(object):
def is_valid(self, obj):
raise NotImplementedError()
class ResultsConditionError(Exception):
pass
class BackendsCall(object):
def __init__(self, backends, condition, function, *args, **kwargs):
"""
@param backends list of backends to call.
@param condition a IResultsCondition object. Can be None.
@param function backends' method name, or callable object.
@param args, kwargs arguments given to called functions.
"""
self.logger = getLogger('bcall')
# Store if a backend is finished
self.backends = {}
for backend in backends:
self.backends[backend.name] = False
# Condition
self.condition = condition
# Global mutex on object
self.mutex = RLock()
# Event set when every backend has given its data
self.finish_event = Event()
# Event set when there are new responses
self.response_event = Event()
# Waiting responses
self.responses = []
# Errors
self.errors = []
# Threads
self.threads = []
# Create jobs for each backend
with self.mutex:
for backend in backends:
self.logger.debug('Creating a new thread for %s' % backend)
self.threads.append(Timer(0, self._caller, (backend, function, args, kwargs)).start())
if not backends:
self.finish_event.set()
def _store_error(self, backend, error):
with self.mutex:
backtrace = get_backtrace(error)
self.errors.append((backend, error, backtrace))
def _store_result(self, backend, result):
with self.mutex:
if isinstance(result, CapBaseObject):
if self.condition and not self.condition.is_valid(result):
return
result.backend = backend.name
self.responses.append((backend, result))
self.response_event.set()
def _caller(self, backend, function, args, kwargs):
self.logger.debug('%s: Thread created successfully' % backend)
with backend:
try:
# Call method on backend
try:
self.logger.debug('%s: Calling function %s' % (backend, function))
if callable(function):
result = function(backend, *args, **kwargs)
else:
result = getattr(backend, function)(*args, **kwargs)
except Exception, error:
self.logger.debug('%s: Called function %s raised an error: %r' % (backend, function, error))
self._store_error(backend, error)
else:
self.logger.debug('%s: Called function %s returned: %r' % (backend, function, result))
if hasattr(result, '__iter__') and not isinstance(result, basestring):
# Loop on iterator
try:
for subresult in result:
# Lock mutex only in loop in case the iterator is slow
# (for example if backend do some parsing operations)
self._store_result(backend, subresult)
except Exception, error:
self._store_error(backend, error)
else:
self._store_result(backend, result)
finally:
with self.mutex:
# This backend is now finished
self.backends[backend.name] = True
for finished in self.backends.itervalues():
if not finished:
return
self.response_event.set()
self.finish_event.set()
def _callback_thread_run(self, callback, errback):
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
callback(*responses.pop(0))
if errback:
with self.mutex:
while self.errors:
errback(*self.errors.pop(0))
callback(None, None)
def callback_thread(self, callback, errback=None):
"""
Call this method to create a thread which will call back a
specified function every time a new result comes.
When the process is over, the function will be called with
both arguments set to None.
The function prototypes are:
def callback(backend, result)
def errback(backend, error)
"""
thread = Thread(target=self._callback_thread_run, args=(callback, errback))
thread.start()
return thread
def wait(self):
self.finish_event.wait()
with self.mutex:
if self.errors:
raise CallErrors(self.errors)
def __iter__(self):
# Don't know how to factorize with _callback_thread_run
responses = []
while not self.finish_event.isSet() or self.response_event.isSet():
self.response_event.wait()
with self.mutex:
responses = self.responses
self.responses = []
# Reset event
self.response_event.clear()
# Consume responses
while responses:
yield responses.pop(0)
# Raise errors
with self.mutex:
if self.errors:
raise CallErrors(self.errors)
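# Usage sketch (illustrative; the backends list and capability method name are
# assumptions, not taken from a specific weboob capability):
def iter_all_results(backends, method_name, *args, **kwargs):
    """Iterate synchronously over every backend result, logging failures."""
    call = BackendsCall(backends, None, method_name, *args, **kwargs)
    try:
        for backend, result in call:
            yield backend, result
    except CallErrors, errors:
        logger = getLogger('bcall')
        for backend, error, backtrace in errors:
            logger.error('%s failed: %r', backend, error)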
| agpl-3.0 | -295,907,511,899,842,750 | 35.106796 | 112 | 0.565071 | false |
lujinda/replace | replace/args.py | 1 | 1706 | #/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : [email protected]
# Last modified : 2015-05-19 14:03:37
# Filename : args.py
# Description :
import optparse
from replace import version
import os
def parser_args():
usage = "Usage: %prog [options] target_path"
parser = optparse.OptionParser(usage,
version = version)
_help = "exclude files matching PATTERN"
parser.add_option('--filter_filename',
dest = 'filter_filename', type = str, action="append",
metavar = 'PATTERN', help = _help)
_help = 'only include files matching PATTERN(high priority)'
parser.add_option('--include_filename',
dest = 'include_filename', type = str, action="append",
metavar = 'PATTERN', help = _help)
_help = 'source re pattern'
parser.add_option('-s', '--source', type = str,
dest = 'source_re_string', help = _help)
_help = 'target string'
parser.add_option('-t', '--target', type = str,
dest = 'target_string', help = _help)
_help = 'include hidden file'
parser.add_option('-H', default = False, action = "store_true", dest="include_hidden", help = _help)
_help = 'prompt before every replace'
parser.add_option('-i', default = False,
dest = 'interactive', action = 'store_true',
help = _help)
opt, args = parser.parse_args()
if opt.source_re_string == None or opt.target_string == None:
parser.error('--source or --target be must')
for target_path in args:
if not os.path.exists(target_path):
parser.error("%s is not exists" % (target_path, ))
return opt, args
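# Usage sketch (illustrative): a typical CLI entry point for this module.
if __name__ == '__main__':
    options, target_paths = parser_args()
    print options.source_re_string, options.target_string, target_paths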
| gpl-3.0 | -682,240,807,930,640,600 | 29.464286 | 104 | 0.594373 | false |
macieksmuga/server | tests/unit/test_client.py | 1 | 21768 | """
Tests for the client
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
import ga4gh.protocol as protocol
import ga4gh.backend as backend
import ga4gh.client as client
import ga4gh.datarepo as datarepo
import tests.utils as utils
import ga4gh.exceptions as exceptions
class TestSearchMethodsCallRunRequest(unittest.TestCase):
"""
Test that search methods call lower-level functionality correctly
"""
def setUp(self):
self.httpClient = client.HttpClient("http://example.com")
self.httpClient._runSearchRequest = mock.Mock()
self.httpClient._runGetRequest = mock.Mock()
self.objectId = "SomeId"
self.objectName = "objectName"
self.datasetId = "datasetId"
self.variantSetId = "variantSetId"
self.variantAnnotationSetId = "variantAnnotationSetId"
self.referenceSetId = "referenceSetId"
self.referenceId = "referenceId"
self.readGroupIds = ["readGroupId"]
self.referenceName = "referenceName"
self.start = 100
self.end = 101
self.referenceName = "referenceName"
self.callSetIds = ["id1", "id2"]
self.pageSize = 1000
self.httpClient.setPageSize(self.pageSize)
self.assemblyId = "assemblyId"
self.accession = "accession"
self.md5checksum = "md5checksum"
def testSetPageSize(self):
testClient = client.AbstractClient()
# pageSize is None by default
self.assertIsNone(testClient.getPageSize())
for pageSize in [1, 10, 100]:
testClient.setPageSize(pageSize)
self.assertEqual(testClient.getPageSize(), pageSize)
def testSearchVariants(self):
request = protocol.SearchVariantsRequest()
request.reference_name = self.referenceName
request.start = self.start
request.end = self.end
request.variant_set_id = self.variantSetId
request.call_set_ids.extend(self.callSetIds)
request.page_size = self.pageSize
self.httpClient.searchVariants(
self.variantSetId, start=self.start, end=self.end,
referenceName=self.referenceName, callSetIds=self.callSetIds)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variants", protocol.SearchVariantsResponse)
def testSearchDatasets(self):
request = protocol.SearchDatasetsRequest()
request.page_size = self.pageSize
self.httpClient.searchDatasets()
self.httpClient._runSearchRequest.assert_called_once_with(
request, "datasets", protocol.SearchDatasetsResponse)
def testSearchVariantSets(self):
request = protocol.SearchVariantSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.searchVariantSets(self.datasetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantsets", protocol.SearchVariantSetsResponse)
def testSearchVariantAnnotationSets(self):
request = protocol.SearchVariantAnnotationSetsRequest()
request.variant_set_id = self.variantSetId
request.page_size = self.pageSize
self.httpClient.searchVariantAnnotationSets(self.variantSetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantannotationsets",
protocol.SearchVariantAnnotationSetsResponse)
def testSearchVariantAnnotations(self):
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = self.variantAnnotationSetId
request.page_size = self.pageSize
request.reference_name = self.referenceName
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
self.httpClient.searchVariantAnnotations(
self.variantAnnotationSetId,
referenceName=self.referenceName,
start=self.start,
end=self.end,
effects=[],
referenceId=self.referenceId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse)
with self.assertRaises(exceptions.BadRequestException):
self.httpClient.searchVariantAnnotations(
self.variantAnnotationSetId,
referenceName=self.referenceName,
start=self.start,
end=self.end,
effects=[{"term": "just a term"}, {"id": "an id"}],
referenceId=self.referenceId)
def testSearchFeatureSets(self):
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = self.datasetId
request.page_size = self.pageSize
self.httpClient.searchFeatureSets(self.datasetId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "featuresets", protocol.SearchFeatureSetsResponse)
def testSearchReferenceSets(self):
request = protocol.SearchReferenceSetsRequest()
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
request.assembly_id = self.assemblyId
self.httpClient.searchReferenceSets(
accession=self.accession, md5checksum=self.md5checksum,
assemblyId=self.assemblyId)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "referencesets", protocol.SearchReferenceSetsResponse)
def testSearchReferences(self):
request = protocol.SearchReferencesRequest()
request.reference_set_id = self.referenceSetId
request.page_size = self.pageSize
request.accession = self.accession
request.md5checksum = self.md5checksum
self.httpClient.searchReferences(
self.referenceSetId, accession=self.accession,
md5checksum=self.md5checksum)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "references", protocol.SearchReferencesResponse)
def testSearchReadGroupSets(self):
request = protocol.SearchReadGroupSetsRequest()
request.dataset_id = self.datasetId
request.name = self.objectName
request.page_size = self.pageSize
self.httpClient.searchReadGroupSets(
self.datasetId, name=self.objectName)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "readgroupsets", protocol.SearchReadGroupSetsResponse)
def testSearchCallSets(self):
request = protocol.SearchCallSetsRequest()
request.variant_set_id = self.variantSetId
request.name = self.objectName
request.page_size = self.pageSize
self.httpClient.searchCallSets(
self.variantSetId, name=self.objectName)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "callsets", protocol.SearchCallSetsResponse)
def testSearchReads(self):
request = protocol.SearchReadsRequest()
request.read_group_ids.extend(self.readGroupIds)
request.reference_id = self.referenceId
request.start = self.start
request.end = self.end
request.page_size = self.pageSize
self.httpClient.searchReads(
self.readGroupIds, referenceId=self.referenceId,
start=self.start, end=self.end)
self.httpClient._runSearchRequest.assert_called_once_with(
request, "reads", protocol.SearchReadsResponse)
def testGetReferenceSet(self):
self.httpClient.getReferenceSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"referencesets", protocol.ReferenceSet, self.objectId)
def testGetVariantAnnotationSet(self):
self.httpClient.getVariantAnnotationSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variantannotationsets", protocol.VariantAnnotationSet,
self.objectId)
def testGetVariantSet(self):
self.httpClient.getVariantSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variantsets", protocol.VariantSet, self.objectId)
def testGetReference(self):
self.httpClient.getReference(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"references", protocol.Reference, self.objectId)
def testGetReadGroupSets(self):
self.httpClient.getReadGroupSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"readgroupsets", protocol.ReadGroupSet, self.objectId)
def testGetReadGroup(self):
self.httpClient.getReadGroup(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"readgroups", protocol.ReadGroup, self.objectId)
def testGetCallSets(self):
self.httpClient.getCallSet(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"callsets", protocol.CallSet, self.objectId)
def testGetDatasets(self):
self.httpClient.getDataset(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"datasets", protocol.Dataset, self.objectId)
def testGetVariant(self):
self.httpClient.getVariant(self.objectId)
self.httpClient._runGetRequest.assert_called_once_with(
"variants", protocol.Variant, self.objectId)
class DatamodelObjectWrapper(object):
"""
Thin wrapper class that allows us to treat data model objects uniformly.
We should update the data model interface so that objects are always
returned so that we always call toProtocolElement on the results.
Variants and Reads are the exceptions here.
"""
def __init__(self, gaObject):
self.gaObject = gaObject
def toProtocolElement(self):
return self.gaObject
class DummyResponse(object):
"""
Stand in for requests Response object;
"""
def __init__(self, text):
self.text = text
self.status_code = 200
class DummyRequestsSession(object):
"""
Takes the place of a requests session so that we can check that all
values are sent and received correctly.
"""
def __init__(self, backend, urlPrefix):
self._backend = backend
self._urlPrefix = urlPrefix
self._getMethodMap = {
"datasets": self._backend.runGetDataset,
"referencesets": self._backend.runGetReferenceSet,
"references": self._backend.runGetReference,
"variantsets": self._backend.runGetVariantSet,
"variants": self._backend.runGetVariant,
"readgroupsets": self._backend.runGetReadGroupSet,
"readgroups": self._backend.runGetReadGroup,
}
self._searchMethodMap = {
"datasets": self._backend.runSearchDatasets,
"referencesets": self._backend.runSearchReferenceSets,
"references": self._backend.runSearchReferences,
"variantsets": self._backend.runSearchVariantSets,
"variants": self._backend.runSearchVariants,
"readgroupsets": self._backend.runSearchReadGroupSets,
"reads": self._backend.runSearchReads,
}
self.headers = {}
def checkSessionParameters(self):
contentType = "Content-type"
assert contentType in self.headers
assert self.headers[contentType] == "application/json"
def get(self, url, params):
# TODO add some more checks for params to see if Key is set,
# and we're not sending any extra stuff.
self.checkSessionParameters()
assert url.startswith(self._urlPrefix)
suffix = url[len(self._urlPrefix):]
basesSuffix = "/bases"
splits = suffix.split("/")
if suffix.endswith(basesSuffix):
# ListReferenceBases is an oddball and needs to be treated
# separately.
assert splits[0] == ''
assert splits[1] == 'references'
id_ = splits[2]
assert splits[3] == 'bases'
# This is all very ugly --- see the comments in the LocalClient
# for why we need to do this. Definitely needs to be fixed.
args = dict(params)
if args[u'end'] == u'0':
del args['end']
if args['pageToken'] == "":
del args['pageToken']
result = self._backend.runListReferenceBases(id_, args)
else:
assert len(splits) == 3
assert splits[0] == ''
datatype, id_ = splits[1:]
assert datatype in self._getMethodMap
method = self._getMethodMap[datatype]
result = method(id_)
return DummyResponse(result)
def post(self, url, params=None, data=None):
self.checkSessionParameters()
assert url.startswith(self._urlPrefix)
suffix = url[len(self._urlPrefix):]
searchSuffix = "/search"
assert suffix.startswith("/")
assert suffix.endswith(searchSuffix)
datatype = suffix[1:-len(searchSuffix)]
assert datatype in self._searchMethodMap
method = self._searchMethodMap[datatype]
result = method(data)
return DummyResponse(result)
class DummyHttpClient(client.HttpClient):
"""
Client in which we intercept calls to the underlying requests connection.
"""
def __init__(self, backend):
self._urlPrefix = "http://example.com"
super(DummyHttpClient, self).__init__(self._urlPrefix)
self._session = DummyRequestsSession(backend, self._urlPrefix)
self._setupHttpSession()
class ExhaustiveListingsMixin(object):
"""
Tests exhaustive listings using the high-level API with a Simulated
backend.
"""
@classmethod
def setUpClass(cls):
cls.backend = backend.Backend(datarepo.SimulatedDataRepository(
randomSeed=100, numDatasets=3,
numVariantSets=3, numCalls=3, variantDensity=0.5,
numReferenceSets=3, numReferencesPerReferenceSet=3,
numReadGroupSets=3, numReadGroupsPerReadGroupSet=3,
numAlignments=3))
cls.dataRepo = cls.backend.getDataRepository()
def setUp(self):
self.client = self.getClient()
def verifyObjectList(self, gaObjects, datamodelObjects, getMethod):
"""
Verifies that the specified list of protocol objects corresponds
to the specified list of datamodel objects.
"""
for gaObject, datamodelObject in utils.zipLists(
gaObjects, datamodelObjects):
self.assertEqual(gaObject, datamodelObject.toProtocolElement())
otherGaObject = getMethod(gaObject.id)
self.assertEqual(gaObject, otherGaObject)
def testAllDatasets(self):
datasets = list(self.client.searchDatasets())
self.verifyObjectList(
datasets, self.dataRepo.getDatasets(), self.client.getDataset)
def testAllReferenceSets(self):
referenceSets = list(self.client.searchReferenceSets())
self.verifyObjectList(
referenceSets, self.dataRepo.getReferenceSets(),
self.client.getReferenceSet)
def testAllReferences(self):
for referenceSet in self.client.searchReferenceSets():
references = list(self.client.searchReferences(referenceSet.id))
datamodelReferences = self.dataRepo.getReferenceSet(
referenceSet.id).getReferences()
self.verifyObjectList(
references, datamodelReferences, self.client.getReference)
for datamodelReference in datamodelReferences:
bases = self.client.listReferenceBases(
datamodelReference.getId())
otherBases = datamodelReference.getBases(
0, datamodelReference.getLength())
self.assertEqual(bases, otherBases)
def testAllVariantSets(self):
for dataset in self.client.searchDatasets():
variantSets = list(self.client.searchVariantSets(dataset.id))
datamodelVariantSets = self.dataRepo.getDataset(
dataset.id).getVariantSets()
self.verifyObjectList(
variantSets, datamodelVariantSets, self.client.getVariantSet)
def testAllVariants(self):
for datamodelDataset in self.dataRepo.getDatasets():
for datamodelVariantSet in datamodelDataset.getVariantSets():
# TODO the values should be derived from the datamodel
# variant set object.
start = 0
end = 20
referenceName = "fixme"
variants = list(self.client.searchVariants(
datamodelVariantSet.getId(), start=start, end=end,
referenceName=referenceName))
datamodelVariants = [
DatamodelObjectWrapper(variant) for variant in
datamodelVariantSet.getVariants(
referenceName, start, end)]
self.verifyObjectList(
variants, datamodelVariants, self.client.getVariant)
def testAllReadGroupSets(self):
for dataset in self.client.searchDatasets():
readGroupSets = list(self.client.searchReadGroupSets(dataset.id))
datamodelReadGroupSets = self.dataRepo.getDataset(
dataset.id).getReadGroupSets()
self.verifyObjectList(
readGroupSets, datamodelReadGroupSets,
self.client.getReadGroupSet)
# Check the readGroups.
for readGroupSet, datamodelReadGroupSet in zip(
readGroupSets, datamodelReadGroupSets):
datamodelReadGroups = datamodelReadGroupSet.getReadGroups()
self.verifyObjectList(
readGroupSet.read_groups, datamodelReadGroups,
self.client.getReadGroup)
def testAllReads(self):
for dmDataset in self.dataRepo.getDatasets():
for dmReadGroupSet in dmDataset.getReadGroupSets():
dmReferenceSet = dmReadGroupSet.getReferenceSet()
for dmReadGroup in dmReadGroupSet.getReadGroups():
for dmReference in dmReferenceSet.getReferences():
# TODO fix these coordinates.
start = 0
end = 10
dmReads = list(dmReadGroup.getReadAlignments(
dmReference, start, end))
reads = list(self.client.searchReads(
[dmReadGroup.getId()], dmReference.getId(),
start, end))
self.assertGreater(len(reads), 0)
for dmRead, read in utils.zipLists(dmReads, reads):
self.assertEqual(dmRead, read)
class TestExhaustiveListingsHttp(ExhaustiveListingsMixin, unittest.TestCase):
"""
Tests the exhaustive listings using the HTTP client.
"""
def getClient(self):
return DummyHttpClient(self.backend)
class TestExhaustiveListingsLocal(ExhaustiveListingsMixin, unittest.TestCase):
"""
Tests the exhaustive listings using the local client.
"""
def getClient(self):
return client.LocalClient(self.backend)
class PagingMixin(object):
"""
Tests the paging code using a simulated backend.
"""
@classmethod
def setUpClass(cls):
cls.numReferences = 25
cls.backend = backend.Backend(datarepo.SimulatedDataRepository(
randomSeed=100, numDatasets=0,
numReferenceSets=1,
numReferencesPerReferenceSet=cls.numReferences))
cls.dataRepo = cls.backend.getDataRepository()
def setUp(self):
self.client = self.getClient()
self.datamodelReferenceSet = self.dataRepo.getReferenceSetByIndex(0)
self.datamodelReferences = self.datamodelReferenceSet.getReferences()
self.references = [
dmReference.toProtocolElement()
for dmReference in self.datamodelReferences]
self.assertEqual(len(self.references), self.numReferences)
def verifyAllReferences(self):
"""
Verifies that we correctly return all references.
"""
references = list(self.client.searchReferences(
self.datamodelReferenceSet.getId()))
self.assertEqual(references, self.references)
def testDefaultPageSize(self):
self.verifyAllReferences()
def verifyPageSize(self, pageSize):
self.client.setPageSize(pageSize)
self.assertEqual(pageSize, self.client.getPageSize())
self.verifyAllReferences()
def testPageSize1(self):
self.verifyPageSize(1)
def testPageSize2(self):
self.verifyPageSize(2)
def testPageSize3(self):
self.verifyPageSize(3)
def testPageSizeAlmostListLength(self):
self.verifyPageSize(self.numReferences - 1)
def testPageSizeListLength(self):
self.verifyPageSize(self.numReferences)
class TestPagingLocal(PagingMixin, unittest.TestCase):
"""
Tests paging using the local client.
"""
def getClient(self):
return client.LocalClient(self.backend)
class TestPagingHttp(PagingMixin, unittest.TestCase):
"""
Tests paging using the HTTP client.
"""
def getClient(self):
return DummyHttpClient(self.backend)
| apache-2.0 | 127,916,958,536,394,690 | 38.795247 | 78 | 0.652931 | false |
streampref/wcimport | tool/query/bestseq/move.py | 1 | 6139 | # -*- coding: utf-8 -*-
'''
Queries for experiments with preference operators
'''
# =============================================================================
# Queries with preference operators
# =============================================================================
# Moves
Q_MOVE_BESTSEQ = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND] FROM s
ACCORDING TO TEMPORAL PREFERENCES
IF PREVIOUS (move = 'rec') THEN
(move = 'drib') BETTER (move = 'pass') [place]
AND
(move = 'pass') BETTER (move = 'bpas')
AND
IF ALL PREVIOUS (place = 'mf') THEN
(place = 'mf') BETTER (place = 'di')
;
'''
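# Illustrative instantiation (the RANGE/SLIDE values are only examples): the
# placeholders are filled in before the query is handed to the evaluation tool.
Q_MOVE_BESTSEQ_EXAMPLE = Q_MOVE_BESTSEQ.format(ran=60, sli=1)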
# =============================================================================
# CQL Equivalences for moves
# =============================================================================
Q_MOVE_DICT = {}
Q_MOVE_ID_LIST = ['z', 'p_join', 'p', 'r1', 'r2', 'nv_ap', 'm_ap', 'r3',
'd1_pref', 'd1_npref', 'd2_pref', 'd2_npref',
'd3_pref', 'd3_npref', 'd1', 'd2', 'd3', 't1', 't2', 't3',
'id', 'equiv']
# Sequence extraction
Q_MOVE_DICT['z'] = '''
SELECT SEQUENCE IDENTIFIED BY player_id
[RANGE {ran} SECOND, SLIDE {sli} SECOND]
FROM s;
'''
# Join same positions
Q_MOVE_DICT['p_join'] = '''
SELECT z1._pos, z1.player_id AS x1, z1.place, z1.move,
z2.player_id AS x2, z2.place AS _place, z2.move AS _move
FROM z AS z1, z AS z2 WHERE z1._pos = z2._pos;
'''
# Smaller non correspondent position (positions to be compared)
Q_MOVE_DICT['p'] = '''
SELECT MIN(_pos) AS _pos, x1, x2 FROM p_join
WHERE NOT place = _place OR NOT move = _move
GROUP BY x1, x2;
'''
# PREVIOUS condition of rule 1
Q_MOVE_DICT['r1'] = '''
SELECT p._pos, p.x1 FROM z, p
WHERE p.x1 = z.player_id AND p._pos = z._pos+1 AND z.move = 'rec';
'''
# Temporal condition of rule 2
Q_MOVE_DICT['r2'] = '''
SELECT _pos, x1 FROM p;
'''
# ALL PREVIOUS condition of rule 2
Q_MOVE_DICT['nv_ap'] = '''
SELECT MAX(_pos) AS _pos, x1 FROM p GROUP BY x1
UNION
SELECT _pos, player_id AS x1 FROM z WHERE NOT place = 'mf';
'''
Q_MOVE_DICT['m_ap'] = '''
SELECT MIN(_pos) AS _pos, x1 FROM nv_ap GROUP BY x1;
'''
Q_MOVE_DICT['r3'] = '''
SELECT p._pos, p.x1 FROM p, m_ap AS pmin
WHERE p.x1 = pmin.x1 AND p._pos <= pmin._pos AND p._pos > 1;
'''
# Preferred tuples according to rule 1
Q_MOVE_DICT['d1_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r1 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'drib'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r1 AS r, tup AS t WHERE t.move = 'drib';
'''
# Non-preferred tuples according to rule 1
Q_MOVE_DICT['d1_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r1 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r1 AS r, tup AS t WHERE t.move = 'pass';
'''
# Preferred tuples according to rule 2
Q_MOVE_DICT['d2_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r2 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r2 AS r, tup AS t WHERE t.move = 'pass';
'''
# Non-preferred tuples according to rule 2
Q_MOVE_DICT['d2_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r2 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'bpas'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r2 AS r, tup AS t WHERE t.move = 'bpas';
'''
# Preferred tuples according to rule 3
Q_MOVE_DICT['d3_pref'] = '''
SELECT r._pos, r.x1, place, move, 1 AS t FROM r3 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'drib'
UNION
SELECT r._pos, r.x1, t.place, t.move, 0 AS t
FROM r3 AS r, tup AS t WHERE t.move = 'drib';
'''
# Non-preferred tuples according to rule 3
Q_MOVE_DICT['d3_npref'] = '''
SELECT r._pos, r.x1 AS x2, place, move, 1 AS t FROM r3 AS r, z
WHERE r._pos = z._pos AND r.x1 = z.player_id AND move = 'pass'
UNION
SELECT r._pos, r.x1 AS x2, t.place, t.move, 0 AS t
FROM r3 AS r, tup AS t WHERE t.move = 'pass';
'''
# Direct comparisons
Q_MOVE_DICT['d1'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d1_pref AS pref, d1_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2;
'''
Q_MOVE_DICT['d2'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d2_pref AS pref, d2_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2
AND pref.place = npref.place;
'''
Q_MOVE_DICT['d3'] = '''
SELECT ri._pos, ri.x1, ri.x2, pref.place, pref.move , pref.t,
npref.place AS _place, npref.move AS _move, npref.t AS _t
FROM p AS ri, d3_pref AS pref, d3_npref AS npref
WHERE ri._pos = pref._pos AND ri._pos = npref._pos
AND ri.x1 = pref.x1 AND ri.x2 = npref.x2
AND pref.move = npref.move;
'''
# Transitive comparisons
Q_MOVE_DICT['t1'] = '''
SELECT * FROM d1
UNION SELECT * FROM d2
UNION SELECT * FROM d3;
'''
Q_MOVE_DICT['t2'] = '''
SELECT pref._pos, pref.x1, npref.x2, pref.place, pref.move, pref.t,
npref.place AS _place, npref.move AS _move, npref._t
FROM t1 AS pref, t1 AS npref
WHERE pref._pos = npref._pos AND pref.x1 = npref.x1 AND pref.x2 = npref.x2
AND pref._place = npref.place AND pref._move = npref.move
UNION SELECT * FROM t1;
'''
Q_MOVE_DICT['t3'] = '''
SELECT pref._pos, pref.x1, npref.x2, pref.place, pref.move, pref.t,
npref.place AS _place, npref.move AS _move, npref._t
FROM t2 AS pref, t2 AS npref
WHERE pref._pos = npref._pos AND pref.x1 = npref.x1 AND pref.x2 = npref.x2
AND pref._place = npref.place AND pref._move = npref.move
UNION SELECT * FROM t2;
'''
# ID of dominated sequences
Q_MOVE_DICT['id'] = '''
SELECT DISTINCT player_id FROM z
EXCEPT
SELECT DISTINCT x2 AS player_id FROM t3
WHERE t = 1 AND _t = 1;
'''
# Dominant sequences
Q_MOVE_DICT['equiv'] = '''
SELECT z.* FROM z, id
WHERE z.player_id = id.player_id;
'''
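# Illustrative sketch (not part of the original file): Q_MOVE_ID_LIST above
# fixes the order in which the CQL equivalence queries are meant to be
# evaluated, so a driver could walk them like this ("register_query" is a
# hypothetical helper, not something defined in this package):
#
#   for query_id in Q_MOVE_ID_LIST:
#       register_query(query_id, Q_MOVE_DICT[query_id])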
| gpl-3.0 | 3,218,681,803,519,124,000 | 29.849246 | 79 | 0.59684 | false |
hperala/kontuwikibot | scripts/spamremove.py | 1 | 3739 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to remove links that are being or have been spammed.
Usage:
spamremove.py www.spammedsite.com
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
Command line options:
-always Do not ask, but remove the lines automatically. Be very
careful in using this option!
-namespace: Filters the search to a given namespace. If this is specified
multiple times it will search all given namespaces
"""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: c728e9bcc488a9695bca883a5fc654f3cf0197b9 $'
#
import pywikibot
from pywikibot import i18n
from pywikibot.editor import TextEditor
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
always = False
namespaces = []
spamSite = ''
for arg in pywikibot.handle_args(args):
if arg == "-always":
always = True
elif arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[len('-namespace:'):]))
except ValueError:
namespaces.append(arg[len('-namespace:'):])
else:
spamSite = arg
if not spamSite:
pywikibot.showHelp()
pywikibot.output(u"No spam site specified.")
return
mysite = pywikibot.Site()
pages = mysite.exturlusage(spamSite, namespaces=namespaces, content=True)
summary = i18n.twtranslate(mysite, 'spamremove-remove',
{'url': spamSite})
for i, p in enumerate(pages, 1):
text = p.text
if spamSite not in text:
continue
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% p.title())
lines = text.split('\n')
newpage = []
lastok = ""
for line in lines:
if spamSite in line:
if lastok:
pywikibot.output(lastok)
pywikibot.output('\03{lightred}%s\03{default}' % line)
lastok = None
else:
newpage.append(line)
if line.strip():
if lastok is None:
pywikibot.output(line)
lastok = line
if always:
answer = "y"
else:
answer = pywikibot.input_choice(
u'\nDelete the red lines?',
[('yes', 'y'), ('no', 'n'), ('edit', 'e')],
'n', automatic_quit=False)
if answer == "n":
continue
elif answer == "e":
editor = TextEditor()
newtext = editor.edit(text, highlight=spamSite,
jumpIndex=text.find(spamSite))
else:
newtext = "\n".join(newpage)
if newtext != text:
p.text = newtext
p.save(summary)
else:
if "i" not in locals():
pywikibot.output('No page found.')
elif i == 1:
pywikibot.output('1 pages done.')
else:
pywikibot.output('%d pages done.' % i)
if __name__ == '__main__':
main()
| mit | -3,847,130,002,318,686,000 | 28.912 | 79 | 0.554426 | false |
googleapis/python-compute | google/cloud/compute_v1/services/license_codes/transports/rest.py | 1 | 8998 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.auth.transport.requests import AuthorizedSession
from google.cloud.compute_v1.types import compute
from .base import LicenseCodesTransport, DEFAULT_CLIENT_INFO
class LicenseCodesRestTransport(LicenseCodesTransport):
"""REST backend transport for LicenseCodes.
The LicenseCodes API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._prep_wrapped_messages(client_info)
def get(
self,
request: compute.GetLicenseCodeRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.LicenseCode:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetLicenseCodeRequest):
The request object. A request message for
LicenseCodes.Get. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.LicenseCode:
Represents a License Code resource.
A License Code is a unique identifier used to represent
a license resource. Caution This resource is intended
for use only by third-party partners who are creating
Cloud Marketplace images. (== resource_for
{$api_version}.licenseCodes ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/global/licenseCodes/{license_code}".format(
host=self._host, project=request.project, license_code=request.license_code,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.LicenseCode.from_json(
response.content, ignore_unknown_fields=True
)
def test_iam_permissions(
self,
request: compute.TestIamPermissionsLicenseCodeRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TestPermissionsResponse:
r"""Call the test iam permissions method over HTTP.
Args:
request (~.compute.TestIamPermissionsLicenseCodeRequest):
The request object. A request message for
LicenseCodes.TestIamPermissions. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.TestPermissionsResponse:
"""
# Jsonify the request body
body = compute.TestPermissionsRequest.to_json(
request.test_permissions_request_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
)
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/global/licenseCodes/{resource}/testIamPermissions".format(
host=self._host, project=request.project, resource=request.resource,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.post(url, headers=headers, data=body,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.TestPermissionsResponse.from_json(
response.content, ignore_unknown_fields=True
)
__all__ = ("LicenseCodesRestTransport",)
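# Illustrative usage sketch (added for clarity; not part of the generated
# module). Real code normally goes through the generated LicenseCodes client,
# but the transport can also be driven directly; the project and license-code
# values below are placeholders.
def _example_get_license_code(credentials):  # pragma: no cover - example only
    transport = LicenseCodesRestTransport(credentials=credentials)
    request = compute.GetLicenseCodeRequest(
        project="my-project", license_code="example-license-code"
    )
    # Returns a compute.LicenseCode message parsed from the REST response.
    return transport.get(request)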
| apache-2.0 | 1,804,528,747,988,835,600 | 40.275229 | 118 | 0.631918 | false |
olichtne/python-perfrepo | perfrepo/PerfRepoTest.py | 1 | 3986 | """
This module contains the PerfRepoTest class.
Copyright 2015 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
[email protected] (Ondrej Lichtner)
"""
import textwrap
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, iselement
from perfrepo.PerfRepoObject import PerfRepoObject
from perfrepo.PerfRepoMetric import PerfRepoMetric
from perfrepo.Common import PerfRepoException
from perfrepo.Common import indent
class PerfRepoTest(PerfRepoObject):
def __init__(self, xml=None):
if xml is None:
self._id = None
self._name = None
self._uid = None
self._description = ""
self._groupid = None
self._metrics = []
elif isinstance(xml, str) or isinstance(xml, bytes) or iselement(xml):
if isinstance(xml, str) or isinstance(xml, bytes):
root = ElementTree.fromstring(xml)
else:
root = xml
if root.tag != "test":
raise PerfRepoException("Invalid xml.")
self._id = root.get("id")
self._name = root.get("name")
self._uid = root.get("uid")
self._groupid = root.get("groupId")
if root.find("description") is not None:
self._description = root.find("description").text
else:
self._description = ""
self._metrics = []
for metric in root.find("metrics"):
if metric.tag != "metric":
continue
self._metrics.append(PerfRepoMetric(metric))
else:
raise PerfRepoException("Parameter xml must be"\
" a string, an Element or None")
def get_obj_url(self):
return "test/%s" % self._id
def get_id(self):
return self._id
def get_name(self):
return self._name
def get_uid(self):
return self._uid
def get_description(self):
return self._description
def get_groupid(self):
return self._groupid
def get_metrics(self):
return self._metrics
def set_id(self, id):
self._id = id
def set_name(self, name):
self._name = name
def set_uid(self, uid):
self._uid = uid
def set_description(self, description):
self._description = description
def set_groupid(self, groupid):
self._groupid = groupid
def add_metric(self, metric):
if not isinstance(metric, PerfRepoMetric):
return None
else:
self._metrics.append(metric)
return metric
def to_xml(self):
root = Element('test')
self._set_element_atrib(root, 'id', self._id)
self._set_element_atrib(root, 'name', self._name)
self._set_element_atrib(root, 'uid', self._uid)
self._set_element_atrib(root, 'groupId', self._groupid)
description = ElementTree.SubElement(root, 'description')
description.text = self._description
metrics = ElementTree.SubElement(root, 'metrics')
for metric in self._metrics:
metrics.append(metric.to_xml())
return root
def __str__(self):
ret_str = """\
id = %s
uid = %s
name = %s
groupid = %s
description:
""" % (self._id,
self._uid,
self._name,
self._groupid)
ret_str = textwrap.dedent(ret_str)
ret_str += indent(str(self._description) + "\n", 4)
ret_str += "metrics:\n"
for metric in self._metrics:
ret_str += indent(str(metric) + "\n", 4)
ret_str += indent("------------------------\n", 4)
return textwrap.dedent(ret_str)
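# Example sketch (not part of the original module): building a test object in
# code and serialising it with to_xml(). The field values are illustrative.
def _example_build_test():
    test = PerfRepoTest()
    test.set_name("example-test")
    test.set_uid("example_test_uid")
    test.set_groupid("perfrepouser")
    test.set_description("Created by the PerfRepoTest example sketch.")
    return ElementTree.tostring(test.to_xml())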
| gpl-2.0 | -8,145,718,045,749,561,000 | 29.899225 | 78 | 0.544656 | false |
nvdv/vprof | vprof/tests/code_heatmap_e2e.py | 1 | 5023 | """End-to-end tests for code heatmap module."""
# pylint: disable=missing-docstring, blacklisted-name
import functools
import gzip
import json
import inspect
import threading
import os
import unittest
import urllib.request
from vprof import code_heatmap
from vprof import stats_server
from vprof import runner
from vprof.tests import test_pkg # pylint: disable=unused-import
_HOST, _PORT = 'localhost', 12345
_MODULE_FILENAME = 'vprof/tests/test_pkg/dummy_module.py'
_PACKAGE_PATH = 'vprof/tests/test_pkg/'
_DUMMY_MODULE_SOURCELINES = [
['line', 1, 'def dummy_fib(n):'],
['line', 2, ' if n < 2:'],
['line', 3, ' return n'],
['line', 4, ' return dummy_fib(n - 1) + dummy_fib(n - 2)'],
['line', 5, '']]
_MAIN_MODULE_SOURCELINES = [
['line', 1, 'from test_pkg import dummy_module'],
['line', 2, ''],
['line', 3, 'dummy_module.dummy_fib(5)'],
['line', 4, '']]
_POLL_INTERVAL = 0.01
class CodeHeatmapModuleEndToEndTest(unittest.TestCase):
def setUp(self):
program_stats = code_heatmap.CodeHeatmapProfiler(
_MODULE_FILENAME).run()
stats_handler = functools.partial(
stats_server.StatsHandler, program_stats)
self.server = stats_server.StatsServer(
(_HOST, _PORT), stats_handler)
threading.Thread(
target=self.server.serve_forever,
kwargs={'poll_interval': _POLL_INTERVAL}).start()
def tearDown(self):
self.server.shutdown()
self.server.server_close()
def testRequest(self):
response = urllib.request.urlopen(
'http://%s:%s/profile' % (_HOST, _PORT))
response_data = gzip.decompress(response.read())
stats = json.loads(response_data.decode('utf-8'))
self.assertEqual(stats['objectName'], _MODULE_FILENAME)
self.assertTrue(stats['runTime'] > 0)
heatmaps = stats['heatmaps']
self.assertEqual(len(heatmaps), 1)
self.assertTrue(_MODULE_FILENAME in heatmaps[0]['name'])
self.assertDictEqual(heatmaps[0]['executionCount'], {'1': 1})
self.assertListEqual(heatmaps[0]['srcCode'], _DUMMY_MODULE_SOURCELINES)
class CodeHeatmapPackageEndToEndTest(unittest.TestCase):
def setUp(self):
program_stats = code_heatmap.CodeHeatmapProfiler(
_PACKAGE_PATH).run()
stats_handler = functools.partial(
stats_server.StatsHandler, program_stats)
self.server = stats_server.StatsServer(
(_HOST, _PORT), stats_handler)
threading.Thread(
target=self.server.serve_forever,
kwargs={'poll_interval': _POLL_INTERVAL}).start()
def tearDown(self):
self.server.shutdown()
self.server.server_close()
def testRequest(self):
response = urllib.request.urlopen(
'http://%s:%s/profile' % (_HOST, _PORT))
response_data = gzip.decompress(response.read())
stats = json.loads(response_data.decode('utf-8'))
self.assertEqual(stats['objectName'], _PACKAGE_PATH)
self.assertTrue(stats['runTime'] > 0)
heatmap_files = {heatmap['name'] for heatmap in stats['heatmaps']}
self.assertTrue(os.path.abspath(
'vprof/tests/test_pkg/__main__.py') in heatmap_files)
self.assertTrue(os.path.abspath(
'vprof/tests/test_pkg/dummy_module.py') in heatmap_files)
class CodeHeatmapFunctionEndToEndTest(unittest.TestCase):
def setUp(self):
def _func(foo, bar):
baz = foo + bar
return baz
self._func = _func
stats_handler = functools.partial(
stats_server.StatsHandler, {})
self.server = stats_server.StatsServer(
(_HOST, _PORT), stats_handler)
threading.Thread(
target=self.server.serve_forever,
kwargs={'poll_interval': _POLL_INTERVAL}).start()
def tearDown(self):
self.server.shutdown()
self.server.server_close()
def testRequest(self):
runner.run(
self._func, 'h', ('foo', 'bar'), host=_HOST, port=_PORT)
response = urllib.request.urlopen(
'http://%s:%s/profile' % (_HOST, _PORT))
response_data = gzip.decompress(response.read())
stats = json.loads(response_data.decode('utf-8'))
self.assertTrue(stats['h']['runTime'] > 0)
heatmaps = stats['h']['heatmaps']
curr_filename = inspect.getabsfile(inspect.currentframe())
self.assertEqual(stats['h']['objectName'],
'_func @ %s (function)' % curr_filename)
self.assertEqual(len(heatmaps), 1)
self.assertDictEqual(
heatmaps[0]['executionCount'], {'101': 1, '102': 1})
self.assertListEqual(
heatmaps[0]['srcCode'],
[['line', 100, u' def _func(foo, bar):\n'],
['line', 101, u' baz = foo + bar\n'],
['line', 102, u' return baz\n']])
# pylint: enable=missing-docstring, blacklisted-name
| bsd-2-clause | -3,235,933,077,664,283,000 | 35.398551 | 79 | 0.601234 | false |
django-oscar/django-oscar-paymentexpress | tests/facade_tests.py | 1 | 7334 | from django.test import TestCase
from mock import Mock, patch
from paymentexpress.facade import Facade
from paymentexpress.gateway import AUTH, PURCHASE
from paymentexpress.models import OrderTransaction
from tests import (XmlTestingMixin, CARD_VISA, SAMPLE_SUCCESSFUL_RESPONSE,
SAMPLE_DECLINED_RESPONSE, SAMPLE_ERROR_RESPONSE)
from oscar.apps.payment.utils import Bankcard
from oscar.apps.payment.exceptions import (UnableToTakePayment,
InvalidGatewayRequestError)
class MockedResponseTestCase(TestCase):
def create_mock_response(self, body, status_code=200):
response = Mock()
response.content = body
response.text = body
response.status_code = status_code
return response
class FacadeTests(TestCase, XmlTestingMixin):
def setUp(self):
self.facade = Facade()
def test_zero_amount_raises_exception(self):
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 0, card)
def test_zero_amount_for_complete_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 0, '1234')
def test_zero_amount_for_purchase_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 0)
def test_purchase_without_billing_id_or_card_raises_exception(self):
with self.assertRaises(ValueError):
self.facade.purchase('1000', 1.23)
def test_zero_amount_for_refund_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 0, '1234')
def test_merchant_reference_format(self):
merchant_ref = self.facade._get_merchant_reference('1000', AUTH)
self.assertRegexpMatches(merchant_ref, r'^\d+_[A-Z]+_\d+_\d{4}$')
class FacadeSuccessfulResponseTests(MockedResponseTestCase):
dps_txn_ref = '000000030884cdc6'
dps_billing_id = '0000080023225598'
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_successful_call_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
auth_dict = self.facade.authorise('1000', 1, self.card)
complete_dict = self.facade.complete('1000', 1.23,
self.dps_txn_ref)
refund_dict = self.facade.refund('1000', 1.23, '000000030884cdc6')
validate_dict = self.facade.validate(self.card)
response_dicts = (auth_dict, complete_dict, refund_dict,
validate_dict)
for response_dict in response_dicts:
self.assertEquals(self.dps_txn_ref,
response_dict['txn_reference'])
self.assertEquals(self.dps_billing_id,
response_dict['partner_reference'])
def test_purchase_with_billing_id_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, 'abc123')
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_purchase_with_bankcard_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, None, self.card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_successful_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
self.facade.authorise('10001', 10.25, self.card)
txn = OrderTransaction.objects.filter(order_number='10001')[0]
self.assertEquals(AUTH, txn.txn_type)
def test_empty_issue_date_is_allowed(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123")
txn_ref = self.facade.authorise('1000', 1.23, card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
class FacadeDeclinedResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_declined_call_raises_an_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 1, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, 'abc123')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, None, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.validate(self.card)
def test_declined_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
try:
self.facade.purchase('1001', 10.24, None, self.card)
except Exception:
pass
txn = OrderTransaction.objects.filter(order_number='1001')[0]
self.assertIsNotNone(txn)
self.assertEquals(PURCHASE, txn.txn_type)
class FacadeErrorResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_error_response_raises_invalid_gateway_request_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_ERROR_RESPONSE)
with self.assertRaises(InvalidGatewayRequestError):
self.facade.purchase('1000', 10.24, None, self.card)
| bsd-3-clause | -3,153,989,515,319,986,000 | 39.076503 | 78 | 0.604854 | false |
daonb/tumulus | tumuli/urls.py | 1 | 1745 | """tumuli URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from rest_framework.urlpatterns import format_suffix_patterns
from biography import views
urlpatterns = [
path('admin/', admin.site.urls),
# path('api/', include(biography.urls)),
url('api/bio/(?P<username>.+)/', views.BiographyByUserList.as_view()), # get user's Bio by username
url('^api/periods/(?P<username>.+)/$', views.PeriodByUserList.as_view()), # get user's Periods by username
url('^api/memoirs/(?P<username>.+)/$', views.MemoirsByUserList.as_view()),
# get user's Memoirs by username
url('^api/contentatom/(?P<username>.+)/$', views.ContentAtomByUserList.as_view()),
# get user's Content Atoms by username
]
urlpatterns = format_suffix_patterns(urlpatterns)
try:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
except ImproperlyConfigured:
# it's on S3, nothing for us to do
pass
| agpl-3.0 | 1,101,324,068,289,894,900 | 38.659091 | 110 | 0.718052 | false |
muccg/rdrf | scripts/check_views.py | 1 | 5203 | '''
TO DO:
- Further abstract states (maybe find some way of removing reliance
on indices)
- Add comments to provide full information on code
- Create unit tests for script (view with mixin, view w/out mixin
with decorators, no mixin no decorators)
'''
import os
import re
import sys
from os.path import abspath, join
check_decorator_strings = [
'@method_decorator(login_required)',
'@login_required',
]
check_method_strings = [
'def get(',
'def post(',
]
ignore_dirs = set([
'build',
])
vcheck_states = {
's': "SEARCH",
'v': "INVIEW",
}
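# Added note: the checker below is a small two-state scanner. It stays in
# "SEARCH" ('s') until find_view() spots a class deriving from View without
# LoginRequiredMixin, then switches to "INVIEW" ('v') and flags any get()/post()
# method that is not immediately preceded by a login_required decorator.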
whitelist = [
'ClinicianActivationView',
'CopyrightView',
'LandingView',
'PatientsListingView',
'PromsCompletedPageView',
'PromsLandingPageView',
'PromsView',
'RecaptchaValidator',
'RegistryListView',
'RegistryView',
'RouterView',
'SurveyEndpoint',
'UsernameLookup',
]
def get_lines(file_name, file_dir):
full_file = join(file_dir, file_name)
with open(full_file) as open_file:
lines = open_file.readlines()
return lines, full_file
def get_superclass(class_text):
super_strings = []
ret_strings = []
if re.match(r'^class', class_text) is not None:
super_strings = re.split(r'^class.+\(|,|\):', class_text)
for substr in super_strings:
if substr != "":
ret_strings.append(substr.strip())
return ret_strings
def find_view(line_text):
state_n = 's'
view_n = ''
# Check line
superclasses = get_superclass(line_text)
if superclasses != [] and "View" in superclasses:
# Change to "in-view" state if check for mixin is false
if "LoginRequiredMixin" not in superclasses:
state_n = 'v'
view_n = re.findall(r'class (.+)\(', line_text)[0]
return state_n, view_n
def validate_view(line_text, v_lines, v_index):
has_failed = False
# Check for get/post
if any(met_str in line_text for met_str in check_method_strings):
# Check if get/post has a decorator - if not, add to list
if not any(dec_str in v_lines[v_index - 1] for
dec_str in check_decorator_strings):
has_failed = True
return has_failed
def search_and_check_views(cur_line, all_lines, line_index,
cur_state, cur_view):
view_failed = False
# Change back to normal search once normal indent level is reached
# (use regex to match no leading whitespace and no comments)
if re.match(r'^[^\s\#]', cur_line) is not None:
cur_state = 's'
# Redefine current state
new_state = vcheck_states[cur_state]
# Search until view is found
if new_state == "SEARCH":
cur_state, cur_view = find_view(cur_line)
# While in "in-view" state, look for get/post methods
elif new_state == "INVIEW":
view_failed = validate_view(cur_line, all_lines, line_index)
return view_failed, cur_state, cur_view
def remove_whitelisted(insecure_dict):
remove_files = []
for bad_file, bad_views in insecure_dict.items():
remove_views = []
for bad_view in bad_views:
if bad_view in whitelist:
remove_views.append(bad_view)
for rm_view in remove_views:
insecure_dict[bad_file].remove(rm_view)
if insecure_dict[bad_file] == []:
remove_files.append(bad_file)
for rm_file in remove_files:
insecure_dict.pop(rm_file)
def show_bad_views(file_view_dict):
if len(file_view_dict) > 0:
print("Non-secure views found:")
for bad_file, bad_views in file_view_dict.items():
print(f"File: {bad_file}")
print("Views:")
for bad_view in bad_views:
print(bad_view)
sys.exit(1)
else:
print("Views secure.")
def check_view_security():
files_and_views = {}
# Not the best, but this way only one base directory is read.
# Perhaps do some error handling if a directory isn't passed in
dir_name = abspath(sys.argv[1])
for base_dir, sub_dirs, files in os.walk(dir_name, topdown=True):
# Don't check certain folders - removes duplicates
sub_dirs[:] = [s_dir for s_dir in sub_dirs if
s_dir not in ignore_dirs]
for f_name in files:
if re.match(r'.+\.py$', f_name) is not None:
f_lines, full_f_name = get_lines(f_name, base_dir)
state = 's'
view = ''
view_list = []
for index, line_var in enumerate(f_lines):
weak_view, state, view = search_and_check_views(
line_var, f_lines, index, state, view
)
if weak_view:
if view not in view_list:
view_list.append(view)
if view_list != []:
files_and_views.update({full_f_name: view_list})
remove_whitelisted(files_and_views)
show_bad_views(files_and_views)
# Run the primary function if this is being used standalone
if __name__ == "__main__":
check_view_security()
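# Illustrative example (not part of the original script) of what gets reported:
# a class-based view with a get()/post() method that has neither the
# LoginRequiredMixin superclass nor a login_required decorator on the line
# directly above the method, e.g.
#
#   class UnprotectedView(View):
#       def get(self, request):
#           ...
#
# would be listed unless its name is added to `whitelist` above.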
| agpl-3.0 | 6,208,441,592,899,194,000 | 27.277174 | 71 | 0.582933 | false |
antmicro/distant-rec | tools/shebang-replace.py | 1 | 1211 | #!/usr/bin/env python3
import sys
from os import listdir, chdir
from os.path import isfile, abspath
UNTIL = '/build/'
REPLACE_WITH = '/b/f/w'
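# Illustrative example (added for clarity): with the defaults above, a script
# whose first line is
#   #!/home/user/project/build/bin/python
# is rewritten by bangchange() below to
#   #!/b/f/w/build/bin/python
# i.e. everything before '/build/' in the interpreter path is replaced.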
def bangchange(file_path):
script = File(file_path)
if script.flist[0].find("#!") == 0:
if script.flist[0].find(UNTIL) > 0:
print("\033[92m" + "[MOD] {}".format(file_path))
where_divide = script.flist[0].find(UNTIL)
script.flist[0] = "#!" + REPLACE_WITH + script.flist[0][where_divide:]
script.flush()
class File:
def __init__(self, path):
self.fh = open(path, "r+")
try:
self.fstring = self.fh.read()
except UnicodeDecodeError:
print("\033[94m" + "[SKP] {}".format(path))
self.fstring = ""
self.flist = self.fstring.split("\n")
def flush(self):
self.fstring = "\n".join(self.flist)
self.fh.seek(0)
self.fh.write(self.fstring)
self.fh.close()
def main():
if len(sys.argv) != 2:
print("\033[91m"+"[FAIL] Invalid arguments")
return 1
chdir(sys.argv[1])
for filename in listdir("."):
if isfile(abspath(filename)):
bangchange(filename)
main()
| apache-2.0 | -7,748,558,177,413,436,000 | 24.765957 | 82 | 0.549959 | false |
jamesls/labmanager-shell | labmanager/shell.py | 1 | 11699 | import argparse
import getpass
import cmd
import sys
from pprint import pprint
import textwrap
import rlcompleter
import readline
import logging
import ConfigParser
import urllib2
from texttable import Texttable
import suds
from labmanager import api
from labmanager import config
from labmanager.loghandler import NullHandler
# A mapping from the SOAP returned names
# to the nicer to display names.
DISPLAY_TYPE_MAP = {
'dateCreated': 'created',
'fenceMode': 'fencemode',
'isDeployed': 'deployed',
'isPublic': 'public',
'bucketName': 'bucket',
'internalIP': 'internal',
'externalIP': 'external',
'macAddress': 'MAC',
'OwnerFullName': 'owner',
'configID': 'config',
}
SOAP_API_EXCEPTION = (
suds.MethodNotFound,
suds.PortNotFound,
suds.ServiceNotFound,
suds.TypeNotFound,
suds.BuildError,
suds.SoapHeadersNotPermitted,
suds.WebFault,
suds.transport.TransportError,
)
class LMShell(cmd.Cmd):
prompt = '(lmsh) '
LIST_CFG_COLUMNS = ['id', 'name', 'isDeployed', 'type', 'owner']
LIST_MACHINES_COLUMNS = ['id', 'name', 'internalIP', 'externalIP',
'macAddress', 'memory', 'configID']
ENUM_TYPES = {
'type': {
1: 'workspace',
2: 'library',
},
'status': {
1: 'off',
2: 'on',
3: 'suspended',
4: 'stuck',
128: 'invalid',
}
}
def __init__(self, lmapi, stdin=None, stdout=None):
cmd.Cmd.__init__(self, '', stdin, stdout)
self._lmapi = lmapi
def complete_list(self, text, line, begidx, endidx):
subcommands = ['library', 'workspace']
if not text:
return subcommands
return [c for c in subcommands if c.startswith(text)]
def do_list(self, line):
"""
List configurations.
Syntax:
list [library | workspace]
List all library and workspace configurations:
list
There are several subcommands that can optionally be used.
List only library configurations:
list library
List only workspace configurations:
list workspace
"""
configs = self._get_configs(line.strip())
if not configs:
return
columns = self.LIST_CFG_COLUMNS
table = Texttable(max_width=120)
table.set_deco(Texttable.HEADER | Texttable.VLINES)
table.set_cols_align(['l' for l in columns])
table.set_cols_width([6, 30, 8, 10, 15])
table.header([DISPLAY_TYPE_MAP.get(c, c) for c in columns])
rows = self._get_rows(configs, columns)
table.add_rows(rows, header=False)
print table.draw()
def _get_configs(self, config_type):
if config_type == 'library':
configs = self._lmapi.list_library_configurations()
elif config_type == 'workspace':
configs = self._lmapi.list_workspace_configurations()
else:
configs = self._lmapi.list_all_configurations()
return configs
def do_show(self, line):
"""
Show all information for a single configuration.
Syntax:
show <configid>
The config ID can be obtained from the 'list' command.
"""
configuration = self._lmapi.show_configuration(line.strip())
pprint(configuration)
def do_machines(self, line):
"""
List all machines in a configuration.
Syntax:
machines <configid>
The config ID can be obtained from the 'list' command.
"""
machines = self._lmapi.list_machines(line.strip())
if not machines:
return
columns = self._get_machine_output_columns(machines)
table = Texttable(max_width=140)
table.set_deco(Texttable.HEADER | Texttable.VLINES)
table.set_cols_align(['l' for l in columns])
table.header([DISPLAY_TYPE_MAP.get(c, c) for c in columns])
rows = self._get_rows(machines, columns)
table.add_rows(rows, header=False)
print table.draw()
def _get_machine_output_columns(self, machines):
return [c for c in self.LIST_MACHINES_COLUMNS if
c in machines[0]]
def _get_rows(self, objects, columns):
rows = []
for obj in objects:
row = []
for col in columns:
if col in self.ENUM_TYPES:
# Using .get() here because sometimes
# labmanager returned non documented
# types/statuses/etc.
row.append(self.ENUM_TYPES[col].get(
obj[col], obj[col]))
elif col in obj:
row.append(obj[col])
rows.append(row)
return rows
def do_undeploy(self, line):
"""
        Undeploy a configuration.
Syntax:
undeploy <configid>
"""
config_id = line.strip()
print "Undeploying config..."
self._lmapi.undeploy_configuration(config_id)
def complete_deploy(self, text, line, begidx, endidx):
subcommands = ['unfenced', 'fenced']
if not text:
return subcommands
return [c for c in subcommands if c.startswith(text)]
def do_deploy(self, line):
"""
Deploy a configuration in a workspace.
Syntax:
deploy <fenced|unfenced> <configid>
After the configuration has been deployed, you
can use the 'machines' command to get a list of
the IP addresses of the machines.
"""
args = line.split()
if len(args) != 2:
print "wrong number of args"
return
fence_mode = self._get_fence_mode_from(args[0])
config_id = args[1]
print "Deploying config..."
self._lmapi.deploy_configuration(config_id, fence_mode)
def _get_fence_mode_from(self, mode):
if mode == 'fenced':
return self._lmapi.FENCE_ALLOW_IN_AND_OUT
elif mode == 'unfenced':
return self._lmapi.NON_FENCED
def do_checkout(self, line):
"""
Checkout a configuration from the library to the workspace.
Syntax:
checkout <configid> <workspacename>
Where the configid is the ID of the configuration as it
currently exists in the library, and workspacename is the
name you'd like the configuration to have in the workspace.
After a configuration has been checked out, it can then
be deployed (though keep in mind the newly checked out
workspace configuration will have a different configid that
        you'll need to use to deploy it).
        Due to bugs in Lab Manager 4.x, this command will fail
if multiple organizations have the same workspace name
(this is likely if your workspace name is 'Main'). It
might be possible to work around this using the internal
SOAP api, but this is currently not implemented.
Another way to work around this is to create a unique
workspace name (you will need admin privileges to do so).
"""
args = line.split()
        if len(args) != 2:
            print "wrong number of args"
            return
config_id = args[0]
workspace_name = args[1]
print "Checking out config..."
checkout_id = self._lmapi.checkout_configuration(config_id,
workspace_name)
print "Config ID of checked out configuration:", checkout_id
def do_delete(self, line):
"""
Delete a configuration.
Syntax:
delete <configid>
"""
print "Deleting config..."
self._lmapi.delete_configuration(line.strip())
def do_EOF(self, line):
print
return True
def do_quit(self, line):
return True
def do_help(self, line):
if line:
try:
func = getattr(self, 'help_' + line)
except AttributeError:
try:
doc = getattr(self, 'do_' + line).__doc__
if doc:
self.stdout.write("%s\n" % textwrap.dedent(doc))
return
except AttributeError:
pass
                self.stdout.write("%s\n" % (self.nohelp % (line,)))
                return
            func()
else:
cmd.Cmd.do_help(self, line)
def onecmd(self, line):
try:
return cmd.Cmd.onecmd(self, line)
except SOAP_API_EXCEPTION, e:
sys.stderr.write("ERROR: %s\n" % e)
return ReturnCode(1)
def postcmd(self, stop, line):
if isinstance(stop, ReturnCode):
return None
return stop
class ReturnCode(object):
def __init__(self, return_code):
self.return_code = return_code
def __nonzero__(self):
return False
def get_cmd_line_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', help="The hostname of the "
"Lab Manager server.")
parser.add_argument('--username', help="The Lab Manager username.")
parser.add_argument('--organization', help="The organization name that "
"contains the objects on which you want to perform "
"operations.")
parser.add_argument('--workspace', default='Main', help="The workspace "
"name that contains the objects on which you want to "
"perform operations.")
parser.add_argument('--timeout', default=None, type=int,
help="The default timeout to use with all SOAP "
"calls. If this is not specified, then no timeout "
"will be used.")
parser.add_argument('--section', default='default', help="What section "
"name to load config values from (if loading values "
"from a config file).")
parser.add_argument('-l', '--list-sections', action="store_true", help="Show "
"available sections in the .lmshrc file.")
parser.add_argument('onecmd', nargs='*', default=None)
return parser
def main():
parser = get_cmd_line_parser()
args = parser.parse_args()
config_parser = ConfigParser.SafeConfigParser()
# If the user explicitly specifies a section but it does
# not exist, we should let them know and exit.
api_config = config.load_config(parser, args, config_parser)
if not args.section == parser.get_default('section') \
and not config_parser.has_section(args.section):
sys.stderr.write("section does not exist: %s\n" % args.section)
sys.exit(1)
if args.list_sections:
print '\n'.join(config_parser.sections())
sys.exit(0)
if api_config.password is None:
api_config.password = getpass.getpass('password: ')
logging.getLogger('suds').addHandler(NullHandler())
try:
client = api.create_soap_client(api_config)
except urllib2.URLError, e:
sys.stderr.write("could not connect to server: %s\n" % e)
sys.exit(1)
labmanager_api = api.LabManager(client)
lmsh = LMShell(labmanager_api)
if args.onecmd:
result = lmsh.onecmd(' '.join(args.onecmd))
if isinstance(result, ReturnCode):
sys.exit(result.return_code)
sys.exit(0)
else:
readline.set_completer(lmsh.complete)
readline.parse_and_bind("tab: complete")
lmsh.cmdloop()
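# Example invocation (illustrative; assumes the package exposes a console
# script for main(), e.g. one named "lmsh"):
#
#   lmsh --hostname labmanager.example.com --username admin \
#        --organization Engineering list workspace
#
# runs the single 'list workspace' command and exits with its return code;
# without a trailing command an interactive (lmsh) prompt is started instead.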
| bsd-3-clause | -8,810,449,703,896,863,000 | 30.877384 | 82 | 0.575177 | false |
bgoli/cbmpy-build | ubuntu/1_install_cbmpy_dependencies.py | 1 | 6549 | # Detect all MetaToolKit depencies on Ubuntu and create a custom script to install them.
# Tested on Ubuntu 14.04, 16.04
# Author Brett G. Olivier ([email protected])
# (C) All rights reserved, Brett G. Olivier, Amsterdam 2016.
import os, subprocess, itertools, stat
UBUNTU = CONDA = False
try:
print(os.sys.argv)
arg = os.sys.argv[1]
except:
arg = 'UBUNTU'
if arg == 'UBUNTU':
UBUNTU = True
elif arg == 'CONDA':
CONDA = True
else:
print('\nPlease call script with CONDA as argument for Anaconda install script, defaulting to UBUNTU')
UBUNTU = True
res = {'Required' : {},\
'Optional' : {}
}
# First lets check for some Python essentials
reqcnt = itertools.count(1,1)
optcnt = itertools.count(1,1)
# this should just be there for any sane python build environment
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install build-essential g++ gfortran python python-dev'
if CONDA:
pass
#res['Required'][reqcnt.next()] = 'conda update -y conda # if this is the only required package ignore it'
try:
import pip
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-pip'
try:
import numpy
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-numpy'
try:
import sympy
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-sympy'
try:
import xlrd
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-xlrd'
try:
import xlwt
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-xlwt'
try:
import matplotlib
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-matplotlib'
try:
import PyQt4
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-qt4'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -y pyqt=4.11.4'
try:
import Bio
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-biopython'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -y biopython'
try:
import nose
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install python-nose'
try:
import docx
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -EH pip install docx'
elif CONDA:
res['Required'][reqcnt.next()] = 'pip install docx'
try:
import libsbml
if libsbml.LIBSBML_VERSION < 51201:
print('\nPlease update to the latest version of libSBML.\n')
raise ImportError
except ImportError:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install libxml2 libxml2-dev libxslt-dev zlib1g zlib1g-dev bzip2 libbz2-dev'
res['Required'][reqcnt.next()] = 'sudo -EH pip install --upgrade python-libsbml'
elif CONDA:
res['Required'][reqcnt.next()] = 'conda install -c SBMLTeam -y python-libsbml'
try:
import cbmpy
except:
if UBUNTU:
res['Required'][reqcnt.next()] = 'sudo -EH pip install --upgrade cbmpy'
res['Required'][reqcnt.next()] = 'sudo python -c "import cbmpy"'
if CONDA:
res['Required'][reqcnt.next()] = 'pip install cbmpy'
res['Required'][reqcnt.next()] = 'python -c "import cbmpy"'
try:
out = subprocess.call(['java', '-version'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install default-jre'
try:
out = subprocess.call(['perl', '-v'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install perl'
p_script = """\
my $rc = 0;
$rc = eval
{
require XML::Parser;
XML::Parser->import();
1;
};
if ($rc){
exit 0
} else {
exit 1
}
"""
try:
PF = file('_test.pl', 'w')
PF.write(p_script)
PF.close()
out = int(subprocess.call(['perl', '_test.pl']))
if out:
raise OSError
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install libxml-parser-perl'
try:
out = subprocess.call(['blastall'])
except (OSError):
if UBUNTU or CONDA:
res['Required'][reqcnt.next()] = 'sudo -E apt-get -y install blast2'
# Optional/recommended
# https://github.com/bgoli/cbmpy-glpk
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo apt-get -y install git cython libxml2-dev libxslt-dev'
try:
import IPython
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install ipython ipython-notebook'
try:
import suds
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install python-suds'
elif CONDA:
res['Optional'][optcnt.next()] = 'pip install suds'
try:
import flask
except ImportError:
if UBUNTU:
res['Optional'][optcnt.next()] = 'sudo -E apt-get -y install python-flask'
if UBUNTU or CONDA:
bash_script="""\
while true; do
read -p "Do you wish to install *{}* MetaToolkit dependencies? [y/n]: " yn
case $yn in
[Yy]* ) echo "Installing ..."; break;;
[Nn]* ) exit;;
* ) echo "Please enter y/n.";;
esac
done
"""
bash_script="""\
# {}
"""
output = '#!/bin/sh\n\n'
output += '#MetaToolkit: Ubuntu system requirements check\n'
output += '#=============================================\n\n'
REQUIRE_USER_INPUT = False
for r in res:
if len(res[r]) > 0:
if REQUIRE_USER_INPUT:
output += bash_script.format(r)
output += '#{}\n#{}\n\n'.format(r, '-'*len(r))
resk = list(res[r])
resk.sort()
for k in resk:
if k != None:
output += '{}\n'.format(res[r][k])
output += '\n'
output += 'exit\n\n'
fname = 'metatoolkit_install_dependencies.sh'
F = file(fname, 'w')
F.write(output)
F.close()
os.chmod(fname, stat.S_IRWXU)
print('')
print(output)
print('\n\nInstall script (shown above) saved as file: {}\nplease examine it carefully and run. Alternatively install individual dependencies manually'.format(fname))
| gpl-3.0 | -6,120,319,862,562,689,000 | 28.90411 | 170 | 0.603604 | false |
j4k0bk/pyidaemon | replies.py | 1 | 7720 |
# FIXME: Move these messages to somewhere else
NUMERIC_REPLIES = {
'001' : 'Welcome to the Internet Relay Network %s',
'002' : 'Your host is %s, running version %s',
'003' : 'This server was created %s',
'004' : '<servername> <version> <available user modes> <available channel modes>',
'318' : 'End of WHOIS list',
'331' : 'No topic is set',
'366' : 'End of /NAMES list.',
'401' : 'No such nick',
'403' : 'No such channel',
'404' : 'Cannot send to channel',
'405' : 'You have joined too many channels',
'411' : 'No recipient given (%s)',
'412' : 'No text to send',
'421' : 'Unknown command',
'431' : 'No nickname given',
'432' : 'Erroneous nickname',
'433' : 'Nickname is already in use',
'442' : 'You\'re not on that channel',
'451' : 'You have not registered',
'461' : 'Not enough parameters',
'475' : 'Cannot join channel (+k)',
}
# -- IRC REPLIES -------------------------------------------------------------
SERVER = 0
THIS_USER = 1
class IRCReply(object):
def __init__(self, source, cmd, args=[], msg=None, msg_args=None):
self.source = source
self.cmd = cmd # May be a 3-digit numeric
self.args = args
self.msg = msg
self.nick = None
if self.msg is None:
self.msg = NUMERIC_REPLIES.get(cmd)
if self.msg and msg_args:
self.msg = self.msg % msg_args
def __str__(self):
words = []
if self.source:
words.append(':%s' % self.source)
words.append(self.cmd)
if self.nick:
words.append(self.nick)
if self.args:
words.append(' '.join(self.args))
if not self.msg is None:
words.append(':%s' % self.msg)
return ' '.join(words)
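# Example of the wire format produced by __str__ above (illustrative values):
# IRCReply('irc.example.net', '332', ['#chan'], msg='hello world') renders as
# ':irc.example.net 332 #chan :hello world'.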
class Welcome(IRCReply):
"""001 RPL_WELCOME"""
def __init__(self, nick):
IRCReply.__init__(self, SERVER, '001', msg_args=nick)
class YourHost(IRCReply):
"""002 RPL_YOURHOST"""
def __init__(self, server, version):
IRCReply.__init__(self, SERVER, '002', msg_args=(server, version))
class Created(IRCReply):
"""003 RPL_CREATED"""
def __init__(self, date):
IRCReply.__init__(self, SERVER, '003', msg_args=date)
class MyInfo(IRCReply): # FIXME
"""004 RPL_MYINFO"""
def __init__(self):
IRCReply.__init__(self, SERVER, '004')
class Names(IRCReply):
"""353 RPL_NAMREPLY"""
def __init__(self, channel_name, names):
msg = ' '.join(names)
IRCReply.__init__(self, SERVER, '353', ['@', channel_name], msg=msg)
class EndOfNames(IRCReply):
"""366 RPL_ENDOFNAMES"""
def __init__(self, channel_name):
IRCReply.__init__(self, SERVER, '366', [channel_name])
class WhoIsUser(IRCReply):
"""311 RPL_WHOISUSER"""
def __init__(self, nick, user, host, realname):
args = [nick, user, host, '*']
IRCReply.__init__(self, SERVER, '311', args, msg=realname)
class WhoIsServer(IRCReply):
"""312 RPL_WHOISSERVER"""
def __init__(self, nick, server_name, server_info):
args = [nick, server_name]
IRCReply.__init__(self, SERVER, '312', args, msg=server_info)
class EndOfWhoIs(IRCReply):
"""318 RPL_ENDOFWHOIS"""
def __init__(self, nick):
IRCReply.__init__(self, SERVER, '318', [nick])
class WhoIsChannels(IRCReply):
"""319 RPL_WHOISCHANNELS"""
def __init__(self, nick, channels):
if isinstance(channels, list):
channels = ' '.join(channels)
IRCReply.__init__(self, SERVER, '319', [nick], msg=channels)
class Ping(IRCReply):
def __init__(self, server):
IRCReply.__init__(self, SERVER, 'PING', [server])
class Pong(IRCReply):
def __init__(self, server):
IRCReply.__init__(self, SERVER, 'PONG', [server])
class Nick(IRCReply):
def __init__(self, user, new_nick):
IRCReply.__init__(self, user.mask, 'NICK', [new_nick])
class Join(IRCReply):
def __init__(self, user, channel_name):
IRCReply.__init__(self, user.mask, 'JOIN', [channel_name])
class Part(IRCReply):
def __init__(self, user, channel_name, msg=None):
IRCReply.__init__(self, user.mask, 'PART', [channel_name], msg=msg)
class Topic(IRCReply):
def __init__(self, channel_name, topic):
IRCReply.__init__(self, SERVER, '332', [channel_name], msg=topic)
class TopicSet(IRCReply):
def __init__(self, user, channel_name, topic):
IRCReply.__init__(self, user.mask, 'TOPIC', [channel_name], msg=topic)
class NoTopic(IRCReply):
def __init__(self, channel_name):
IRCReply.__init__(self, SERVER, '331', [channel_name])
class PrivMsg(IRCReply):
def __init__(self, from_user, target, msg):
IRCReply.__init__(self, from_user.mask, 'PRIVMSG', [target], msg=msg)
class Notice(IRCReply):
def __init__(self, from_user, target, msg):
IRCReply.__init__(self, from_user.mask, 'NOTICE', [target], msg=msg)
# -- IRC ERRORS --------------------------------------------------------------
class IRCError(IRCReply, Exception):
def __init__(self, *args, **kwargs):
IRCReply.__init__(self, SERVER, *args, **kwargs)
class NoSuchNick(IRCError):
"""401 ERR_NOSUCHNICK"""
def __init__(self, nick):
IRCError.__init__(self, '401', [nick])
class NoSuchChannel(IRCError):
"""403 ERR_NOSUCHCHANNEL"""
def __init__(self, channel_name):
IRCError.__init__(self, '403', [channel_name])
class CanNotSendToChan(IRCError):
"""404 ERR_CANNOTSENDTOCHAN"""
def __init__(self, channel_name):
IRCError.__init__(self, '404', [channel_name])
class TooManyChannels(IRCError):
"""405 ERR_TOOMANYCHANNELS"""
def __init__(self, channel_name):
IRCError.__init__(self, '405', [channel_name])
class NoRecipient(IRCError):
"""411 ERR_NORECIPIENT"""
def __init__(self, cmd):
IRCError.__init__(self, '411', msg_args=cmd.upper())
class NoTextToSend(IRCError):
"""412 ERR_NOTEXTTOSEND"""
def __init__(self):
IRCError.__init__(self, '412')
class UnknownCommand(IRCError):
"""421 ERR_UNKNOWNCOMMAND"""
def __init__(self, cmd):
IRCError.__init__(self, '421', [cmd.upper()])
class NoNicknameGiven(IRCError):
"""431 ERR_NONICKNAMEGIVEN"""
def __init__(self):
IRCError.__init__(self, '431')
class ErroneousNickname(IRCError):
"""432 ERR_ERRONEUSNICKNAME"""
def __init__(self, nick):
IRCError.__init__(self, '432', [nick])
class NicknameInUse(IRCError):
"""433 ERR_NICKNAMEINUSE"""
def __init__(self, nick):
IRCError.__init__(self, '433', [nick])
class NotOnChannel(IRCError):
"""442 ERR_NOTONCHANNEL"""
def __init__(self, channel_name):
IRCError.__init__(self, '442', [channel_name])
class NotRegistered(IRCError):
"""451 ERR_NOTREGISTERED"""
def __init__(self):
IRCError.__init__(self, '451')
class NeedMoreParams(IRCError):
"""461 ERR_NEEDMOREPARAMS"""
def __init__(self, cmd):
IRCError.__init__(self, '461', [cmd.upper()])
class BadChannelKey(IRCError):
"""475 ERR_BADCHANNELKEY"""
def __init__(self, channel_name):
IRCError.__init__(self, '475', [channel_name])
if __name__ == '__main__':
reply = YourHost('server', 'version')
print str(reply)
try:
raise BadChannelKey('#chan')
except IRCError, e:
print str(e)
| gpl-2.0 | 7,832,151,543,027,428,000 | 26.278388 | 86 | 0.553886 | false |
zojoncj/cleanthehead | nsnitro/nsresources/__init__.py | 1 | 2953 | from nsbaseresource import NSBaseResource
from nsconfig import NSConfig
from nscspolicy import NSCSPolicy
from nscsvserver import NSCSVServer
from nscsvservercspolicybinding import NSCSVServerCSPolicyBinding
from nscsvserverresponderpolicybinding import NSCSVServerResponderPolicyBinding
from nscsvserverrewritepolicybinding import NSCSVServerRewritePolicyBinding
from nslbvserver import NSLBVServer
from nslbvserverservicebinding import NSLBVServerServiceBinding
from nslbvservercsvserverbinding import NSLBVServerCSVserverBinding
from nsresponderaction import NSResponderAction
from nsresponderpolicy import NSResponderPolicy
from nsresponderpolicylabel import NSResponderPolicyLabel
from nsresponderpolicylabelbinding import NSResponderPolicyLabelBinding
from nsresponderpolicycsvserverbinding import NSResponderPolicyCSVServerBinding
from nsrewritepolicy import NSRewritePolicy
from nsrewritepolicycsvserverbinding import NSRewritePolicyCSVServerBinding
from nsservice import NSService
from nsserver import NSServer
from nsservicegroup import NSServiceGroup
from nsservicelbmonitorbinding import NSServiceLBMonitorBinding
from nssslcertkey import NSSSLCertKey
from nssslcertkeysslvserverbinding import NSSSLCertKeySSLVServerBinding
from nssslvserver import NSSSLVServer
from nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding
from nshanode import NSHANode
from nsip import NSIP
from nsvlan import NSVLAN
from nsvlaninterfacebinding import NSVLANInterfaceBinding
from nsvlannsipbinding import NSVLANNSIPBinding
from nsfeature import NSFeature
from nsrewriteaction import NSRewriteAction
from nslbmonitorservicebinding import NSLBMonitorServiceBinding
from nssystemcmdpolicy import NSSystemCMDPolicy
from nsacl import NSAcl
from nsacls import NSAcls
__all__ = ['NSBaseResource',
'NSConfig',
'NSCSPolicy',
'NSCSVServer',
'NSCSVServerCSPolicyBinding',
'NSCSVServerResponderPolicyBinding',
'NSCSVServerRewritePolicyBinding',
'NSLBVServer',
'NSLBVServerServiceBinding',
'NSLBVServerCSVserverBinding',
'NSResponderAction',
'NSResponderPolicy',
'NSResponderPolicyLabel',
'NSResponderPolicyLabelBinding',
'NSResponderPolicyCSVServerBinding',
'NSRewritePolicy',
'NSRewritePolicyCSVServerBinding',
'NSServer',
'NSService',
'NSServiceGroup',
'NSServiceLBMonitorBinding',
'NSSSLCertKey',
'NSSSLCertKeySSLVServerBinding',
'NSSSLVServer',
'NSSSLVServerSSLCertKeyBinding',
'NSHANode',
'NSIP',
'NSVLAN',
'NSVLANInterfaceBinding',
'NSVLANNSIPBinding',
'NSFeature',
'NSRewriteAction',
'NSLBMonitorServiceBinding',
'NSSystemCMDPolicy',
'NSAcl',
'NSAcls'
]
| apache-2.0 | -3,418,641,863,379,671,000 | 38.905405 | 79 | 0.764985 | false |
espressif/esp-idf | tools/mkdfu.py | 1 | 9975 | #!/usr/bin/env python
#
# Copyright 2020-2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This program creates archives compatible with ESP32-S* ROM DFU implementation.
#
# The archives are in CPIO format. Each file which needs to be flashed is added to the archive
# as a separate file. In addition to that, a special index file, 'dfuinfo0.dat', is created.
# This file must be the first one in the archive. It contains binary structures describing each
# subsequent file (for example, where the file needs to be flashed/loaded).
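#
# For illustration, a typical invocation of this tool looks like the following
# (the subcommand and option names come from the argument parser defined below;
# the addresses, file names and PID value are hypothetical):
#
#   python mkdfu.py write -o dfu.bin --pid 2 \
#       0x1000 bootloader.bin 0x8000 partition-table.bin 0x10000 app.bin
#
# The same <address> <file> pairs can instead be supplied via --json, pointing
# at a JSON file that contains a "flash_files" dictionary.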
from __future__ import print_function, unicode_literals
import argparse
import hashlib
import json
import os
import struct
import zlib
from collections import namedtuple
from functools import partial
from future.utils import iteritems
try:
import typing
except ImportError:
# Only used for type annotations
pass
try:
from itertools import izip as zip # type: ignore
except ImportError:
# Python 3
pass
# CPIO ("new ASCII") format related things
CPIO_MAGIC = b'070701'
CPIO_STRUCT = b'=6s' + b'8s' * 13
CPIOHeader = namedtuple(
'CPIOHeader',
[
'magic',
'ino',
'mode',
'uid',
'gid',
'nlink',
'mtime',
'filesize',
'devmajor',
'devminor',
'rdevmajor',
'rdevminor',
'namesize',
'check',
],
)
CPIO_TRAILER = 'TRAILER!!!'
def make_cpio_header(
filename_len, file_len, is_trailer=False
): # type: (int, int, bool) -> CPIOHeader
""" Returns CPIOHeader for the given file name and file size """
def as_hex(val): # type: (int) -> bytes
return '{:08x}'.format(val).encode('ascii')
hex_0 = as_hex(0)
mode = hex_0 if is_trailer else as_hex(0o0100644)
nlink = as_hex(1) if is_trailer else hex_0
return CPIOHeader(
magic=CPIO_MAGIC,
ino=hex_0,
mode=mode,
uid=hex_0,
gid=hex_0,
nlink=nlink,
mtime=hex_0,
filesize=as_hex(file_len),
devmajor=hex_0,
devminor=hex_0,
rdevmajor=hex_0,
rdevminor=hex_0,
namesize=as_hex(filename_len),
check=hex_0,
)
# DFU format related things
# Structure of one entry in dfuinfo0.dat
DFUINFO_STRUCT = b'<I I 64s 16s'
DFUInfo = namedtuple('DFUInfo', ['address', 'flags', 'name', 'md5'])
DFUINFO_FILE = 'dfuinfo0.dat'
# Structure which gets added at the end of the entire DFU file
DFUSUFFIX_STRUCT = b'<H H H H 3s B'
DFUSuffix = namedtuple(
'DFUSuffix', ['bcd_device', 'pid', 'vid', 'bcd_dfu', 'sig', 'len']
)
ESPRESSIF_VID = 12346
# This CRC32 gets added after DFUSUFFIX_STRUCT
DFUCRC_STRUCT = b'<I'
def dfu_crc(data, crc=0): # type: (bytes, int) -> int
""" Calculate CRC32/JAMCRC of data, with an optional initial value """
uint32_max = 0xFFFFFFFF
return uint32_max - (zlib.crc32(data, crc) & uint32_max)
def pad_bytes(b, multiple, padding=b'\x00'): # type: (bytes, int, bytes) -> bytes
""" Pad 'b' to a length divisible by 'multiple' """
padded_len = (len(b) + multiple - 1) // multiple * multiple
return b + padding * (padded_len - len(b))
class EspDfuWriter(object):
def __init__(self, dest_file, pid, part_size): # type: (typing.BinaryIO, int, int) -> None
self.dest = dest_file
self.pid = pid
self.part_size = part_size
self.entries = [] # type: typing.List[bytes]
self.index = [] # type: typing.List[DFUInfo]
def add_file(self, flash_addr, path): # type: (int, str) -> None
"""
Add file to be written into flash at given address
        Files are split up into chunks in order to avoid timing out while erasing large regions. Instead of adding
"app.bin" at flash_addr it will add:
1. app.bin at flash_addr # sizeof(app.bin) == self.part_size
2. app.bin.1 at flash_addr + self.part_size
3. app.bin.2 at flash_addr + 2 * self.part_size
...
"""
f_name = os.path.basename(path)
with open(path, 'rb') as f:
for i, chunk in enumerate(iter(partial(f.read, self.part_size), b'')):
n = f_name if i == 0 else '.'.join([f_name, str(i)])
self._add_cpio_flash_entry(n, flash_addr, chunk)
flash_addr += len(chunk)
def finish(self): # type: () -> None
""" Write DFU file """
# Prepare and add dfuinfo0.dat file
dfuinfo = b''.join([struct.pack(DFUINFO_STRUCT, *item) for item in self.index])
self._add_cpio_entry(DFUINFO_FILE, dfuinfo, first=True)
# Add CPIO archive trailer
self._add_cpio_entry(CPIO_TRAILER, b'', trailer=True)
# Combine all the entries and pad the file
out_data = b''.join(self.entries)
cpio_block_size = 10240
out_data = pad_bytes(out_data, cpio_block_size)
# Add DFU suffix and CRC
dfu_suffix = DFUSuffix(0xFFFF, self.pid, ESPRESSIF_VID, 0x0100, b'UFD', 16)
out_data += struct.pack(DFUSUFFIX_STRUCT, *dfu_suffix)
out_data += struct.pack(DFUCRC_STRUCT, dfu_crc(out_data))
# Finally write the entire binary
self.dest.write(out_data)
def _add_cpio_flash_entry(
self, filename, flash_addr, data
): # type: (str, int, bytes) -> None
md5 = hashlib.md5()
md5.update(data)
self.index.append(
DFUInfo(
address=flash_addr,
flags=0,
name=filename.encode('utf-8'),
md5=md5.digest(),
)
)
self._add_cpio_entry(filename, data)
def _add_cpio_entry(
self, filename, data, first=False, trailer=False
): # type: (str, bytes, bool, bool) -> None
filename_b = filename.encode('utf-8') + b'\x00'
cpio_header = make_cpio_header(len(filename_b), len(data), is_trailer=trailer)
entry = pad_bytes(
struct.pack(CPIO_STRUCT, *cpio_header) + filename_b, 4
) + pad_bytes(data, 4)
if not first:
self.entries.append(entry)
else:
self.entries.insert(0, entry)
def action_write(args): # type: (typing.Mapping[str, typing.Any]) -> None
writer = EspDfuWriter(args['output_file'], args['pid'], args['part_size'])
for addr, f in args['files']:
print('Adding {} at {:#x}'.format(f, addr))
writer.add_file(addr, f)
writer.finish()
print('"{}" has been written. You may proceed with DFU flashing.'.format(args['output_file'].name))
if args['part_size'] % (4 * 1024) != 0:
print('WARNING: Partition size of DFU is not multiple of 4k (4096). You might get unexpected behavior.')
def main(): # type: () -> None
parser = argparse.ArgumentParser()
# Provision to add "info" command
subparsers = parser.add_subparsers(dest='command')
write_parser = subparsers.add_parser('write')
write_parser.add_argument('-o', '--output-file',
help='Filename for storing the output DFU image',
required=True,
type=argparse.FileType('wb'))
write_parser.add_argument('--pid',
required=True,
type=lambda h: int(h, 16),
                              help='Hexadecimal product identifier')
write_parser.add_argument('--json',
help='Optional file for loading "flash_files" dictionary with <address> <file> items')
write_parser.add_argument('--part-size',
default=os.environ.get('ESP_DFU_PART_SIZE', 512 * 1024),
type=lambda x: int(x, 0),
help='Larger files are split-up into smaller partitions of this size')
write_parser.add_argument('files',
metavar='<address> <file>', help='Add <file> at <address>',
nargs='*')
args = parser.parse_args()
def check_file(file_name): # type: (str) -> str
if not os.path.isfile(file_name):
raise RuntimeError('{} is not a regular file!'.format(file_name))
return file_name
files = []
if args.files:
files += [(int(addr, 0), check_file(f_name)) for addr, f_name in zip(args.files[::2], args.files[1::2])]
if args.json:
json_dir = os.path.dirname(os.path.abspath(args.json))
def process_json_file(path): # type: (str) -> str
'''
The input path is relative to json_dir. This function makes it relative to the current working
directory.
'''
return check_file(os.path.relpath(os.path.join(json_dir, path), start=os.curdir))
with open(args.json) as f:
files += [(int(addr, 0),
process_json_file(f_name)) for addr, f_name in iteritems(json.load(f)['flash_files'])]
files = sorted([(addr, f_name.decode('utf-8') if isinstance(f_name, type(b'')) else f_name) for addr, f_name in iteritems(dict(files))],
key=lambda x: x[0]) # remove possible duplicates and sort based on the address
cmd_args = {'output_file': args.output_file,
'files': files,
'pid': args.pid,
'part_size': args.part_size,
}
{'write': action_write
}[args.command](cmd_args)
if __name__ == '__main__':
main()
| apache-2.0 | -567,022,162,495,438,100 | 34.37234 | 140 | 0.589975 | false |
johnmgregoire/JCAPdatavis | createdlist_benchmarkingstepCA.py | 1 | 2595 | import numpy, pylab, os, sys, csv, pickle
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
homefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/benchmarking'
subfold_sample_fnstartl_vshl=[\
('NiFeCoCe50301703/stepCA', 170, ['complete02', 'complete03', 'complete04'], [-0.1813, -0.1818, -0.1838]), \
('NiFeCoCe40202020/stepCA', 725, ['complete02', 'complete03', 'complete04'], [-0.17705, -0.17905, -0.18255]), \
('NiFeCoCe30072043/stepCA', 1326, ['complete02', 'complete03', 'complete04'], [-0.17605, -0.1788, -0.18005]), \
]
savep=os.path.join(homefolder, 'benchmarkingstepCAs_dlist.pck')
dlist=[]
for subfold, sample, fnstartl, vshl in subfold_sample_fnstartl_vshl:
d={}
d['Sample']=sample
fold=os.path.join(homefolder, subfold)
fns=os.listdir(fold)
pl=[[os.path.join(fold, fn) for fn in fns if fn.startswith(fnstart)][0] for fnstart in fnstartl]
for p, fnstart, vsh in zip(pl, fnstartl, vshl):
d[fnstart]={}
f=open(p, mode='r')
f.readline()
f.readline()
dr=csv.DictReader(f, delimiter='\t')
for l in dr:
for kr in l.keys():
k=kr.strip()
if k in ['Unknown']:
continue
if not k in d[fnstart].keys():
d[fnstart][k]=[]
d[fnstart][k]+=[myeval(l[kr].strip())]
for k in d[fnstart].keys():
d[fnstart][k]=numpy.array(d[fnstart][k])
f.close()
try:
x=d[fnstart]['I/mA']/.196
except:
x=d[fnstart]['<I>/mA']/.196
try:
y=d[fnstart]['Ewe/V']+vsh
except:
y=d[fnstart]['<Ewe>/V']+vsh
iinterv=len(x)//9
indsl=[i*iinterv-numpy.arange(50)-5 for i in range(5, 10)]
xv=numpy.array([x[inds].mean() for inds in indsl])
yv=numpy.array([y[inds].mean() for inds in indsl])
iv=numpy.array([inds.mean() for inds in indsl])
pylab.figure()
pylab.plot(x, 'b-')
pylab.plot(iv, xv, 'bo')
pylab.twinx()
pylab.plot(y, 'g-')
pylab.plot(iv, yv, 'go')
pylab.title(subfold+fnstart)
d[fnstart]['I(mAcm2)']=xv
d[fnstart]['Ewe(VOER)']=yv
dlist+=[d]
#pylab.show()
if 1:
f=open(savep, mode='w')
pickle.dump(dlist, f)
f.close()
| bsd-3-clause | -1,207,282,057,356,617,200 | 33.6 | 115 | 0.568401 | false |
ssato/python-jinja2-cli | jinja2_cli/tests/render.py | 1 | 2414 | #
# Copyright (C) 2011 - 2013 Satoru SATOH <ssato at redhat.com>
#
import os
import unittest
import jinja2_cli.render as TT # Stands for Test Target module.
import jinja2_cli.compat
import jinja2_cli.tests.common as C
class Test_00_pure_functions(unittest.TestCase):
def test_00_mk_template_paths__wo_paths(self):
self.assertEquals(TT.mk_template_paths("/a/b/c.yml"),
[os.curdir, "/a/b"])
def test_01_mk_template_paths__w_paths(self):
self.assertEquals(TT.mk_template_paths("/a/b/c.yml", ["/a/d"]),
["/a/d", "/a/b"])
def test_10_tmpl_env(self):
self.assertTrue(isinstance(TT.tmpl_env(["/a/b", ]),
TT.jinja2.Environment))
def test_20_render_s(self):
tmpl_s = 'a = {{ a }}, b = "{{ b }}"'
self.assertEquals(TT.render_s(tmpl_s, {'a': 1, 'b': 'bbb'}),
'a = 1, b = "bbb"')
class Test_10_effectful_functions(unittest.TestCase):
def setUp(self):
self.workdir = C.setup_workdir()
def test_10_render_impl(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
r = TT.render_impl(tmpl, {'a': "aaa", }, [self.workdir])
self.assertEquals(r, "a = aaa")
def test_20_render(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
r = TT.render(tmpl, {'a': "aaa", }, [self.workdir])
self.assertEquals(r, "a = aaa")
def test_22_render__ask(self):
"""FIXME: Write tests for jinja2_cli.render.render"""
pass
def test_30_template_path(self):
tmpl = "a.j2"
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
self.assertEquals(TT.template_path(tmpl, [self.workdir]),
os.path.join(self.workdir, tmpl))
def test_32_template_path__not_exist(self):
tmpl = "template_not_exist.j2"
self.assertTrue(TT.template_path(tmpl, [self.workdir]) is None)
def test_50_renderto(self):
tmpl = "a.j2"
output = os.path.join(self.workdir, "a.out")
open(os.path.join(self.workdir, tmpl), 'w').write("a = {{ a }}")
TT.renderto(tmpl, dict(a="aaa", ), [self.workdir], output, False)
self.assertEquals(jinja2_cli.compat.copen(output).read(), "a = aaa")
# vim:sw=4:ts=4:et:
| bsd-3-clause | -3,658,749,903,160,472,000 | 31.186667 | 76 | 0.553853 | false |
odbelix/mnTool | ModulosDevice/ComunicacionDB/Comunication.py | 1 | 3354 | #!/usr/bin/env python
# Comunication.py, a script that manages the communication with the database.
# As a requirement, it needs a configuration file with the database information,
# whose content is:
# host => name of the host
# data base => name of the database
# user => user name
# password => password
#
# This file is contained in ModulosDevice/ComunicacionDB and its name is conexion.py;
# any change to the database information should be made in that file.
# It contains functions for managing the communication and functions that allow simple queries.
#
# Copyright (C) 21/10/2015 David Alfredo Medina Ortiz [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#Modules used
import sys
import psycopg2
import ConfigParser
#Get database information from file
def getDataBaseInfo():
#instance configparser
cfg = ConfigParser.ConfigParser()
cfg.read(["/etc/mn_Tools/db_configuration.cfg"])#read information params in fiel configuration
#Get the information from the database
host = cfg.get("connection", "host")
name = cfg.get("connection", "data_base")
user = cfg.get("connection", "user")
password = cfg.get("connection", "password")
database = {}
database.update({'host': host})
database.update({'name': name})
database.update({'user': user})
database.update({'password': password})
return database
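# For illustration, the configuration file read above is a plain INI file with a
# single [connection] section. A minimal sketch (all values are placeholders,
# not real credentials):
#
#   [connection]
#   host = localhost
#   data_base = mn_tools_db
#   user = mn_user
#   password = secret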
#function that handles communication with the database ...
def BeginComunication():
database = getDataBaseInfo()
conn_string = "host='%s' dbname='%s' user='%s' password='%s'" % (database['host'],database['name'],database['user'],database['password'])
#error handling...
try:
db = psycopg2.connect(conn_string)
return db, db.cursor()#sthe connection is returned to the database and the cursor
except psycopg2.DatabaseError, e:
print 'Error %s', e
sys.exit(1)
#function that manages the query to the database based on the parameters received by argument
def MakeQueryDB (query, database_cursor):
response = []#list with information from the database ...
try:
database_cursor.execute(query)
for row in database_cursor:
response.append(row)
return response
except psycopg2.DatabaseError, e:
print 'Error %s', e
        sys.exit(1)
# function that manages queries of kind delete or insert in the database
def QueryDB(query, database_cursor, connection):
    try:
        database_cursor.execute(query)
        connection.commit()
        return 0
    except psycopg2.DatabaseError, e:
        print 'Error %s', e
        sys.exit(1)
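# Minimal usage sketch (assumes a reachable PostgreSQL server matching the
# configuration file; the table name below is hypothetical):
#
#   db, cursor = BeginComunication()
#   rows = MakeQueryDB("select * from devices", cursor)
#   QueryDB("delete from devices where id = 1", cursor, db)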
| gpl-2.0 | -104,137,734,422,662,380 | 34.305263 | 141 | 0.703339 | false |
Bakuutin/Pitchers | pitchers.py | 1 | 6675 | """
This program finds an optimal solution to the classic two-jug (water pouring) problem.
Python 3 is required to run it.
"""
from enum import Enum
class Action(Enum):
"""
Действия с кувшинами
"""
empty = 0
fill = 1
transfer = 2
start = 3
def get_pitchers_volume():
"""
    Returns a list of the integer pitcher volumes
"""
    while True:  # pitchers_volume is a list of integers obtained by splitting the input line
        pitchers_volume = list(int(pitcher) for pitcher in input("Enter the volumes of the two pitchers, separated by a space: ").split())
        if len(pitchers_volume) != 2:  # Only the case of exactly two pitchers is considered
            print('Oops! Please try again.')
else:
return pitchers_volume
def get_target():
"""
    Returns the desired target volume as an integer
    """
    return int(input("Enter the desired volume: "))
def greatest_common_divisor(a, b):
"""
    Computes the greatest common divisor.
"""
while b:
a, b = b, a % b
return a
def make_edges(i, j, i_max, j_max):
"""
    Builds a dict of all outgoing graph edges from the state (i, j),
    where i and j are the fill levels of the first and second pitchers
"""
edges = dict()
    # If the pitchers are not empty, they can be emptied
if i != 0:
edges[(0, j)] = Action.empty
if j != 0:
edges[(i, 0)] = Action.empty
    # If the pitchers are not full, they can be filled
if i != i_max:
edges[(i_max, j)] = Action.fill
if j != j_max:
edges[(i, j_max)] = Action.fill
    # A non-empty pitcher can be poured into one that is not full
if i != 0 and j_max-j >= i:
edges[(0, j+i)] = Action.transfer
if j != 0 and i_max-i >= j:
edges[(i+j, 0)] = Action.transfer
    # Moreover, if there is not enough room in the receiving pitcher,
    # both pitchers end up non-empty
if j != 0 and 0 < i_max-i < j:
edges[(i_max, j - (i_max-i))] = Action.transfer
if i != 0 and 0 < j_max-j < i:
edges[(i - (j_max-j), j_max)] = Action.transfer
return edges
def make_pitchers_graph(pitchers_volume):
"""
    Builds a dictionary whose keys are all combinations of pitcher fill levels
    and whose values are the possible transitions from each combination
"""
pitchers_graph = dict()
gcd = greatest_common_divisor(pitchers_volume[0], pitchers_volume[1])
    for i in range(0, int(pitchers_volume[0]/gcd)+1): # Scale the volumes down by their greatest common divisor
        for j in range(0, int(pitchers_volume[1]/gcd)+1): # to keep the state graph small (an optimization)
pitchers_graph[(i*gcd, j*gcd)] = make_edges(i*gcd, j*gcd, pitchers_volume[0], pitchers_volume[1])
return pitchers_graph
def dijkstra(graph, start_node, target):
"""
    Finds the shortest path in the graph
"""
distance = dict.fromkeys(graph, float('inf'))
path = dict()
    path[start_node] = [[[start_node], [Action.start]]] # The path is stored as a dict that maps each node
    distance[start_node] = 0                            # to the list of preceding nodes leading to it,
    node_set = set(graph)                               # each paired with the pitcher action taken
    targets_list = [node for node in node_set           # Targets are stored as a list of all nodes
                    if node[0] == target or node[1] == target] # that qualify as a finishing state
while node_set:
node = min(node_set, key=distance.get)
        if node in targets_list and node in path: # As soon as a suitable node is found, exit. Since the search
            return path[node] # starts from (0, 0) and every edge has the same weight,
        node_set.remove(node) # the first node found is already the optimal one
for child_node in graph[node].keys():
            if distance[child_node] >= distance[node] + 1: # Every edge is given a weight of one
distance[child_node] = distance[node] + 1
path[child_node] = list()
                path[child_node].extend(path[node]) # The path to the new node is the path to its parent
                path[child_node].append([[child_node], # plus the transition itself,
                                         [graph[node].get(child_node)]]) # with the action type attached
def show_answer(path, target):
"""
    Prints the answer in a human-readable form
"""
if path is not None:
        print('Steps required: {}'.format(len(path)-1))
for node in path:
            print(node[0][0], end=' ') # Pitcher state
            print(node[1][0].name) # Action type
else:
        print('It is impossible to measure out {} L using only these pitchers.'.format(target))
pitchers_volume = get_pitchers_volume() # Read the pitcher volumes from the keyboard
target_node = get_target() # And the desired target volume
start_node = (0, 0) # Start with both pitchers empty
pitchers_graph = make_pitchers_graph(pitchers_volume) # Build the graph of all pitcher states
path = dijkstra(pitchers_graph, start_node, target_node) # Find the shortest path
show_answer(path, target_node) # Print the result
gpuhalla/discord_bot | newMusic.py | 1 | 16747 | """
Adapted from: https://gist.github.com/vbe0201/ade9b80f2d3b64643d854938d40a0a2d
"""
import asyncio
import functools
import itertools
import math
import random
import discord
import youtube_dlc
from async_timeout import timeout
from discord.ext import commands
# Silence useless bug reports messages
youtube_dlc.utils.bug_reports_message = lambda: ''
class VoiceError(Exception):
pass
class YTDLError(Exception):
pass
class YTDLSource(discord.PCMVolumeTransformer):
YTDL_OPTIONS = {
'format': 'bestaudio/best',
'extractaudio': True,
'audioformat': 'mp3',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0',
}
FFMPEG_OPTIONS = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn',
}
ytdl = youtube_dlc.YoutubeDL(YTDL_OPTIONS)
def __init__(self, ctx: commands.Context, source: discord.FFmpegPCMAudio, *, data: dict, volume: float = 0.5):
super().__init__(source, volume)
self.requester = ctx.author
self.channel = ctx.channel
self.data = data
self.uploader = data.get('uploader')
self.uploader_url = data.get('uploader_url')
date = data.get('upload_date')
self.upload_date = date[6:8] + '.' + date[4:6] + '.' + date[0:4]
self.title = data.get('title')
self.thumbnail = data.get('thumbnail')
self.description = data.get('description')
self.duration = self.parse_duration(int(data.get('duration')))
self.tags = data.get('tags')
self.url = data.get('webpage_url')
self.views = data.get('view_count')
self.likes = data.get('like_count')
self.dislikes = data.get('dislike_count')
self.stream_url = data.get('url')
def __str__(self):
return '**{0.title}** by **{0.uploader}**'.format(self)
@classmethod
async def create_source(cls, ctx: commands.Context, search: str, *, loop: asyncio.BaseEventLoop = None):
loop = loop or asyncio.get_event_loop()
partial = functools.partial(cls.ytdl.extract_info, search, download=False, process=False)
data = await loop.run_in_executor(None, partial)
if data is None:
raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
if 'entries' not in data:
process_info = data
else:
process_info = None
for entry in data['entries']:
if entry:
process_info = entry
break
if process_info is None:
raise YTDLError('Couldn\'t find anything that matches `{}`'.format(search))
webpage_url = process_info['webpage_url']
partial = functools.partial(cls.ytdl.extract_info, webpage_url, download=False)
processed_info = await loop.run_in_executor(None, partial)
if processed_info is None:
raise YTDLError('Couldn\'t fetch `{}`'.format(webpage_url))
if 'entries' not in processed_info:
info = processed_info
else:
info = None
while info is None:
try:
info = processed_info['entries'].pop(0)
except IndexError:
raise YTDLError('Couldn\'t retrieve any matches for `{}`'.format(webpage_url))
return cls(ctx, discord.FFmpegPCMAudio(info['url'], **cls.FFMPEG_OPTIONS), data=info)
@staticmethod
def parse_duration(duration: int):
minutes, seconds = divmod(duration, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
duration = []
if days > 0:
duration.append('{} days'.format(days))
if hours > 0:
duration.append('{} hours'.format(hours))
if minutes > 0:
duration.append('{} minutes'.format(minutes))
if seconds > 0:
duration.append('{} seconds'.format(seconds))
return ', '.join(duration)
class Song:
__slots__ = ('source', 'requester')
def __init__(self, source: YTDLSource):
self.source = source
self.requester = source.requester
def create_embed(self):
embed = (discord.Embed(title='Now playing',
description='```css\n{0.source.title}\n```'.format(self),
color=discord.Color.blurple())
.add_field(name='Duration', value=self.source.duration)
.add_field(name='Requested by', value=self.requester.mention)
.add_field(name='Uploader', value='[{0.source.uploader}]({0.source.uploader_url})'.format(self))
.add_field(name='URL', value='[Click]({0.source.url})'.format(self))
.set_thumbnail(url=self.source.thumbnail))
return embed
class SongQueue(asyncio.Queue):
def __getitem__(self, item):
if isinstance(item, slice):
return list(itertools.islice(self._queue, item.start, item.stop, item.step))
else:
return self._queue[item]
def __iter__(self):
return self._queue.__iter__()
def __len__(self):
return self.qsize()
def clear(self):
self._queue.clear()
def shuffle(self):
random.shuffle(self._queue)
def remove(self, index: int):
del self._queue[index]
class VoiceState:
def __init__(self, bot: commands.Bot, ctx: commands.Context):
self.bot = bot
self._ctx = ctx
self.current = None
self.voice = None
self.next = asyncio.Event()
self.songs = SongQueue()
self._loop = False
self._volume = 0.5
self.skip_votes = set()
self.audio_player = bot.loop.create_task(self.audio_player_task())
def __del__(self):
self.audio_player.cancel()
@property
def loop(self):
return self._loop
@loop.setter
def loop(self, value: bool):
self._loop = value
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value: float):
self._volume = value
@property
def is_playing(self):
return self.voice and self.current
async def audio_player_task(self):
while True:
self.next.clear()
if not self.loop:
# Try to get the next song within 3 minutes.
# If no song will be added to the queue in time,
# the player will disconnect due to performance
# reasons.
try:
async with timeout(180): # 3 minutes
self.current = await self.songs.get()
except asyncio.TimeoutError:
self.bot.loop.create_task(self.stop())
return
self.current.source.volume = self._volume
self.voice.play(self.current.source, after=self.play_next_song)
await self.current.source.channel.send(embed=self.current.create_embed())
await self.next.wait()
def play_next_song(self, error=None):
if error:
raise VoiceError(str(error))
self.next.set()
def skip(self):
self.skip_votes.clear()
if self.is_playing:
self.voice.stop()
async def stop(self):
self.songs.clear()
if self.voice:
await self.voice.disconnect()
self.voice = None
class Music(commands.Cog, name='Music'):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, ctx: commands.Context):
state = self.voice_states.get(ctx.guild.id)
if not state:
state = VoiceState(self.bot, ctx)
self.voice_states[ctx.guild.id] = state
return state
def cog_unload(self):
for state in self.voice_states.values():
self.bot.loop.create_task(state.stop())
def cog_check(self, ctx: commands.Context):
if not ctx.guild:
raise commands.NoPrivateMessage('This command can\'t be used in DM channels.')
return True
async def cog_before_invoke(self, ctx: commands.Context):
ctx.voice_state = self.get_voice_state(ctx)
async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):
await ctx.send('An error occurred: {}'.format(str(error)))
@commands.command(name='join', invoke_without_subcommand=True)
async def _join(self, ctx: commands.Context):
"""Joins a voice channel."""
destination = ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name='summon')
@commands.guild_only()
async def _summon(self, ctx: commands.Context, *, channel: discord.VoiceChannel = None):
"""Summons the bot to a voice channel.
If no channel was specified, it joins your channel.
"""
if not channel and not ctx.author.voice:
raise VoiceError('You are neither connected to a voice channel nor specified a channel to join.')
destination = channel or ctx.author.voice.channel
if ctx.voice_state.voice:
await ctx.voice_state.voice.move_to(destination)
return
ctx.voice_state.voice = await destination.connect()
@commands.command(name='leave', aliases=['disconnect'])
@commands.guild_only()
async def _leave(self, ctx: commands.Context):
"""Clears the queue and leaves the voice channel."""
if not ctx.voice_state.voice:
return await ctx.send('Not connected to any voice channel.')
await ctx.voice_state.stop()
del self.voice_states[ctx.guild.id]
@commands.command(name='volume')
async def _volume(self, ctx: commands.Context, *, volume: int):
"""Sets the volume of the player."""
if not ctx.voice_state.is_playing:
return await ctx.send('Nothing being played at the moment.')
        if volume < 0 or volume > 100:
return await ctx.send('Volume must be between 0 and 100')
ctx.voice_state.volume = volume / 100
await ctx.send('Volume of the player set to {}%'.format(volume))
@commands.command(name='now', aliases=['current', 'playing'])
async def _now(self, ctx: commands.Context):
"""Displays the currently playing song."""
await ctx.send(embed=ctx.voice_state.current.create_embed())
@commands.command(name='pause')
@commands.guild_only()
async def _pause(self, ctx: commands.Context):
"""Pauses the currently playing song."""
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_playing():
ctx.voice_state.voice.pause()
await ctx.message.add_reaction('⏯')
@commands.command(name='resume')
@commands.guild_only()
async def _resume(self, ctx: commands.Context):
"""Resumes a currently paused song."""
if ctx.voice_state.is_playing and ctx.voice_state.voice.is_paused():
ctx.voice_state.voice.resume()
await ctx.message.add_reaction('⏯')
@commands.command(name='stop')
@commands.guild_only()
async def _stop(self, ctx: commands.Context):
"""Stops playing song and clears the queue."""
ctx.voice_state.songs.clear()
if ctx.voice_state.is_playing:
ctx.voice_state.voice.stop()
await ctx.message.add_reaction('⏹')
@commands.command(name='skip')
async def _skip(self, ctx: commands.Context):
"""Vote to skip a song. The requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
if not ctx.voice_state.is_playing:
return await ctx.send('Not playing any music right now...')
voter = ctx.message.author
if voter == ctx.voice_state.current.requester:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
elif voter.id not in ctx.voice_state.skip_votes:
ctx.voice_state.skip_votes.add(voter.id)
total_votes = len(ctx.voice_state.skip_votes)
if total_votes >= 3:
await ctx.message.add_reaction('⏭')
ctx.voice_state.skip()
else:
await ctx.send('Skip vote added, currently at **{}/3**'.format(total_votes))
else:
await ctx.send('You have already voted to skip this song.')
@commands.command(name='queue')
async def _queue(self, ctx: commands.Context, *, page: int = 1):
"""Shows the player's queue.
You can optionally specify the page to show. Each page contains 10 elements.
"""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
items_per_page = 10
pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue = ''
for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):
queue += '`{0}.` [**{1.source.title}**]({1.source.url})\n'.format(i + 1, song)
embed = (discord.Embed(description='**{} tracks:**\n\n{}'.format(len(ctx.voice_state.songs), queue))
.set_footer(text='Viewing page {}/{}'.format(page, pages)))
await ctx.send(embed=embed)
@commands.command(name='shuffle')
async def _shuffle(self, ctx: commands.Context):
"""Shuffles the queue."""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
ctx.voice_state.songs.shuffle()
await ctx.message.add_reaction('✅')
@commands.command(name='remove')
async def _remove(self, ctx: commands.Context, index: int):
"""Removes a song from the queue at a given index."""
if len(ctx.voice_state.songs) == 0:
return await ctx.send('Empty queue.')
ctx.voice_state.songs.remove(index - 1)
await ctx.message.add_reaction('✅')
@commands.command(name='loop')
async def _loop(self, ctx: commands.Context):
"""Loops the currently playing song.
Invoke this command again to unloop the song.
"""
if not ctx.voice_state.is_playing:
return await ctx.send('Nothing being played at the moment.')
# Inverse boolean value to loop and unloop.
ctx.voice_state.loop = not ctx.voice_state.loop
await ctx.message.add_reaction('✅')
@commands.command(name='play')
async def _play(self, ctx: commands.Context, *, search: str):
"""Plays a song.
If there are songs in the queue, this will be queued until the
other songs finished playing.
This command automatically searches from various sites if no URL is provided.
A list of these sites can be found here: https://rg3.github.io/youtube-dl/supportedsites.html
"""
if not ctx.voice_state.voice:
await ctx.invoke(self._join)
async with ctx.typing():
try:
source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop)
except YTDLError as e:
await ctx.send('An error occurred while processing this request: {}'.format(str(e)))
else:
song = Song(source)
await ctx.voice_state.songs.put(song)
await ctx.send('Enqueued {}'.format(str(source)))
@commands.command(name='dead')
@commands.has_permissions(manage_guild=True)
async def __dead(self, ctx):
        await ctx.invoke(self._play, search="https://www.youtube.com/watch?v=CfihYWRWRTQ")
        await ctx.send("LEAGUE IS DEAD")
@_join.before_invoke
@_play.before_invoke
async def ensure_voice_state(self, ctx: commands.Context):
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandError('You are not connected to any voice channel.')
if ctx.voice_client:
if ctx.voice_client.channel != ctx.author.voice.channel:
raise commands.CommandError('Bot is already in a voice channel.')
def setup(bot):
bot.add_cog(Music(bot))
| gpl-3.0 | -7,463,120,260,197,721,000 | 32.001972 | 114 | 0.592254 | false |
zhlinh/leetcode | 0130.Surrounded Regions/solution.py | 1 | 1911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-03-11
Last_modify: 2016-03-11
******************************************
'''
'''
Given a 2D board containing 'X' and 'O',
capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's
into 'X's in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
'''
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m = len(board)
if m < 2:
return
n = len(board[0])
for i in range(m):
self.helper(board, i, 0, m, n)
if n > 1:
self.helper(board, i, n - 1, m, n)
for j in range(n):
self.helper(board, 0, j, m, n)
if m > 1:
self.helper(board, m - 1, j, m, n)
for i in range(m):
for j in range(n):
if board[i][j] == 'O':
board[i][j] = 'X'
if board[i][j] == '1':
board[i][j] = 'O'
def helper(self, board, i, j, m, n):
if board[i][j] == 'O':
board[i][j] = '1'
            # Trick here: normally this could be i >= 1,
            # but the boundary cells always get checked directly,
            # so recursing from i == 1 back to row 0 would be a duplicate check.
if i > 1:
self.helper(board, i - 1, j, m, n)
if i < m - 2:
self.helper(board, i + 1, j, m, n)
if j > 1:
self.helper(board, i, j - 1, m, n)
if j < n - 2:
self.helper(board, i, j + 1, m, n)
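if __name__ == '__main__':
    # Quick self-check using the example from the docstring above: the three
    # surrounded 'O's are captured, while the border-connected 'O' survives.
    board = [list(row) for row in ['XXXX', 'XOOX', 'XXOX', 'XOXX']]
    Solution().solve(board)
    print([''.join(row) for row in board])  # expected: ['XXXX', 'XXXX', 'XXXX', 'XOXX']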
| apache-2.0 | -6,414,174,349,112,948,000 | 25.915493 | 75 | 0.438514 | false |
certtools/contactdb | contactdb/contacts/admin.py | 1 | 1153 | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(Address)
admin.site.register(AutonomousSystem)
admin.site.register(AutonomousSystemAutomatic)
#
#admin.site.register(ClassificationIdentifier)
#admin.site.register(ClassificationType)
admin.site.register(Contact)
#admin.site.register(ContactAutomatic)
admin.site.register(Format)
admin.site.register(Fqdn)
#admin.site.register(FqdnAutomatic)
#admin.site.register(Inhibition)
admin.site.register(Network)
#admin.site.register(NetworkAutomatic)
admin.site.register(OrgSector)
admin.site.register(Organisation)
#admin.site.register(OrganisationAutomatic)
admin.site.register(OrganisationToAsn)
admin.site.register(OrganisationToAsnAutomatic)
#admin.site.register(OrganisationToFqdn)
#admin.site.register(OrganisationToFqdnAutomatic)
admin.site.register(OrganisationToNetwork)
#admin.site.register(OrganisationToNetworkAutomatic)
#admin.site.register(OrganisationToTemplate)
#admin.site.register(OrganisationToTemplateAutomatic)
admin.site.register(Role)
#admin.site.register(RoleAutomatic)
admin.site.register(Sector)
admin.site.register(Template)
| agpl-3.0 | -8,570,416,123,354,967,000 | 31.027778 | 53 | 0.846487 | false |
cmjatai/cmj | cmj/utils.py | 1 | 14127 | from datetime import date, datetime, timedelta
from functools import wraps
import re
import subprocess
import threading
from unicodedata import normalize as unicodedata_normalize
from PyPDF4.pdf import PdfFileReader
from asn1crypto import cms
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import connection
from django.template.loaders.filesystem import Loader
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails import source_generators
import magic
from reversion.admin import VersionAdmin
from unipath.path import Path
def pil_image(source, exif_orientation=False, **options):
return source_generators.pil_image(source, exif_orientation, **options)
def clear_thumbnails_cache(queryset, field, time_create=0):
now = datetime.now()
for r in queryset:
assert hasattr(r, field), _(
'Objeto da listagem não possui o campo informado')
if not getattr(r, field):
continue
path = Path(getattr(r, field).path)
cache_files = path.parent.walk()
for cf in cache_files:
if cf == path:
continue
if time_create:
data_arquivo = datetime.fromtimestamp(cf.mtime())
if now - data_arquivo < timedelta(time_create):
continue
cf.remove()
def normalize(txt):
return unicodedata_normalize(
'NFKD', txt).encode('ASCII', 'ignore').decode('ASCII')
def get_settings_auth_user_model():
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def register_all_models_in_admin(module_name):
appname = module_name.split('.')
appname = appname[1] if appname[0] == 'cmj' else appname[0]
app = apps.get_app_config(appname)
for model in app.get_models():
class CustomModelAdmin(VersionAdmin):
list_display = [f.name for f in model._meta.fields
if f.name != 'id']
if not admin.site.is_registered(model):
admin.site.register(model, CustomModelAdmin)
def from_to(start, end):
return list(range(start, end + 1))
def make_pagination(index, num_pages):
'''Make a list of adjacent page ranges interspersed with "None"s
The list starts with [1, 2] and end with [num_pages-1, num_pages].
The list includes [index-1, index, index+1]
"None"s separate those ranges and mean ellipsis (...)
Example: [1, 2, None, 10, 11, 12, None, 29, 30]
'''
PAGINATION_LENGTH = 10
if num_pages <= PAGINATION_LENGTH:
return from_to(1, num_pages)
else:
if index - 1 <= 5:
tail = [num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - 3)
else:
if index + 1 >= num_pages - 3:
tail = from_to(index - 1, num_pages)
else:
tail = [index - 1, index, index + 1,
None, num_pages - 1, num_pages]
head = from_to(1, PAGINATION_LENGTH - len(tail) - 1)
return head + [None] + tail
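# Illustrative check of make_pagination (follows from PAGINATION_LENGTH = 10 above):
#   make_pagination(11, 30) == [1, 2, 3, None, 10, 11, 12, None, 29, 30]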
def xstr(s):
return '' if s is None else str(s)
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_base_url(request):
# TODO substituir por Site.objects.get_current().domain
# from django.contrib.sites.models import Site
current_domain = request.get_host()
protocol = 'https' if request.is_secure() else 'http'
return "{0}://{1}".format(protocol, current_domain)
def create_barcode(value):
'''
creates a base64 encoded barcode PNG image
'''
"""from base64 import b64encode
from reportlab.graphics.barcode import createBarcodeDrawing
barcode = createBarcodeDrawing('Code128',
value=value,
barWidth=170,
height=50,
fontSize=2,
humanReadable=True)
data = b64encode(barcode.asString('png'))
return data.decode('utf-8')"""
def CHOICE_SIGNEDS():
return [('', 'Ambos'),
(1, 'Documentos Com Assinatura Digital'),
(0, 'Documentos Sem Assinatura Digital')]
YES_NO_CHOICES = [(True, _('Sim')), (False, _('Não'))]
NONE_YES_NO_CHOICES = [(None, _('---------')),
(True, _('Sim')), (False, _('Não'))]
def listify(function):
@wraps(function)
def f(*args, **kwargs):
return list(function(*args, **kwargs))
return f
UF = [
('AC', 'Acre'),
('AL', 'Alagoas'),
('AP', 'Amapá'),
('AM', 'Amazonas'),
('BA', 'Bahia'),
('CE', 'Ceará'),
('DF', 'Distrito Federal'),
('ES', 'Espírito Santo'),
('GO', 'Goiás'),
('MA', 'Maranhão'),
('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'),
('MG', 'Minas Gerais'),
('PR', 'Paraná'),
('PB', 'Paraíba'),
('PA', 'Pará'),
('PE', 'Pernambuco'),
('PI', 'Piauí'),
('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'),
('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'),
('RR', 'Roraima'),
('SC', 'Santa Catarina'),
('SE', 'Sergipe'),
('SP', 'São Paulo'),
('TO', 'Tocantins'),
('EX', 'Exterior'),
]
RANGE_ANOS = [(year, year) for year in range(date.today().year, 1889, -1)]
RANGE_MESES = [
(1, 'Janeiro'),
(2, 'Fevereiro'),
(3, 'Março'),
(4, 'Abril'),
(5, 'Maio'),
(6, 'Junho'),
(7, 'Julho'),
(8, 'Agosto'),
(9, 'Setembro'),
(10, 'Outubro'),
(11, 'Novembro'),
(12, 'Dezembro'),
]
RANGE_DIAS_MES = [(n, n) for n in range(1, 32)]
TIPOS_MIDIAS_PERMITIDOS = {
'application/pdf': 'pdf',
'application/x-pdf': 'pdf',
'application/acrobat': 'pdf',
'applications/vnd.pdf': 'pdf',
'application/msword': 'doc',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx',
'image/jpeg': 'jpg',
'image/jpg': 'jpg',
'image/jpe_': 'jpg',
'image/pjpeg': 'jpg',
'image/vnd.swiftview-jpeg': 'jpg',
'application/jpg': 'jpg',
'application/x-jpg': 'jpg',
    'image/pipeg': 'jpg',
'image/gif': 'gif',
'image/png': 'png',
'application/png': 'png',
'application/x-png': 'png',
}
TIPOS_IMG_PERMITIDOS = {
'image/jpeg',
'image/jpg',
'image/jpe_',
'image/pjpeg',
'image/vnd.swiftview-jpeg',
'application/jpg',
'application/x-jpg',
    'image/pipeg',
'image/x-xbitmap',
'image/bmp',
'image/x-bmp',
'image/x-bitmap',
'image/png',
'application/png',
'application/x-png'
}
def fabrica_validador_de_tipos_de_arquivo(lista, nome):
def restringe_tipos_de_arquivo(value):
mime = magic.from_buffer(value.read(), mime=True)
if mime not in lista:
raise ValidationError(_('Tipo de arquivo não suportado'))
return mime, lista[mime]
    # the name matters for the migrations
restringe_tipos_de_arquivo.__name__ = nome
return restringe_tipos_de_arquivo
restringe_tipos_de_arquivo_midias = fabrica_validador_de_tipos_de_arquivo(
TIPOS_MIDIAS_PERMITIDOS, 'restringe_tipos_de_arquivo_midias')
def intervalos_tem_intersecao(a_inicio, a_fim, b_inicio, b_fim):
maior_inicio = max(a_inicio, b_inicio)
menor_fim = min(a_fim, b_fim)
return maior_inicio <= menor_fim
media_protected_storage = FileSystemStorage(
location=settings.MEDIA_PROTECTED_ROOT, base_url='DO_NOT_USE')
def texto_upload_path(instance, filename, subpath='', pk_first=False):
filename = re.sub('\s', '_', normalize(filename.strip()).lower())
prefix = 'public'
str_path = ('./cmj/%(prefix)s/%(model_name)s/'
'%(subpath)s/%(pk)s/%(filename)s')
if pk_first:
str_path = ('./cmj/%(prefix)s/%(model_name)s/'
'%(pk)s/%(subpath)s/%(filename)s')
if subpath is None:
subpath = '_'
path = str_path % \
{
'prefix': prefix,
'model_name': instance._meta.model_name,
'pk': instance.pk,
'subpath': subpath,
'filename': filename
}
return path
def run_sql(sql):
with connection.cursor() as cursor:
cursor.execute(sql)
if sql.startswith('select'):
rows = cursor.fetchall()
if settings.DEBUG:
print(rows)
def run_signed_name_and_date_via_fields(fields):
signs = {}
for key, field in fields.items():
        if '/FT' not in field or field['/FT'] != '/Sig':
continue
if '/V' not in field:
continue
# .format(field['/V']['/Reason'])
nome = 'Nome do assinante não localizado.'
content_sign = field['/V']['/Contents']
try:
signed_data = cms.ContentInfo.load(content_sign)['content']
oun_old = []
for cert in signed_data['certificates']:
subject = cert.native['tbs_certificate']['subject']
oun = subject['organizational_unit_name']
if isinstance(oun, str):
continue
if len(oun) > len(oun_old):
oun_old = oun
nome = subject['common_name'].split(':')[0]
except:
if '/Name' in field['/V']:
nome = field['/V']['/Name']
fd = None
try:
data = str(field['/V']['/M'])
if 'D:' not in data:
data = None
else:
if not data.endswith('Z'):
data = data.replace('Z', '+')
data = data.replace("'", '')
fd = datetime.strptime(data[2:], '%Y%m%d%H%M%S%z')
except:
pass
if nome not in signs:
signs[nome] = fd
return signs
def run_signed_name_and_date_extract(file):
signs = {}
fields = {}
pdfdata = file.read()
    # if there is no ByteRange, then the document is not signed
byterange = []
n = -1
while True:
n = pdfdata.find(b"/ByteRange", n + 1)
if n == -1:
break
byterange.append(n)
if not byterange:
return signs
    # try to extract the signatures via /Fields
try:
pdf = PdfFileReader(file)
fields = pdf.getFields()
except Exception as e:
try:
pdf = PdfFileReader(file, strict=False)
fields = pdf.getFields()
except Exception as ee:
fields = ee
try:
        # if extraction via /Fields succeeded and as many signatures
        # as ByteRanges were captured
if isinstance(fields, dict):
signs = run_signed_name_and_date_via_fields(fields)
if len(signs) == len(byterange):
return signs
except Exception as e:
pass
for n in byterange:
start = pdfdata.find(b"[", n)
stop = pdfdata.find(b"]", start)
assert n != -1 and start != -1 and stop != -1
n += 1
br = [int(i, 10) for i in pdfdata[start + 1: stop].split()]
contents = pdfdata[br[0] + br[1] + 1: br[2] - 1]
bcontents = bytes.fromhex(contents.decode("utf8"))
data1 = pdfdata[br[0]: br[0] + br[1]]
data2 = pdfdata[br[2]: br[2] + br[3]]
#signedData = data1 + data2
nome = 'Nome do assinante não localizado.'
try:
signed_data = cms.ContentInfo.load(bcontents)['content']
oun_old = []
for cert in signed_data['certificates']:
subject = cert.native['tbs_certificate']['subject']
oun = subject['organizational_unit_name']
if isinstance(oun, str):
continue
if len(oun) > len(oun_old):
oun_old = oun
nome = subject['common_name'].split(':')[0]
if nome not in signs:
signs[nome] = timezone.localtime()
except:
pass
return signs
def signed_name_and_date_extract(file):
try:
signs = run_signed_name_and_date_extract(file)
except:
return {}
signs = list(signs.items())
signs = sorted(signs, key=lambda sign: sign[0])
sr = []
for s in signs:
tt = s[0].title().split(' ')
for idx, t in enumerate(tt):
if t in ('Dos', 'De', 'Da', 'Do', 'Das', 'E'):
tt[idx] = t.lower()
sr.append((' '.join(tt), s[1]))
signs = sr
meta_signs = {
'signs': [],
'hom': []
}
for s in signs:
cn = settings.CERT_PRIVATE_KEY_NAME
meta_signs['hom' if s[0] == cn else 'signs'].append(s)
return meta_signs
# checks whether the document has been approved
class ProcessoExterno(object):
def __init__(self, cmd, logger):
self.cmd = cmd
self.process = None
self.logger = logger
def run(self, timeout):
def target():
self.logger.info('Thread started')
self.process = subprocess.Popen(
self.cmd, shell=True, stdout=subprocess.PIPE)
self.process.communicate()
self.logger.info('Thread finished:')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.logger.info('Terminating process')
self.process.terminate()
return None
# thread.join()
self.logger.info(self.process.returncode)
return self.process.returncode
class CmjLoader(Loader):
def get_dirs(self):
return self.dirs if self.dirs is not None else self.engine.dirs
| gpl-3.0 | -1,395,734,842,059,924,000 | 26.065259 | 87 | 0.552301 | false |
dhalima3/TravelSpark | Main.py | 1 | 7574 | import requests
import os
import time
import random
from flask import Flask, request, redirect, session, url_for, render_template
from flask.json import jsonify, dumps, loads
from requests_oauthlib import OAuth2Session
import requests
import json
import urllib2
import mechanize
from bs4 import BeautifulSoup
from urlparse import urlparse
from apiclient.discovery import build
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__, template_folder=tmpl_dir)
app.config['DEBUG'] = True
app.config['PROPAGATE_EXCEPTIONS'] = True
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
instagram_client_id = "115a6c0fd0a64bccbf213e4eafec554a"
instagram_client_secret = "72f3282930444d9e826e5f083ede32d1"
instagram_authorization_base_url = "https://api.instagram.com/oauth/authorize"
instagram_token_url = "https://api.instagram.com/oauth/access_token"
instagram_image_search_url = "https://api.instagram.com/v1/media/search"
google_api_key = "AIzaSyCLehiRvLWhFXbwkI6zojampXcICC0-rMU"
google_geocoding_url = "https://maps.googleapis.com/maps/api/geocode/json?address=%s"
@app.route('/')
def instagram_authorization():
if(session.get("instagram_access_key" != None)):
return redirect("/home")
oauth = OAuth2Session(instagram_client_id, redirect_uri="http://127.0.0.1:5000/callback")
authorization_url, state = oauth.authorization_url(instagram_authorization_base_url)
session['oauth_state'] = state
return redirect(authorization_url)
@app.route('/callback', methods=["GET"])
def instagram_token_retrieval():
oauth = OAuth2Session(instagram_client_id, redirect_uri="http://127.0.0.1:5000/callback", state=session['oauth_state'])
# When grabbing the token Instagram requires you give the code you got in the authorization step in the token step, along with client_id + secret -_-
# This fetch token call might not be right for other APIs, it all demands on their requirements
my_token = oauth.fetch_token(instagram_token_url, code=request.args.get('code'), client_secret=instagram_client_secret, client_id=instagram_client_id, authorization_url=request.url)
session['instagram_access_key'] = my_token['access_token']
return redirect("/home")
'''
Route representing the home page
'''
@app.route('/home')
def home():
#TODO: Put in Flickr APi for the home page.
if(session.get('instagram_access_key') == None):
return redirect("/")
#Lets get info on myself the access_token holder
access_token = session['instagram_access_key']
r = requests.request("GET",'https://api.instagram.com/v1/users/self/?access_token=%s' % access_token)
return render_template('home.html', user_data=r.json())
'''
The main route for the collage page
'''
#after user hits submit button.
@app.route('/location/<place>', methods=["POST", "GET"])
def get_collage(place):
#headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
#payload = {'num_photos': 3, 'place': place}
url = 'http://127.0.0.1:5000/location/instagram/'+place
#import urlparse
#url = 'http://127.0.0.1:5000/location/instagram/holder'
#parts = urlparse.urlparse(url)
#parts = parts._replace(path=)
#parts.geturl()
#print payload
response = get_instagram_photos(place)
response2= json.loads(get_google_images(place))
response3= json.loads(get_google_images2(place))
response4 = json.loads(get_tumblr_images(place))
print "RECIEVES"
print response
print "GOOGLE"
print response2
print "TUMBLR"
print response4
place = place.replace("+", " ")
airport = get_airport(place)
price = "Packages for Jetblue start as low as " + str(get_lowest_price(place)) + ". "
average_savings = "And save up to " + str(get_savings_percentage(place)) + " compared to Expedia! Wow Jetblue is so awesome!"
return render_template('collage.html', place=place, photos_display=response, photos_google= response2, photos_tumblr= response4, photos_google2 = response3, lowest_price=price, average_savings=average_savings, airport=airport)
def get_airport(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[1]
def get_savings_percentage(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[5][:-1]
def get_lowest_price(place):
f = open('./jetblue/jetblueresults', 'r')
for line in f:
lineList = line.split(',')
destination = lineList[2].lower()
if (destination == place.lower()):
return lineList[4]
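# The three helpers above assume each line of './jetblue/jetblueresults' is a
# comma-separated record roughly shaped like the hypothetical example below
# (only the indexes used above are relied upon):
#   <id>,<airport code>,<destination city>,<...>,<lowest package price>,<savings percentage>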
'''
Will return a list of image URLs from instagram given the name of a location
'''
def get_instagram_photos(place):
print "hell"
print place
if(session.get('instagram_access_key') == None):
print "REDIRECT"
return redirect("/")
#http://127.0.0.1:5000/location/instagram/Chicago/3
#place, num_photos,
# Use Google Geocoding to convert place to lat and long coordinates
num_photos = 25;
print place
location = requests.get(google_geocoding_url % place)
location = location.json()
print location
lat_coord = location.get("results")[0].get("geometry").get("location").get("lat")
long_coord = location.get("results")[0].get("geometry").get("location").get("lng")
print lat_coord
print long_coord
# Make the API call to get the Models
querys = {"lat": lat_coord, "lng" : long_coord, "min_timestamp": "1262304000", "max_timestamp":"1446940800", "distance" : "10000" , "access_token": session.get('instagram_access_key')}
instagram_models = requests.get(instagram_image_search_url, params=querys)
chosen_images = []
json_object = loads(instagram_models.text)
print json_object
if len(json_object["data"]) > num_photos:
for i in range(0, num_photos):
chosen_images.append(json_object["data"][i]["images"])
else:
for i in range(0, len(json_object["data"])):
chosen_images.append(json_object["data"][i]["images"])
print len(json_object["data"])
print num_photos
print chosen_images
return chosen_images
def get_tumblr_images(place):
print "GETTING TUMBLR"
url = ('https://api.tumblr.com/v2/tagged?tag='+urllib2.quote(place)+"&api_key=YaGrzj5NUOlMDxQyTtkSBz1KEAnVyUYcCRKWT74VzNUJwRbtH4")
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "TUMBLR"
ret = response.read()
print ret
return ret
def get_google_images(place):
print "MOVING ON TO GOOGLE"
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+ urllib2.quote(place) + "&rsz=8")
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "GOOGLE RESPONSE"
print type(response)
print "TYPE OF RESPONSE.READ"
ret = response.read()
print len(ret)
print "RET"
print ret
return ret
def get_google_images2(place):
print "MOVING ON TO GOOGLE"
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+ urllib2.quote(place) +'&rsz=8&start=9')
print url
req = urllib2.Request(url, headers={'accept': '*/*'})
response = urllib2.urlopen(req)
print "GOOGLE RESPONSE"
print type(response)
print "TYPE OF RESPONSE.READ"
ret = response.read()
print len(ret)
print "RET"
print ret
return ret
if __name__ == '__main__':
app.run()
| apache-2.0 | 4,999,414,832,929,667,000 | 34.227907 | 231 | 0.695933 | false |
google-research/google-research | hierarchical_foresight/models/vae.py | 1 | 3093 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variational Autoencoder Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
class ImageTransformSC(snt.AbstractModule):
"""VAE for the Maze Environment."""
def __init__(self, latentsize, name='itmsc', width=64):
super(ImageTransformSC, self).__init__(name=name)
self.width = width
if self.width == 48:
self.lsz = 2
else:
self.lsz = 3
self.latentsize = latentsize
self.enc = snt.nets.ConvNet2D([16, 32, 64, 128], [3, 3, 3, 3],
[2, 2, 2, 2], ['VALID'])
self.dec = self.enc.transpose()
self.lin1 = snt.Linear(output_size=512, name='lin1')
self.lin2 = snt.Linear(output_size=self.latentsize*2, name='lin2')
self.lin3 = snt.Linear(output_size=self.lsz *3*128, name='lin3')
self.f1 = snt.Linear(output_size=512, name='f1')
self.f2 = snt.Linear(output_size=512, name='f2')
self.f3 = snt.Linear(output_size=256, name='f3')
self.fc1 = snt.Linear(output_size=256, name='fc')
self.fc2 = snt.Linear(output_size=256, name='fc')
self.fc3 = snt.Linear(output_size=256, name='fc')
def _build(self, bs):
self.s1 = tf.placeholder(tf.float32, shape=[None, self.width, 64, 3])
self.s2 = tf.placeholder(tf.float32, shape=[None, self.width, 64, 3])
c1 = self.enc(self.s1)
c2 = self.enc(self.s2)
e1 = tf.reshape(c1, [-1, self.lsz *3*128])
e2 = tf.reshape(c2, [-1, self.lsz *3*128])
e = tf.concat([e1, e2], 1)
l1 = tf.nn.relu(self.lin1(e))
l2 = self.lin2(l1)
mu, std = l2[:, :self.latentsize], tf.nn.relu(l2[:, self.latentsize:])
n = tf.distributions.Normal(loc=[0.]*self.latentsize,
scale=[1.]*self.latentsize)
a = n.sample(bs)
self.z = mu + std * a
emb1 = tf.nn.relu(self.f1(e1))
emb2 = tf.nn.relu(self.f2(emb1))
emb3 = self.f3(emb2)
s2emb = tf.nn.relu(self.fc1(tf.concat([emb3, self.z], 1)))
s2emb = tf.nn.relu(self.fc2(s2emb))
s2emb = self.fc3(s2emb)
ll = self.lin3(emb3)
ll = tf.reshape(ll, [-1, self.lsz, 3, 128])
dec1_3 = self.dec(ll+c1)
rec = tf.nn.sigmoid(dec1_3)
rec = tf.clip_by_value(rec, 1e-5, 1 - 1e-5)
l3 = self.lin3(s2emb)
l3 = tf.reshape(l3, [-1, self.lsz, 3, 128])
dec2_3 = self.dec(l3+c1)
o = tf.nn.sigmoid(dec2_3)
o = tf.clip_by_value(o, 1e-5, 1 - 1e-5)
return o, rec, mu, std**2
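# Hedged usage sketch (assumes TF1-style graph mode, which the placeholders in
# _build require; the latent size and batch size below are illustrative only).
def _example_build(latentsize=8, batch_size=16):
  tf.disable_v2_behavior()  # placeholders need v1 graph-mode semantics
  model = ImageTransformSC(latentsize)
  out, rec, mu, var = model(batch_size)  # calling the sonnet module runs _build
  return out, rec, mu, var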
| apache-2.0 | -1,294,222,527,283,148,800 | 33.752809 | 74 | 0.628839 | false |
a365541453/django | django/blog/docker_part/admin.py | 1 | 1206 | # coding=utf-8
from django.contrib import admin
from docker_part.models import docker_article
# Register your models here.
from django import forms
from django.forms import fields
# The form controls how the model is rendered; this code overrides the default form display
class FlatPageForm(forms.ModelForm):
class Meta:
model = docker_article
exclude = []
widgets = {
"type": fields.Select(choices=[
(1, "主页"),
(2, "vmware"),
(3, "自动化"),
(4, "KVM"),
(5, "LInux"),
(6, "Docker"),
(7, "数据库"),
])
}
# FlatPageForm is a form class and cannot be registered together with the article model,
# so the customized form class has to be attached to an admin class instead
class FlatPageAdmin(admin.ModelAdmin):
form = FlatPageForm
    # The lines below wire in the rich-text editor
list_display = ('title', 'time', 'type')
class Media:
# 在管理后台的HTML文件中加入js文件, 每一个路径都会追加STATIC_URL/
js = (
'kindeditor/kindeditor-all.js',
'kindeditor/lang/zh_CN.js',
'kindeditor/config.js',
)
# Then register the article model through the customized admin class;
# the article model has to be registered via this admin class
admin.site.register(docker_article, FlatPageAdmin)
| gpl-3.0 | 5,791,728,908,724,285,000 | 19.291667 | 50 | 0.693018 | false |
littmus/kutime_web | kutime/views.py | 1 | 2776 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
from django.shortcuts import render
from django.http import HttpResponse
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
import watson
from models import *
def JsonResponse(json):
return HttpResponse(json, content_type='application/json')
def index(request):
list_col = College.objects.all()
list_col_major_anam = list_col.filter(type='M', campus='A')
list_col_major_sejong = list_col.filter(type='M', campus='S')
list_col_etc_anam = list_col.filter(type='E', campus='A')
list_col_etc_sejong = list_col.filter(type='E', campus='S')
return render(
request,
'index.html',
{
'cols_major_anam': list_col_major_anam,
'cols_major_sejong': list_col_major_sejong,
'cols_etc_anam': list_col_etc_anam,
'cols_etc_sejong': list_col_etc_sejong,
'timetable_range': range(1, 13),
}
)
@csrf_exempt
def dept(request, col_num):
data = None
if col_num is not None:
list_dept = Department.objects.filter(col__number=col_num)
data = serializers.serialize('json', list_dept, fields=('name', 'number'))
return JsonResponse(data)
@csrf_exempt
def lec(request, dept_num):
data = None
if dept_num is not None:
if dept_num[0] in ['A', 'S']:
campus = dept_num[0]
num = dept_num[1:]
list_lec = Lecture.objects.filter(col__campus=campus, dept__number=num)
else:
list_lec = Lecture.objects.filter(dept__number=dept_num)
data = serializers.serialize('json', list_lec)
return JsonResponse(data)
@csrf_exempt
def search(request):
if request.method == 'GET':
data = None
q = request.GET.get('q', None)
if q is not None:
if u'교시' in q:
pass
if 'LP' in q:
q = q.replace('LP', 'L-P')
if u'관' in q:
q = q.replace(u'관', '')
# for _q in q.split(','):
# if q.endswith(u'교시'):
#result = [s.object for s in watson.search(q)]
""" TODO
- 검색어 유형 따라 필터 적용
ex) 5교시 -> dayAndPeriod 에서만 검색
"""
result = watson.filter(Lecture, q)
data = serializers.serialize('json', result)
return JsonResponse(data)
else:
return HttpResponse(status=404)
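# Hedged sketch of the TODO above: narrow results by query type. The
# dayAndPeriod field name is taken from the TODO note and is an assumption
# about the Lecture model, not a confirmed schema.
def _filter_by_query_type(q):
    results = watson.filter(Lecture, q)
    if q.endswith(u'교시'):
        results = results.filter(dayAndPeriod__contains=q)
    return results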
"""
from selenium import webdriver
def capture(request):
if request.method == 'POST':
drvier = webdriver.PhantomJS()
else:
return HttpResponse(status=404)
"""
| mit | -1,254,733,528,694,133,200 | 26.836735 | 83 | 0.569648 | false |
SnabbCo/neutron | neutron/tests/unit/test_l3_agent.py | 1 | 54667 | # Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import mock
from oslo.config import cfg
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import processutils
from neutron.openstack.common import uuidutils
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
class TestBasicRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRouterOperations, self).setUp()
self.conf = cfg.ConfigOpts()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(l3_agent.L3NATAgent.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('router_id', 'fake_id')
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.root_helper = 'sudo'
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.send_arp_p = mock.patch(
'neutron.agent.l3_agent.L3NATAgent._send_gratuitous_arp_packet')
self.send_arp = self.send_arp_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3_agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.Mock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
def test_router_info_create(self):
id = _uuid()
ri = l3_agent.RouterInfo(id, self.conf.root_helper,
self.conf.use_namespaces, None)
self.assertTrue(ri.ns_name.endswith(id))
def test_router_info_create_with_router(self):
id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
ri = l3_agent.RouterInfo(id, self.conf.root_helper,
self.conf.use_namespaces, router)
self.assertTrue(ri.ns_name.endswith(id))
self.assertEqual(ri.router, router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
port_id = _uuid()
router_id = _uuid()
network_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
cidr = '99.0.1.9/24'
mac = 'ca:fe:de:ad:be:ef'
interface_name = agent.get_internal_device_name(port_id)
if action == 'add':
self.device_exists.return_value = False
agent.internal_network_added(ri, network_id,
port_id, cidr, mac)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri, interface_name,
'99.0.1.9')
elif action == 'remove':
self.device_exists.return_value = True
agent.internal_network_removed(ri, port_id, cidr)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def _test_external_gateway_action(self, action):
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
internal_cidrs = ['100.0.1.0/24', '200.74.0.0/16']
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self.device_exists.return_value = False
ri.router = mock.Mock()
ri.router.get.return_value = [{'floating_ip_address':
'192.168.1.34'}]
agent.external_gateway_added(ri, ex_gw_port,
interface_name, internal_cidrs)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri, interface_name,
'20.0.0.30')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router_id,
'gateway': '20.0.0.1'}
self.mock_driver.init_l3.assert_called_with(interface_name,
['20.0.0.30/24'],
**kwargs)
elif action == 'remove':
self.device_exists.return_value = True
agent.external_gateway_removed(ri, ex_gw_port,
interface_name, internal_cidrs)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def test_agent_add_external_gateway(self):
self._test_external_gateway_action('add')
def _test_arping(self, namespace):
if not namespace:
self.conf.set_override('use_namespaces', False)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces, None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
floating_ip = '20.0.0.101'
interface_name = agent.get_external_device_name(router_id)
agent._arping(ri, interface_name, floating_ip)
arping_cmd = ['arping', '-A',
'-I', interface_name,
'-c', self.conf.send_arp_for_ha,
floating_ip]
self.mock_ip.netns.execute.assert_any_call(
arping_cmd, check_exit_code=True)
def test_arping_namespace(self):
self._test_arping(namespace=True)
def test_arping_no_namespace(self):
self._test_arping(namespace=False)
def test_agent_remove_external_gateway(self):
self._test_external_gateway_action('remove')
def _check_agent_method_called(self, agent, calls, namespace):
self.mock_ip.netns.execute.assert_has_calls(
[mock.call(call, check_exit_code=False) for call in calls],
any_order=True)
def _test_routing_table_update(self, namespace):
if not namespace:
self.conf.set_override('use_namespaces', False)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces,
None)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
fake_route2 = {'destination': '135.207.111.111/32',
'nexthop': '1.2.3.4'}
agent._update_routing_table(ri, 'replace', fake_route1)
expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route1)
expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'replace', fake_route2)
expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route2)
expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
def test_agent_routing_table_updated(self):
self._test_routing_table_update(namespace=True)
def test_agent_routing_table_updated_no_namespace(self):
self._test_routing_table_update(namespace=False)
def test_routes_updated(self):
self._test_routes_updated(namespace=True)
def test_routes_updated_no_namespace(self):
self._test_routes_updated(namespace=False)
def _test_routes_updated(self, namespace=True):
if not namespace:
self.conf.set_override('use_namespaces', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
ri = l3_agent.RouterInfo(router_id, self.conf.root_helper,
self.conf.use_namespaces,
None)
ri.router = {}
fake_old_routes = []
fake_new_routes = [{'destination': "110.100.31.0/24",
'nexthop': "10.100.10.30"},
{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.routes = fake_old_routes
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
'via', '10.100.10.30'],
['ip', 'route', 'replace', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = [{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = []
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
def _verify_snat_rules(self, rules, router, negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for interface in interfaces:
prefix = interface['subnet']['cidr'].split('/')[1]
source_cidr = "%s/%s" % (interface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name)]
for source_cidr in source_cidrs:
value_dict = {'source_cidr': source_cidr,
'source_nat_ip': source_nat_ip}
expected_rules.append('-s %(source_cidr)s -j SNAT --to-source '
'%(source_nat_ip)s' % value_dict)
for r in rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
def _prepare_router_data(self, enable_snat=None, num_internal_ports=1):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
int_ports = []
for i in range(num_internal_ports):
int_ports.append({'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.%s.0/24' % i,
'gateway_ip': '35.4.%s.1' % i}})
router = {
'id': router_id,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port}
if enable_snat is not None:
router['enable_snat'] = enable_snat
return router
def test_process_router(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fake_fip_id = 'fake_fip_id'
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_nat_rules = mock.Mock()
agent.process_router_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
agent.external_gateway_added = mock.Mock()
router = self._prepare_router_data()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid()}]}
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# now no ports so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
agent.process_router(ri)
self.assertEqual(self.send_arp.call_count, 1)
self.assertFalse(agent.process_router_floating_ip_addresses.called)
self.assertFalse(agent.process_router_floating_ip_nat_rules.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_add(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1'
}
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = []
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3')
def test_process_router_floating_ip_nat_rules_add(self):
fip = {
'id': _uuid(), 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1'
}
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules(ri)
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1')
for chain, rule in rules:
nat.add_rule.assert_any_call(chain, rule, tag='floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remove(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({}, fip_statuses)
device.addr.delete.assert_called_once_with(4, '15.1.2.3/32')
def test_process_router_floating_ip_nat_rules_remove(self):
ri = mock.MagicMock()
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules(ri)
        nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remap(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
self.assertFalse(device.addr.add.called)
self.assertFalse(device.addr.delete.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_with_disabled_floating_ip(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
ri.floating_ips = [fip]
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertIsNone(fip_statuses.get(fip_id))
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.add.side_effect = processutils.ProcessExecutionError
device.addr.list.return_value = []
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
fip_statuses)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(enable_snat=True)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(enable_snat=False)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process without NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an interface and reprocess
router[l3_constants.INTERFACE_KEY].append(
{'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.1.4',
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.1.0/24',
'gateway_ip': '35.4.1.1'}})
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(len(nat_rules_delta), 1)
self._verify_snat_rules(nat_rules_delta, router)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data(num_internal_ports=2)
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(len(nat_rules_delta), 1)
self._verify_snat_rules(nat_rules_delta, router, negate=True)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
# occurrs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_network_added.side_effect = None
# _sync_routers_task finds out that _rpc_loop failed to process the
# router last time, it will retry in the next run.
agent.process_router(ri)
# We were able to add the port to ri.internal_ports
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = self._prepare_router_data()
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
# add an internal port
agent.process_router(ri)
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
# occurrs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to down state, remove it.
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_net_removed.side_effect = None
# _sync_routers_task finds out that _rpc_loop failed to process the
# router last time, it will retry in the next run.
agent.process_router(ri)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = self._prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
# Assess the call for putting the floating IP up was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[l3_constants.FLOATINGIP_KEY] = []
ri.router = router
agent.process_router(ri)
# Assess the call for putting the floating IP up was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_addresses.side_effect = RuntimeError
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = self._prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3_agent.RouterInfo(router['id'], self.conf.root_helper,
self.conf.use_namespaces, router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
# Assess the call for putting the floating IP into Error
# was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_handle_router_snat_rules_add_back_jump(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
agent._handle_router_snat_rules(ri, port, [], "iface", "add_rules")
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(args, ('snat', '-j $float-snat'))
self.assertEqual(kwargs, {})
break
def test_handle_router_snat_rules_add_rules(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3_agent.RouterInfo(_uuid(), self.conf.root_helper,
self.conf.use_namespaces, None)
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
internal_cidrs = ['10.0.0.0/24']
agent._handle_router_snat_rules(ri, ex_gw_port, internal_cidrs,
"iface", "add_rules")
nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules)
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
internal_net_rule = ("-A %s-snat -s %s -j SNAT --to-source %s") % (
wrap_name, internal_cidrs[0],
ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(internal_net_rule, nat_rules)
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(internal_net_rule)))
def test_process_router_delete_stale_internal_devices(self):
class FakeDev(object):
def __init__(self, name):
self.name = name
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qr-a1b2c3d4-e5'),
FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = self._prepare_router_data(enable_snat=True,
num_internal_ports=1)
ri = l3_agent.RouterInfo(router['id'],
self.conf.root_helper,
self.conf.use_namespaces,
router=router)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(len(internal_ports), 1)
internal_port = internal_ports[0]
with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent,
'internal_network_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'internal_network_added'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_added')
) as (internal_network_removed,
internal_network_added,
external_gateway_removed,
external_gateway_added):
agent.process_router(ri)
self.assertEqual(external_gateway_added.call_count, 1)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(
ri,
internal_port['network_id'],
internal_port['id'],
internal_port['ip_cidr'],
internal_port['mac_address'])
self.assertEqual(self.mock_driver.unplug.call_count,
len(stale_devnames))
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
class FakeDev(object):
def __init__(self, name):
self.name = name
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = self._prepare_router_data(enable_snat=True,
num_internal_ports=1)
del router['gw_port']
ri = l3_agent.RouterInfo(router['id'],
self.conf.root_helper,
self.conf.use_namespaces,
router=router)
self.mock_ip.get_devices.return_value = stale_devlist
agent.process_router(ri)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_deleted(None, FAKE_ID)
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.removed_routers)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.routers_updated(None, [FAKE_ID])
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.updated_routers)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.removed_routers)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.router_added_to_agent(None, [FAKE_ID])
# verify that will set fullsync
self.assertIn(FAKE_ID, agent.updated_routers)
def test_process_router_delete(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
agent._router_added(router['id'], router)
agent.router_deleted(None, router['id'])
agent._process_router_delete()
self.assertFalse(list(agent.removed_routers))
def test_destroy_router_namespace_skips_ns_removal(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.assertEqual(self.mock_ip.netns.delete.call_count, 0)
def test_destroy_router_namespace_removes_ns(self):
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.mock_ip.netns.delete.assert_called_once_with("fakens")
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': _uuid(),
'external_gateway_info': {},
'routes': []}
with mock.patch.object(
agent, '_destroy_metadata_proxy') as destroy_proxy:
with mock.patch.object(
agent, '_spawn_metadata_proxy') as spawn_proxy:
agent._router_added(router_id, router)
if enableflag:
spawn_proxy.assert_called_with(mock.ANY, mock.ANY)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(mock.ANY, mock.ANY)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def test_metadata_nat_rules(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual([], agent.metadata_nat_rules())
self.conf.set_override('metadata_port', '8775')
self.conf.set_override('enable_metadata_proxy', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
rules = ('PREROUTING', '-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j REDIRECT --to-port 8775')
self.assertEqual([rules], agent.metadata_nat_rules())
def test_router_id_specified_in_conf(self):
self.conf.set_override('use_namespaces', False)
self.conf.set_override('router_id', '')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('router_id', '1234')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(['1234'], agent._router_ids())
self.assertFalse(agent._clean_stale_namespaces)
def test_process_routers_with_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_routers_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_routers_with_stale_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_routers_with_no_ext_net_in_conf_and_two_net_plugin(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}]
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
n_exc.TooManyExternalNetworks())
self.assertRaises(n_exc.TooManyExternalNetworks,
agent._process_routers,
routers)
self.assertNotIn(routers[0]['id'], agent.router_info)
def test_process_routers_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}},
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}]
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertNotIn(routers[1]['id'], agent.router_info)
def test_process_routers_with_no_bridge_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
routers = [
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}},
{'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}]
agent.router_info = {}
self.conf.set_override('external_network_bridge', '')
agent._process_routers(routers)
self.assertIn(routers[0]['id'], agent.router_info)
self.assertIn(routers[1]['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = 'An interface driver must be specified'
log.error.assert_called_once_with(msg)
self.conf.set_override('interface_driver', 'wrong_driver')
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = "Error importing interface driver 'wrong_driver'"
log.error.assert_called_once_with(msg)
def test_metadata_filter_rules(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual([], agent.metadata_filter_rules())
self.conf.set_override('metadata_port', '8775')
self.conf.set_override('enable_metadata_proxy', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
rules = ('INPUT', '-s 0.0.0.0/0 -d 127.0.0.1 '
'-p tcp -m tcp --dport 8775 -j ACCEPT')
self.assertEqual([rules], agent.metadata_filter_rules())
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces):
self.conf.set_override('router_delete_namespaces', True)
good_namespace_list = [l3_agent.NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent._clean_stale_namespaces)
pm = self.external_process.return_value
pm.reset_mock()
agent._destroy_router_namespace = mock.MagicMock()
agent._cleanup_namespaces(router_list)
self.assertEqual(pm.disable.call_count, len(stale_namespace_list))
self.assertEqual(agent._destroy_router_namespace.call_count,
len(stale_namespace_list))
expected_args = [mock.call(ns) for ns in stale_namespace_list]
agent._destroy_router_namespace.assert_has_calls(expected_args,
any_order=True)
self.assertFalse(agent._clean_stale_namespaces)
def test_cleanup_namespace(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'foo',
l3_agent.NS_PREFIX + 'bar']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee']
router_list = [{'id': 'foo'}, {'id': 'aaaa'}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_cleanup_namespace_with_conf_router_id(self):
self.conf.set_override('router_id', 'bbbbb')
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee',
l3_agent.NS_PREFIX + self.conf.router_id]
router_list = [{'id': 'foo'}, {'id': 'aaaa'}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
class TestL3AgentEventHandler(base.BaseTestCase):
def setUp(self):
super(TestL3AgentEventHandler, self).setUp()
cfg.CONF.register_opts(l3_agent.L3NATAgent.OPTS)
agent_config.register_interface_driver_opts_helper(cfg.CONF)
agent_config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.set_override(
'interface_driver', 'neutron.agent.linux.interface.NullDriver'
)
cfg.CONF.set_override('use_namespaces', True)
agent_config.register_root_helper(cfg.CONF)
device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
device_exists_p.start()
utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
utils_exec_p.start()
drv_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = drv_cls_p.start()
mock_driver = mock.MagicMock()
mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = mock_driver
l3_plugin_p = mock.patch(
'neutron.agent.l3_agent.L3PluginApi')
l3_plugin_cls = l3_plugin_p.start()
l3_plugin_cls.return_value = mock.Mock()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager'
)
self.external_process_p.start()
looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
looping_call_p.start()
self.agent = l3_agent.L3NATAgent(HOSTNAME)
def test_spawn_metadata_proxy(self):
router_id = _uuid()
metadata_port = 8080
ip_class_path = 'neutron.agent.linux.ip_lib.IPWrapper'
cfg.CONF.set_override('metadata_port', metadata_port)
cfg.CONF.set_override('log_file', 'test.log')
cfg.CONF.set_override('debug', True)
self.external_process_p.stop()
ns = 'qrouter-' + router_id
try:
with mock.patch(ip_class_path) as ip_mock:
self.agent._spawn_metadata_proxy(router_id, ns)
ip_mock.assert_has_calls([
mock.call('sudo', ns),
mock.call().netns.execute([
'neutron-ns-metadata-proxy',
mock.ANY,
mock.ANY,
'--router_id=%s' % router_id,
mock.ANY,
'--metadata_port=%s' % metadata_port,
'--debug',
'--log-file=neutron-ns-metadata-proxy-%s.log' %
router_id
])
])
finally:
self.external_process_p.start()
| apache-2.0 | 4,219,393,043,147,675,600 | 42.044882 | 79 | 0.562021 | false |
cjh1/cumulus | cumulus/ssh/tasks/key.py | 1 | 3038 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import random
import string
import os
import stat
from paramiko.rsakey import RSAKey
import requests
import starcluster.logger
import cumulus
from cumulus.celery import command
from cumulus.common import check_status
def _key_path(profile):
return os.path.join(cumulus.config.ssh.keyStore, str(profile['_id']))
@command.task
def generate_key_pair(cluster, girder_token=None):
'''
Task to generate a new key pair for a user.
'''
cluster_id = cluster['_id']
status_url = '%s/clusters/%s' \
% (cumulus.config.girder.baseUrl, cluster_id)
log = starcluster.logger.get_starcluster_logger()
headers = {'Girder-Token': girder_token}
try:
new_key = RSAKey.generate(bits=4096)
passphrase = ''.join(random.SystemRandom()
.choice(string.ascii_uppercase +
string.digits) for _ in range(64))
key_path = os.path.join(cumulus.config.ssh.keyStore, cluster_id)
new_key.write_private_key_file(key_path, password=passphrase)
# Allow group read as well
os.chmod(key_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP)
comment = 'cumulus generated access key'
public_key = '%s %s %s' % (new_key.get_name(), new_key.get_base64(),
comment)
# Update passphrase and public key on cluster model
config_update = {
'config': {
'ssh': {
'passphrase': passphrase,
'publicKey': public_key
}
},
'status': 'created'
}
patch_url = '%s/clusters/%s' % (cumulus.config.girder.baseUrl,
cluster_id)
request = requests.patch(patch_url, json=config_update, headers=headers)
check_status(request)
except Exception as ex:
r = requests.patch(status_url, headers=headers,
json={'status': 'error'})
check_status(r)
# Log the error message
log.error(ex.message)
@command.task
def delete_key_pair(aws_profile, girder_token):
path = _key_path(aws_profile)
if os.path.exists(path):
os.remove(path)
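# Hedged usage sketch: both functions above are Celery tasks, so callers
# normally enqueue them rather than call them inline.
def _example_dispatch(cluster, girder_token):
    # Dispatch key generation asynchronously for the given cluster document.
    return generate_key_pair.delay(cluster, girder_token=girder_token)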
| apache-2.0 | -5,409,938,265,857,967,000 | 32.384615 | 80 | 0.579658 | false |
dblalock/bolt | experiments/python/compress.py | 1 | 10068 | #!/usr/bin/env python
import numpy as np
import numba
import zstandard as zstd # pip install zstandard
# ================================================================ Funcs
def nbits_cost(diffs, signed=True):
"""
>>> [nbits_cost(i) for i in [0, 1, 2, 3, 4, 5, 7, 8, 9]]
[0, 2, 3, 3, 4, 4, 4, 5, 5]
>>> [nbits_cost(i) for i in [-1, -2, -3, -4, -5, -7, -8, -9]]
[1, 2, 3, 3, 4, 4, 4, 5]
>>> nbits_cost([])
array([], dtype=int32)
>>> nbits_cost([0, 2, 1, 0])
array([0, 3, 2, 0], dtype=int32)
>>> nbits_cost([0, 2, 1, 3, 4, 0], signed=False)
array([0, 2, 1, 2, 3, 0], dtype=int32)
"""
if diffs is None:
return None
diffs = np.asarray(diffs, dtype=np.int32)
if diffs.size == 0:
return np.array([], dtype=np.int32)
if not signed:
assert np.all(diffs >= 0)
pos_idxs = diffs > 0
nbits = np.zeros(diffs.shape, dtype=np.int32)
nbits[pos_idxs] = np.floor(np.log2(diffs[pos_idxs])) + 1
nbits[~pos_idxs] = 0
return nbits
# shape = diffs.shape
# diffs = diffs.ravel()
# zero_idxs = (diffs == 0)
# # nbits[zero_idxs] = 0
# nbits = np.zeros(len(diffs), dtype=np.int32)
# diffs = diffs[~zero_idxs]
# equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
# # assert np.all(np.abs(diffs) > 0)
# # assert np.all(equiv_diffs > 0)
# nbits[~zero_idxs] = np.ceil(np.log2(equiv_diffs)) + 1
# nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
# assert np.all(nbits >= 0)
shape = diffs.shape
diffs = diffs.ravel()
equiv_diffs = np.abs(diffs) + (diffs >= 0).astype(np.int32) # +1 if < 0
nbits = np.ceil(np.log2(equiv_diffs)) + 1
nbits = np.asarray(nbits, dtype=np.int32) # next line can't handle scalar
nbits[diffs == 0] = 0
assert np.all(nbits >= 0)
return nbits.reshape(shape) if nbits.size > 1 else nbits[0] # unpack if scalar
@numba.njit(fastmath=True)
def zigzag_encode(x):
"""
>>> [zigzag_encode(i) for i in [0,1,-1,2,-2,3,-3]]
[0, 1, 2, 3, 4, 5, 6]
>>> zigzag_encode([0,1,-1,2,-2,3,-3])
array([0, 1, 2, 3, 4, 5, 6], dtype=int32)
"""
x = np.asarray(x, dtype=np.int32)
return (np.abs(x) << 1) - (x > 0).astype(np.int32)
@numba.njit(fastmath=True)
def zigzag_decode(x):
    """
    Inverse of zigzag_encode above (which maps positives to odd codes and
    negatives to even codes), so that decode(encode(v)) == v:
    >>> zigzag_decode(zigzag_encode([0, 1, -1, 2, -2, 3, -3])).tolist()
    [0, 1, -1, 2, -2, 3, -3]
    """
    # The previous body used the standard zigzag decode, which does not invert
    # the (nonstandard) zigzag_encode defined above; this branchless form does.
    x = np.asarray(x, dtype=np.int32)
    return ((x + 1) >> 1) * (2 * np.bitwise_and(x, 1) - 1)
def quantize(X, nbits=16, minval=None, maxval=None):
minval = np.min(X) if minval is None else minval
maxval = np.max(X) if maxval is None else maxval
unsigned_max = (1 << nbits) - 1
dtype_min = 1 << (nbits - 1)
scale = float(unsigned_max) / maxval
X = np.maximum(0, X - minval)
X = np.minimum(unsigned_max, X * scale)
X -= dtype_min # center at 0
dtype = {16: np.int16, 12: np.int16, 8: np.int8}[nbits]
return X.astype(dtype)
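# Hedged example of the quantize -> delta -> zigzag -> bit-width pipeline that
# sprintz_packed_size (below) builds on; the data here is illustrative only.
def _example_bitwidth_pipeline(n=64, d=4):
    X = np.random.randn(n, d).astype(np.float32)
    Xq = quantize(X, nbits=8)                      # int8, centered at 0
    diffs = np.diff(Xq.astype(np.int32), axis=0)   # simple delta coding
    codes = zigzag_encode(diffs)                   # map to non-negative codes
    return nbits_cost(codes, signed=False)         # bits needed per value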
# ================================================================
def zstd_compress(buff, comp=None):
comp = zstd.ZstdCompressor() if comp is None else comp
if isinstance(buff, str):
buff = bytes(buff, encoding='utf8')
return comp.compress(buff)
def zstd_decompress(buff, decomp=None):
decomp = zstd.ZstdDecompressor() if decomp is None else decomp
    return decomp.decompress(buff)
# ============================================================== sprintz
# except without the predictive coding part because we do that manually;
# we also omit the run-length encoding because the author says that's a
# huge pain to code and won't change the results much for our fast-changing
# time series; also we don't do the grouping thing since it only
# affects the decoding speed (it could affect the ratio slightly if the
# number of variables were really low and not a multiple of 8, but neither
# is the case for us)
# def bitpack_vec(x, nbits_per_element):
# n = len(x)
# total_nbits = n * nbits_per_element
# bitvec = np.zeros(total_nbits, dtype=np.bool)
# for i, val in enumerate(x):
# start_idx = i * nbits_per_element
# for b in range(nbits_per_element):
# bit = (val >> b) & 1
# bitvec[start_idx + b] = bit
# return np.packbits(bitvec)
# def bitunpack(X, nbits_per_element):
# was_1d = X.ndim == 1
# X = np.atleast_2d(X)
# N, D = X.shape
# ret = np.unpackbits(X, axis=1)
# if was_1d:
# ret = ret.squeeze()
# return ret
# @numba.njit(fastmath=True)
def bitpack(X, nbits_per_element):
was_1d = X.ndim == 1
X = np.atleast_2d(X)
N, D = X.shape
# orig_elemsz = X.dtype.itemsize
orig_elemsz_bits = 8 * X.dtype.itemsize
assert X.dtype in (np.uint8, np.uint16)
    assert X.dtype in (np.uint8, np.uint16)
ret = X
elif X.dtype == np.uint8:
# print("N, D, nbits: ", N, D, nbits_per_element)
# shape = X.shape
X = X.ravel()
# unpacked = np.unpackbits(X, count=nbits_per_element, bitorder='little', axis=-1)
unpacked = np.unpackbits(X, bitorder='little', axis=-1)
# print("unpacked initial shape: ", unpacked.shape)
unpacked = unpacked.reshape(N * D, 8)[:, :nbits_per_element]
# print("unpacked new shape: ", unpacked.shape)
ret = np.packbits(unpacked.reshape(N, -1), axis=1)
# ret = ret.reshape(N, -1)
# print("ret.shape: ", ret.shape)
else:
# X_low = (X & 0xff)[:, :, np.newaxis]
# X_high = ((X & 0xff00) >> 8)[:, :, np.newaxis]
# X_combined = np.concatenate([X_low, X_high], axis=-1)
# X = X[:, :, np.newaxis]
# X = np.concatenate([X, X], axis=-1)
# X[:, :, 0] = X[:, :, 0] & 0xff
# X[:, :, 1] = (X[:, :, 1] & 0xff00) >> 8
# X = X.reshape(N, 2 * D).astype(np.uint8)
X = np.ascontiguousarray(X).view(np.uint8).reshape(N, 2 * D)
# print("X shape: ", X.shape)
unpacked = np.unpackbits(X, axis=1, bitorder='little')
unpacked = unpacked.reshape(N, orig_elemsz_bits, D)
# unpacked = unpacked[:, ::-1, :] # low bits in low idxs
unpacked = np.ascontiguousarray(unpacked[:, :nbits_per_element])
ret = np.packbits(unpacked.reshape(N, -1))
# nbits_per_row = D * nbits_per_element
# bitmat = np.zeros((N, nbits_per_row), dtype=np.uint8)
# for j in range(D):
# col = X[:, j]
# start_idx = j * nbits_per_element
# for b in range(nbits_per_element):
# bit = (col >> b) & 1
# bitmat[:, start_idx + b] = bit
# ret = np.packbits(bitmat, axis=1)
if was_1d:
ret = ret.squeeze()
return ret
@numba.njit(fastmath=True)
def _sprintz_header_sz(headers, header_elem_nbits):
_, D = headers.shape
header_row_sz = int(np.ceil(D * header_elem_nbits / 8))
rows_total_nbits = headers.sum(axis=1)
# zero_rows = rows_total_nbits == 0
# header_sz = np.sum(nzero_rows) # one byte for run length
# pair_sums = zero_rows +
header_sz = 0
prev_was_zero = False
for row in rows_total_nbits:
is_zero = row == 0
if is_zero:
if prev_was_zero:
continue
else:
header_sz += 1 # start of run
else:
header_sz += header_row_sz
prev_was_zero = is_zero
return header_sz
# def sprintz_packed_size(X, nbits=None, just_return_sz=False, postproc='zstd'):
def sprintz_packed_size(X, nbits=None, just_return_sz=True, postproc=None):
if nbits is None:
nbits = {1: 8, 2: 16}.get(X.dtype.itemsize, 16)
unsigned_dtype = {8: np.uint8, 16: np.uint16}[nbits]
window_len = 8
    pad_nrows = (window_len - (X.shape[0] % window_len)) % window_len
if pad_nrows != 0:
pad_rows = np.zeros((pad_nrows, X.shape[1]), dtype=X.dtype)
X = np.vstack([X, pad_rows])
N, D = X.shape
if X.dtype.itemsize > 2: # basically just catching floats
# print("sprintz: quantizing X...WTF")
X = quantize(X, nbits=nbits)
if np.min(X) < 0:
# print("sprintz: zigzag_encoding X!")
X = zigzag_encode(X).astype(unsigned_dtype)
# else:
# print("sprintz: not zigzag_encoding X!")
header_elem_nbits = {8: 3, 16: 4}[nbits]
X_nbits = nbits_cost(X, signed=False)
X_nbits = np.asfarray(X_nbits).reshape(N // window_len, window_len, -1)
block_nbits = X_nbits.max(axis=1).astype(np.uint8)
block_nbits[block_nbits == (nbits - 1)] = nbits
headers = block_nbits
if just_return_sz:
payload_sz = int(block_nbits.sum() * window_len / 8)
header_sz = _sprintz_header_sz(headers, header_elem_nbits)
# print("header sz: ", header_sz)
return header_sz + payload_sz
nwindows = N // window_len
payloads = []
for i in range(nwindows):
start_idx = i * window_len
end_idx = start_idx + window_len
X_slice = X[start_idx:end_idx]
for j in range(D):
col = X_slice[:, j]
payloads.append(bitpack(col, headers[i, j]))
headers = bitpack(headers, header_elem_nbits)
payloads = np.hstack(payloads)
if postproc is None:
return headers.nbytes + payloads.nbytes
elif postproc == 'zstd':
return len(zstd_compress(headers)) + len(zstd_compress(payloads))
# # nbits_slice = nbits_cost(X_slice, signed=False)
# nbits_slice = X_nbits[start_idx:end_idx]
# max_nbits = nbits_slice.max(axis=0)
# headers[i] = np.minimum(max_nbits, nbits - 1) # 8->7, 16->15
# max_nbits[max_nbits == nbits - 1] = nbits # 7->8, 15->16
# for j in range(D):
# col = X_slice[:, j]
# payloads.append(bitpack(col, max_nbits[j]))
# headers = bitpack(headers, header_elem_nbits)
# payloads = np.hstack(payloads)
# header_bytes = headers.tobytes()
# # payload_bytes = headers.tobytes()
# blosc.compress(buff, typesize=elem_sz,
# cname=compressor, shuffle=shuffle)
#
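# Illustrative call (hypothetical data): estimate the sprintz-style packed size,
# in bytes, of a float time series (quantized to 16 bits internally):
#   sprintz_packed_size(np.random.randn(256, 8).astype(np.float32), nbits=16)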
if __name__ == '__main__':
import doctest
doctest.testmod()
| mpl-2.0 | -8,800,742,138,520,516,000 | 31.794788 | 90 | 0.562078 | false |
NoBodyCam/TftpPxeBootBareMetal | nova/api/openstack/wsgi.py | 1 | 42066 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from xml.parsers import expat
from lxml import etree
import webob
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'application/json',
'application/vnd.openstack.compute+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_instances': {}}
def cache_db_instances(self, instances):
"""
Allow API methods to store instances from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_instances = self._extension_data['db_instances']
for instance in instances:
db_instances[instance['uuid']] = instance
def cache_db_instance(self, instance):
"""
Allow API methods to store an instance from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
self.cache_db_instances([instance])
def get_db_instances(self):
"""
Allow an API extension to get previously stored instances within
the same API request.
Note that the instance data will be slightly stale.
"""
return self._extension_data['db_instances']
def get_db_instance(self, instance_uuid):
"""
Allow an API extension to get a previously stored instance
within the same API request.
Note that the instance data will be slightly stale.
"""
return self._extension_data['db_instances'].get(instance_uuid)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
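    # e.g. a URL path ending in '.json' or an Accept header of
    # 'application/json' both resolve to 'application/json' here.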
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if not "Content-Type" in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in SUPPORTED_CONTENT_TYPES:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found"""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
#NOTE (ameade): the has_atom should be removed after all of the
# xml serializers and view builders have been updated to the current
# spec that required all responses include the xmlns:atom, the has_atom
# flag is to prevent current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
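# Illustrative use of the decorators above on a hypothetical controller method:
#
#     @response(202)
#     @serializers(xml=MyFooTemplateSerializer)
#     def create(self, req, body):
#         ...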
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = minidom.parseString(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = unicode(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=unicode(ex_value)))
# Under python 2.6, TypeError's exception value is actually a string,
        # so test here via ex_type instead:
# http://bugs.python.org/issue7853
elif issubclass(ex_type, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_('Exception handling resource: %s') % ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, inherits=None,
**deserializers):
"""
:param controller: object that implement methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
except AttributeError, e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
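# Illustrative use (hypothetical extension controller):
#
#     @action('os-resetState')
#     def _reset_state(self, req, id, body):
#         ...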
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
def _set_request_id_header(req, headers):
context = req.environ.get('nova.context')
if context:
headers['x-compute-request-id'] = context.request_id
| apache-2.0 | -310,886,773,118,716,200 | 34.113523 | 79 | 0.590429 | false |
thtrieu/qclass_dl | lstm.py | 1 | 3699 | import tensorflow as tf
from tensorflow.models.rnn import rnn_cell, rnn
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.ptb import reader
import numpy as np
class lstm_class(object):
def __init__(
self, embedding_mat, non_static, lstm_type, hidden_unit, sequence_length, num_classes, vocab_size,
embedding_size, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.batch_size = tf.placeholder(tf.int32, name = "batch_size")
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.real_len = tf.placeholder(tf.int32, [None], name = "real_len")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Lookup
with tf.device('/cpu:0'), tf.name_scope("embedding"):
if not non_static:
W = tf.constant(embedding_mat, name = "W")
else:
                W = tf.Variable(embedding_mat, name = "W")
inputs = tf.nn.embedding_lookup(W, self.input_x)
# LSTM
if lstm_type == "gru":
lstm_cell = rnn_cell.GRUCell(num_units = hidden_unit, input_size = embedding_size)
else:
if lstm_type == "basic":
lstm_cell = rnn_cell.BasicLSTMCell(num_units = hidden_unit, input_size = embedding_size)
else:
lstm_cell = rnn_cell.LSTMCell(num_units = hidden_unit, input_size = embedding_size, use_peepholes = True)
lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob = self.dropout_keep_prob)
self._initial_state = lstm_cell.zero_state(self.batch_size, tf.float32)
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, sequence_length, inputs)]
outputs, state = rnn.rnn(lstm_cell, inputs, initial_state=self._initial_state, sequence_length = self.real_len)
        # Collect the appropriate last outputs into variable output (dimension = batch x hidden_unit)
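        # A sequence's last real output is selected with a 0/1 mask: once step
        # index i reaches real_len, `ind` becomes 1.0 and the previously stored
        # output is kept; while the sequence is still active, the current
        # step's output overwrites it.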
output = outputs[0]
one = tf.ones([1, hidden_unit], tf.float32)
with tf.variable_scope("Output"):
tf.get_variable_scope().reuse_variables()
for i in range(1,len(outputs)):
ind = self.real_len < (i+1)
ind = tf.to_float(ind)
ind = tf.expand_dims(ind, -1)
mat = tf.matmul(ind, one)
output = tf.add(tf.mul(output, mat),tf.mul(outputs[i], 1.0 - mat))
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
self.W = tf.Variable(tf.truncated_normal([hidden_unit, num_classes], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(self.W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(output, self.W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| gpl-3.0 | 4,280,998,755,685,231,600 | 48.32 | 121 | 0.600703 | false |
Kentzo/Power | power/win32.py | 1 | 3384 | # coding=utf-8
"""
Implements PowerManagement functions using GetSystemPowerStatus.
Requires Windows XP+.
Observing is not supported
"""
from ctypes import Structure, wintypes, POINTER, windll, WinError, pointer, WINFUNCTYPE, c_ubyte
import warnings
from power import common
# GetSystemPowerStatus
# Returns brief description of current system power status.
# Windows XP+
# REQUIRED.
GetSystemPowerStatus = None
try:
GetSystemPowerStatus = windll.kernel32.GetSystemPowerStatus
class SYSTEM_POWER_STATUS(Structure):
_fields_ = [
            ('ACLineStatus', c_ubyte),
            ('BatteryFlag', c_ubyte),
            ('BatteryLifePercent', c_ubyte),
            ('Reserved1', c_ubyte),
('BatteryLifeTime', wintypes.DWORD),
('BatteryFullLifeTime', wintypes.DWORD)
]
GetSystemPowerStatus.argtypes = [POINTER(SYSTEM_POWER_STATUS)]
GetSystemPowerStatus.restype = wintypes.BOOL
except AttributeError as e:
raise RuntimeError("Unable to load GetSystemPowerStatus."
"The system does not provide it (Win XP is required) or kernel32.dll is damaged.")
POWER_TYPE_MAP = {
0: common.POWER_TYPE_BATTERY,
1: common.POWER_TYPE_AC,
255: common.POWER_TYPE_AC
}
class PowerManagement(common.PowerManagementBase):
def get_providing_power_source_type(self):
"""
Returns GetSystemPowerStatus().ACLineStatus
        @raise: WindowsError if any underlying error occurs.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
return POWER_TYPE_MAP[power_status.ACLineStatus]
def get_low_battery_warning_level(self):
"""
Returns warning according to GetSystemPowerStatus().BatteryLifeTime/BatteryLifePercent
        @raise WindowsError if any underlying error occurs.
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.LOW_BATTERY_WARNING_NONE
else:
if power_status.BatteryLifeTime != -1 and power_status.BatteryLifeTime <= 600:
return common.LOW_BATTERY_WARNING_FINAL
elif power_status.BatteryLifePercent <= 22:
return common.LOW_BATTERY_WARNING_EARLY
else:
return common.LOW_BATTERY_WARNING_NONE
def get_time_remaining_estimate(self):
"""
Returns time remaining estimate according to GetSystemPowerStatus().BatteryLifeTime
"""
power_status = SYSTEM_POWER_STATUS()
if not GetSystemPowerStatus(pointer(power_status)):
raise WinError()
if POWER_TYPE_MAP[power_status.ACLineStatus] == common.POWER_TYPE_AC:
return common.TIME_REMAINING_UNLIMITED
elif power_status.BatteryLifeTime == -1:
return common.TIME_REMAINING_UNKNOWN
else:
return float(power_status.BatteryLifeTime) / 60.0
def add_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
def remove_observer(self, observer):
warnings.warn("Current system does not support observing.")
pass
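# Illustrative usage on Windows (hypothetical values):
#   pm = PowerManagement()
#   pm.get_providing_power_source_type()  ->  common.POWER_TYPE_AC on mains power
#   pm.get_time_remaining_estimate()      ->  minutes left, or TIME_REMAINING_UNLIMITED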
| mit | -1,949,248,720,823,125,000 | 33.886598 | 105 | 0.663121 | false |
tata-antares/LHCb-topo-trigger | BBDTconvert.py | 1 | 3846 | from __future__ import division, absolute_import
__author__ = 'Tatiana Likhomanenko'
import sys
import struct
from scipy.special import expit
import numpy
from rep_ef.estimators._matrixnetapplier import MatrixnetClassifier
def unpack_formula(formula_stream, print_=True):
features = list() # feature names
bins_quantities = list() # bins quantity for each feature
bins = list() # list for bins for each feature
bytes = formula_stream.read(4)
features_quantity = struct.unpack('i', bytes)[0]
for index in range(0, features_quantity):
bytes = formula_stream.read(4)
factor_length = struct.unpack('i', bytes)[0]
features.append(formula_stream.read(factor_length))
bytes = formula_stream.read(4) # skip formula length
used_features_quantity = struct.unpack('I', formula_stream.read(4))[0]
bins_quantities = struct.unpack(
'I' * used_features_quantity,
formula_stream.read(4 * used_features_quantity)
)
bins_total = struct.unpack('I', formula_stream.read(4))[0]
if print_:
print bins_total
for index in range(used_features_quantity):
bins.append(
struct.unpack(
'f' * bins_quantities[index],
formula_stream.read(4 * bins_quantities[index])
)
)
if print_:
print str(features[index]) + " - " + str(bins_quantities[index])
for j in range(len(bins[index])):
print bins[index][j]
print "------------"
return features, bins_quantities, bins
def convert_lookup_index_to_bins(points_in_bins, lookup_indices):
result = numpy.zeros([len(lookup_indices), len(points_in_bins)], dtype=float)
lookup_indices = lookup_indices.copy()
for i, points_in_variable in list(enumerate(points_in_bins))[::-1]:
print(points_in_variable)
n_columns = len(points_in_variable)
result[:, i] = points_in_variable[lookup_indices % n_columns]
lookup_indices //= n_columns
assert numpy.prod([len(x) for x in points_in_bins]) == len(lookup_indices)
return result
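# Sketch of the enumeration order (hypothetical bins): with
#   points_in_bins = [numpy.array([0., 1.]), numpy.array([10., 20., 30.])]
# convert_lookup_index_to_bins(points_in_bins, numpy.arange(6)) yields the rows
# (0,10), (0,20), (0,30), (1,10), (1,20), (1,30); the last feature varies fastest.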
def write_formula(inp_file, out_file, threshold):
with open(inp_file) as formula_stream:
features, bins_quantities, bins = unpack_formula(formula_stream, False)
with open(inp_file) as formula_stream:
mx = MatrixnetClassifier(formula_stream)
bins_quantities = list(bins_quantities)
for i in xrange(len(bins)):
bins[i] = sorted(list(bins[i]))
bins[i] = [-10 * abs(bins[i][0])] + bins[i]
bins_quantities[i] += 1
bins_quantities = numpy.array(bins_quantities)
count = numpy.prod(bins_quantities)
points_in_bins = []
for i in range(len(features)):
edges = numpy.array(bins[i])
points_in = (edges[1:] + edges[:-1]) / 2.
points_in = numpy.array(list(points_in) + [edges[-1] + 1.])
points_in_bins.append(points_in)
with open(out_file, "w") as output_stream:
print "Total event count: " + str(count)
output_stream.write(str(len(features)) + " # feature count\n")
output_stream.write(" ".join([str(f) for f in features]) + " # features\n")
output_stream.write(" ".join([str(b) for b in bins_quantities]) + "\n")
for fbins in bins:
output_stream.write(" ".join([str(b) for b in fbins]) + "\n")
fbins.append(abs(fbins[-1]) * 3)
divider = 10000
output_stream.write(str(divider) + "\n")
events = convert_lookup_index_to_bins(points_in_bins, lookup_indices=numpy.arange(count))
predictions = expit(mx.apply(events))
assert len(predictions) == count
for q, pred in enumerate(predictions):
if pred > threshold:
output_stream.write(str(q) + " " + str(int(pred * divider)) + "\n") | apache-2.0 | -4,683,562,495,968,357,000 | 35.638095 | 97 | 0.615965 | false |
sergiopasra/connectsim | conectsim/devices/device.py | 1 | 5726 | from conectsim.optics.basenodes import Node, Source
from conectsim.devices.element import Element
from conectsim.signal import Signal
class Device(Element):
'''Something we can handle.'''
def __init__(self, name=None, parent=None):
self.parent = parent
self.children = []
if self.parent:
self.parent.children.append(self)
super(Device, self).__init__(name)
def config_info(self):
'''Return my configuration information.'''
info = {}
for dev in self.children:
info[dev.name] = dev.config_info()
info['name'] = self.name
return info
def configure(self, meta):
"""Configure myself and my children."""
for dev in self.children:
key = dev.name
if key in meta:
dev.configure(meta[key])
def set_parent(self, newparent):
if self.parent:
self.parent.children.remove(self)
self.parent = newparent
if self.parent:
self.parent.children.append(self)
class ConnectableDevice(Device, Node):
def __init__(self, name=None, parent=None):
Device.__init__(self, name=name, parent=parent)
Node.__init__(self)
class ContainerDevice(ConnectableDevice):
def trace(self):
c = self.current()
d = self
while isinstance(c, ContainerDevice):
d = c
c = c.current()
if isinstance(c, Source):
return [c]
if d.previousnode:
return d.previousnode.trace() + [c]
return [c]
class Carrousel(ContainerDevice):
def __init__(self, capacity, name=None, parent=None):
super(Carrousel, self).__init__(name=name, parent=parent)
# Container is empty
self._container = [None] * capacity
self._capacity = capacity
self._pos = 0
# object in the current position
self._current = self._container[self._pos]
# signals
self.changed = Signal()
self.moved = Signal()
def current(self):
return self._current
def pos(self):
return self._pos
def put_in_pos(self, obj, pos):
if pos >= self._capacity or pos < 0:
raise ValueError('position greater than capacity or negative')
self._container[pos] = obj
self._current = self._container[self._pos]
def move_to(self, pos):
if pos >= self._capacity or pos < 0:
raise ValueError('Position %d out of bounds' % pos)
if pos != self._pos:
self._pos = pos
self._current = self._container[self._pos]
self.changed.emit(self._pos)
self.moved.emit(self._pos)
def select(self, name):
# find pos of object with name
for idx, item in enumerate(self._container):
if item:
if isinstance(item, basestring):
if item == name:
return self.move_to(idx)
elif item.name == name:
return self.move_to(idx)
else:
pass
else:
raise ValueError('No object named %s' % name)
def config_info(self):
if self._current:
if isinstance(self._current, basestring):
label = self._current
else:
label = self._current.name
else:
label = 'Unknown'
return {'name': self.name, 'position': self._pos,
'label': label}
def configure(self, meta):
self.move_to(meta)
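# Illustrative usage (hypothetical names):
#   wheel = Carrousel(4, name='wheel')
#   wheel.put_in_pos('open', 0)
#   wheel.config_info()  ->  {'name': 'wheel', 'position': 0, 'label': 'open'}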
class Switch(ContainerDevice):
def __init__(self, capacity, name=None, parent=None):
super(Switch, self).__init__(name=name, parent=parent)
# Container is empty
self._container = [None] * capacity
self._capacity = capacity
self._pos = 0
# object in the current position
self._current = self._container[self._pos]
# signals
self.changed = Signal()
self.moved = Signal()
def current(self):
return self._current
def head(self):
return self
def pos(self):
return self._pos
def connect_to_pos(self, obj, pos):
if pos >= self._capacity or pos < 0:
raise ValueError('position greater than capacity or negative')
obj.connect(self)
self._container[pos] = obj
self._current = self._container[self._pos]
def move_to(self, pos):
if pos >= self._capacity or pos < 0:
raise ValueError('Position %d out of bounds' % pos)
if pos != self._pos:
self._pos = pos
self._current = self._container[self._pos]
self.changed.emit(self._pos)
self._current.connect(self)
self.moved.emit(self._pos)
def select(self, name):
# find pos of object with name
for idx, item in enumerate(self._container):
if item:
if isinstance(item, basestring):
if item == name:
return self.move_to(idx)
elif item.name == name:
return self.move_to(idx)
else:
pass
else:
raise ValueError('No object named %s' % name)
def config_info(self):
if self._current:
if isinstance(self._current, basestring):
label = self._current
else:
label = self._current.name
else:
label = 'Unknown'
return {'name': self.name, 'position': self._pos,
'label': label}
def configure(self, meta):
self.move_to(meta)
| gpl-3.0 | -7,182,442,475,465,436,000 | 26.931707 | 74 | 0.538421 | false |
midnightercz/pulp_docker | pulp-dev.py | 1 | 5020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import sys
from pulp.devel import environment
WARNING_COLOR = '\033[31m'
WARNING_RESET = '\033[0m'
DIRS = ('/var/lib/pulp/published/docker/web',)
#
# Str entry assumes same src and dst relative path.
# Tuple entry is explicit (src, dst)
#
# Please keep alphabetized and by subproject
# Standard directories
DIR_PLUGINS = '/usr/lib/pulp/plugins'
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
LINKS = (
('plugins/etc/httpd/conf.d/pulp_docker.conf', '/etc/httpd/conf.d/pulp_docker.conf'),
('plugins/etc/pulp/server/plugins.conf.d/docker_distributor.json',
'/etc/pulp/server/plugins.conf.d/docker_distributor.json'),
)
def parse_cmdline():
"""
Parse and validate the command line options.
"""
parser = optparse.OptionParser()
parser.add_option('-I', '--install',
action='store_true',
help='install pulp development files')
parser.add_option('-U', '--uninstall',
action='store_true',
help='uninstall pulp development files')
parser.add_option('-D', '--debug',
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.set_defaults(install=False,
uninstall=False,
debug=True)
opts, args = parser.parse_args()
if opts.install and opts.uninstall:
parser.error('both install and uninstall specified')
if not (opts.install or opts.uninstall):
parser.error('neither install or uninstall specified')
return (opts, args)
def warning(msg):
print "%s%s%s" % (WARNING_COLOR, msg, WARNING_RESET)
def debug(opts, msg):
if not opts.debug:
return
sys.stderr.write('%s\n' % msg)
def create_dirs(opts):
for d in DIRS:
if os.path.exists(d) and os.path.isdir(d):
debug(opts, 'skipping %s exists' % d)
continue
debug(opts, 'creating directory: %s' % d)
os.makedirs(d, 0777)
def getlinks():
links = []
for l in LINKS:
if isinstance(l, (list, tuple)):
src = l[0]
dst = l[1]
else:
src = l
dst = os.path.join('/', l)
links.append((src, dst))
return links
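# getlinks() normalizes LINKS into absolute (src, dst) pairs; a plain string
# entry such as 'foo/bar.conf' (hypothetical) would map to
# ('foo/bar.conf', '/foo/bar.conf').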
def install(opts):
# Install the packages in developer mode
environment.manage_setup_pys('install', ROOT_DIR)
warnings = []
create_dirs(opts)
# Ensure the directory is owned by apache
os.system('chown -R apache:apache /var/lib/pulp/published/docker')
currdir = os.path.abspath(os.path.dirname(__file__))
for src, dst in getlinks():
warning_msg = create_link(opts, os.path.join(currdir, src), dst)
if warning_msg:
warnings.append(warning_msg)
if warnings:
print "\n***\nPossible problems: Please read below\n***"
for w in warnings:
warning(w)
return os.EX_OK
def uninstall(opts):
for src, dst in getlinks():
debug(opts, 'removing link: %s' % dst)
if not os.path.islink(dst):
debug(opts, '%s does not exist, skipping' % dst)
continue
os.unlink(dst)
# Uninstall the packages
environment.manage_setup_pys('uninstall', ROOT_DIR)
return os.EX_OK
def create_link(opts, src, dst):
if not os.path.lexists(dst):
return _create_link(opts, src, dst)
if not os.path.islink(dst):
return "[%s] is not a symbolic link as we expected, please adjust if this " \
"is not what you intended." % (dst)
if not os.path.exists(os.readlink(dst)):
warning('BROKEN LINK: [%s] attempting to delete and fix it to point to %s.' % (dst, src))
try:
os.unlink(dst)
return _create_link(opts, src, dst)
except:
msg = "[%s] was a broken symlink, failed to delete and relink to [%s], " \
"please fix this manually"\
% (dst, src)
return msg
debug(opts, 'verifying link: %s points to %s' % (dst, src))
dst_stat = os.stat(dst)
src_stat = os.stat(src)
if dst_stat.st_ino != src_stat.st_ino:
msg = "[%s] is pointing to [%s] which is different than the intended target [%s]"\
% (dst, os.readlink(dst), src)
return msg
def _create_link(opts, src, dst):
debug(opts, 'creating link: %s pointing to %s' % (dst, src))
try:
os.symlink(src, dst)
except OSError, e:
msg = "Unable to create symlink for [%s] pointing to [%s], received error: <%s>"\
% (dst, src, e)
return msg
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# TODO add something to check for permissions
opts, args = parse_cmdline()
if opts.install:
sys.exit(install(opts))
if opts.uninstall:
sys.exit(uninstall(opts))
| gpl-2.0 | -7,254,177,743,593,166,000 | 27.202247 | 97 | 0.566932 | false |
hanyangii/SummThing | bptree.py | 1 | 18095 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import bisect
import itertools
import operator
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class _BNode(object):
__slots__ = ["tree", "contents", "children"]
def __init__(self, tree, contents=None, children=None):
self.tree = tree
self.contents = contents or []
self.children = children or []
if self.children:
assert len(self.contents) + 1 == len(self.children), \
"one more child than data item required"
def __repr__(self):
name = getattr(self, "children", 0) and "Branch" or "Leaf"
return "<%s %s>" % (name, ", ".join(map(str, self.contents)))
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(parent.contents[dest_index])
parent.contents[dest_index] = self.contents.pop(0)
if self.children:
dest.children.append(self.children.pop(0))
else:
dest.contents.insert(0, parent.contents[parent_index])
parent.contents[parent_index] = self.contents.pop()
if self.children:
dest.children.insert(0, self.children.pop())
def shrink(self, ancestors):
parent = None
if ancestors:
parent, parent_index = ancestors.pop()
# try to lend to the left neighboring sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, left_sib, parent_index - 1)
return
# try the right neighbor
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, right_sib, parent_index + 1)
return
center = len(self.contents) // 2
sibling, push = self.split()
if not parent:
parent, parent_index = self.tree.BRANCH(
self.tree, children=[self]), 0
self.tree._root = parent
# pass the median up to the parent
parent.contents.insert(parent_index, push)
parent.children.insert(parent_index + 1, sibling)
if len(parent.contents) > parent.tree.order:
parent.shrink(ancestors)
def grow(self, ancestors):
parent, parent_index = ancestors.pop()
minimum = self.tree.order // 2
left_sib = right_sib = None
# try to borrow from the right sibling
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# try to borrow from the left sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# consolidate with a sibling - try left first
if left_sib:
left_sib.contents.append(parent.contents[parent_index - 1])
left_sib.contents.extend(self.contents)
if self.children:
left_sib.children.extend(self.children)
parent.contents.pop(parent_index - 1)
parent.children.pop(parent_index)
else:
self.contents.append(parent.contents[parent_index])
self.contents.extend(right_sib.contents)
if self.children:
self.children.extend(right_sib.children)
parent.contents.pop(parent_index)
parent.children.pop(parent_index + 1)
if len(parent.contents) < minimum:
if ancestors:
# parent is not the root
parent.grow(ancestors)
elif not parent.contents:
# parent is root, and its now empty
self.tree._root = left_sib or self
def split(self):
center = len(self.contents) // 2
median = self.contents[center]
sibling = type(self)(
self.tree,
self.contents[center + 1:],
self.children[center + 1:])
self.contents = self.contents[:center]
self.children = self.children[:center + 1]
return sibling, median
def insert(self, index, item, ancestors):
self.contents.insert(index, item)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if self.children:
            # find the smallest value in the right subtree, exchange it with the current node,
            # then delete that smallest one, just like in a binary search tree.
            # Note: we only take this route if len(descendent.contents) > minimum, to avoid a 'grow' operation.
            # Otherwise we fall back to the left subtree and do the same thing anyway;
            # all internal nodes have both a left and a right subtree.
additional_ancestors = [(self, index + 1)]
descendent = self.children[index + 1]
while descendent.children:
additional_ancestors.append((descendent, 0))
descendent = descendent.children[0]
if len(descendent.contents) > minimum:
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[0]
descendent.remove(0, ancestors)
return
# fall back to the left child, and exchange with the biggest, then delete the biggest anyway.
additional_ancestors = [(self, index)]
descendent = self.children[index]
while descendent.children:
additional_ancestors.append(
(descendent, len(descendent.children) - 1))
descendent = descendent.children[-1]
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[-1]
descendent.remove(len(descendent.children) - 1, ancestors)
else:
self.contents.pop(index)
if len(self.contents) < minimum and ancestors:
self.grow(ancestors)
class _BPlusLeaf(_BNode):
__slots__ = ["tree", "contents", "data", "next"]
def __init__(self, tree, contents=None, data=None, next=None):
self.tree = tree
self.contents = contents or []
self.data = data or []
self.next = next
assert len(self.contents) == len(self.data), "one data per key"
def insert(self, index, key, data, ancestors):
self.contents.insert(index, key)
self.data.insert(index, data)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(self.contents.pop(0))
dest.data.append(self.data.pop(0))
parent.contents[dest_index] = self.contents[0]
else:
dest.contents.insert(0, self.contents.pop())
dest.data.insert(0, self.data.pop())
parent.contents[parent_index] = dest.contents[0]
def split(self):
center = len(self.contents) // 2
median = self.contents[center - 1]
sibling = type(self)(
self.tree,
self.contents[center:],
self.data[center:],
self.next)
self.contents = self.contents[:center]
self.data = self.data[:center]
self.next = sibling
return sibling, sibling.contents[0]
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if index >= len(self.contents):
self, index = self.next, 0
key = self.contents[index]
# if any leaf that could accept the key can do so
# without any rebalancing necessary, then go that route
current = self
while current is not None and current.contents[0] == key:
if len(current.contents) > minimum:
if current.contents[0] == key:
index = 0
else:
index = bisect.bisect_left(current.contents, key)
current.contents.pop(index)
current.data.pop(index)
return
current = current.next
self.grow(ancestors)
def grow(self, ancestors):
minimum = self.tree.order // 2
parent, parent_index = ancestors.pop()
left_sib = right_sib = None
# try borrowing from a neighbor - try right first
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# fallback to left
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# join with a neighbor - try left first
if left_sib:
left_sib.contents.extend(self.contents)
left_sib.data.extend(self.data)
parent.remove(parent_index - 1, ancestors)
return
# fallback to right
self.contents.extend(right_sib.contents)
self.data.extend(right_sib.data)
parent.remove(parent_index, ancestors)
class BTree(object):
BRANCH = LEAF = _BNode
def __init__(self, order):
self.order = order
self._root = self._bottom = self.LEAF(self)
def _path_to(self, item):
"""
"""
current = self._root
ancestry = []
while getattr(current, "children", None):
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
if index < len(current.contents) \
and current.contents[index] == item:
return ancestry
current = current.children[index]
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
present = index < len(current.contents)
present = present and current.contents[index] == item
return ancestry
def _present(self, item, ancestors):
last, index = ancestors[-1]
return index < len(last.contents) and last.contents[index] == item
def insert(self, item):
current = self._root
ancestors = self._path_to(item)
node, index = ancestors[-1]
while getattr(node, "children", None):
node = node.children[index]
index = bisect.bisect_left(node.contents, item)
ancestors.append((node, index))
node, index = ancestors.pop()
node.insert(index, item, ancestors)
def remove(self, item):
current = self._root
ancestors = self._path_to(item)
if self._present(item, ancestors):
node, index = ancestors.pop()
node.remove(index, ancestors)
else:
raise ValueError("%r not in %s" % (item, self.__class__.__name__))
def __contains__(self, item):
return self._present(item, self._path_to(item))
def __iter__(self):
def _recurse(node):
if node.children:
for child, item in zip(node.children, node.contents):
for child_item in _recurse(child):
yield child_item
yield item
for child_item in _recurse(node.children[-1]):
yield child_item
else:
for item in node.contents:
yield item
for item in _recurse(self._root):
yield item
def __repr__(self):
def recurse(node, accum, depth):
accum.append((" " * depth) + repr(node))
for node in getattr(node, "children", []):
recurse(node, accum, depth + 1)
accum = []
recurse(self._root, accum, 0)
return "\n".join(accum)
@classmethod
def bulkload(cls, items, order):
tree = object.__new__(cls)
tree.order = order
leaves = tree._build_bulkloaded_leaves(items)
tree._build_bulkloaded_branches(leaves)
return tree
def _build_bulkloaded_leaves(self, items):
minimum = self.order // 2
leaves, seps = [[]], []
for item in items:
if len(leaves[-1]) < self.order:
leaves[-1].append(item)
else:
seps.append(item)
leaves.append([])
if len(leaves[-1]) < minimum and seps:
last_two = leaves[-2] + [seps.pop()] + leaves[-1]
leaves[-2] = last_two[:minimum]
leaves[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
return [self.LEAF(self, contents=node) for node in leaves], seps
def _build_bulkloaded_branches(self, (leaves, seps)):
minimum = self.order // 2
levels = [leaves]
while len(seps) > self.order + 1:
items, nodes, seps = seps, [[]], []
for item in items:
if len(nodes[-1]) < self.order:
nodes[-1].append(item)
else:
seps.append(item)
nodes.append([])
if len(nodes[-1]) < minimum and seps:
last_two = nodes[-2] + [seps.pop()] + nodes[-1]
nodes[-2] = last_two[:minimum]
nodes[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
offset = 0
for i, node in enumerate(nodes):
children = levels[-1][offset:offset + len(node) + 1]
nodes[i] = self.BRANCH(self, contents=node, children=children)
offset += len(node) + 1
levels.append(nodes)
self._root = self.BRANCH(self, contents=seps, children=levels[-1])
class BPlusTree(BTree):
LEAF = _BPlusLeaf
def _get(self, key):
node, index = self._path_to(key)[-1]
if index == len(node.contents):
if node.next:
node, index = node.next, 0
else:
return
while node.contents[index] == key:
yield node.data[index]
index += 1
if index == len(node.contents):
if node.next:
node, index = node.next, 0
else:
return
def _path_to(self, item):
path = super(BPlusTree, self)._path_to(item)
node, index = path[-1]
while hasattr(node, "children"):
node = node.children[index]
index = bisect.bisect_left(node.contents, item)
path.append((node, index))
return path
def get(self, key, default=None):
try:
return self._get(key).next()
except StopIteration:
return default
def getlist(self, key):
return list(self._get(key))
def insert(self, key, data):
path = self._path_to(key)
node, index = path.pop()
node.insert(index, key, data, path)
def remove(self, key):
path = self._path_to(key)
node, index = path.pop()
node.remove(index, path)
__getitem__ = get
__setitem__ = insert
__delitem__ = remove
def __contains__(self, key):
for item in self._get(key):
return True
return False
def iteritems(self):
node = self._root
while hasattr(node, "children"):
node = node.children[0]
while node:
for pair in itertools.izip(node.contents, node.data):
yield pair
node = node.next
def iterkeys(self):
return itertools.imap(operator.itemgetter(0), self.iteritems())
def itervalues(self):
return itertools.imap(operator.itemgetter(1), self.iteritems())
__iter__ = iterkeys
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def _build_bulkloaded_leaves(self, items):
minimum = self.order // 2
leaves, seps = [[]], []
for item in items:
if len(leaves[-1]) >= self.order:
seps.append(item)
leaves.append([])
leaves[-1].append(item)
if len(leaves[-1]) < minimum and seps:
last_two = leaves[-2] + leaves[-1]
leaves[-2] = last_two[:minimum]
leaves[-1] = last_two[minimum:]
seps.append(last_two[minimum])
leaves = [self.LEAF(
self,
contents=[p[0] for p in pairs],
data=[p[1] for p in pairs])
for pairs in leaves]
for i in xrange(len(leaves) - 1):
leaves[i].next = leaves[i + 1]
return leaves, [s[0] for s in seps]
def main():
bt = BTree(2)
l = range(20, 0, -1)
bt.insert('박씨')
bt.insert('정씨')
bt.insert('김씨')
bt.insert('이씨')
bt.insert('황씨')
    BP = BPlusTree(2)  # BPlusTree expects an order (int), not an existing tree
BP.insert('박', '박씨')
for i in range(0,5):
print list(bt)[i]
print BP.values()[0]
print BP.keys()[0]
print BP.items()[0]
print BP.get('박씨')
#for i, item in enumerate(l):
# bt.insert(item)
# print list(bt)
if __name__ == '__main__':
#unittest.main()
main()
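
# A small, self-contained usage sketch (illustrative only, never called here;
# the key/value pairs and the order of 4 are hypothetical choices):
def _bplustree_example():
    tree = BPlusTree(4)
    for key, val in [(u'a', 1), (u'b', 2), (u'c', 3), (u'd', 4), (u'e', 5)]:
        # each key maps to one data value; duplicates would be returned by getlist()
        tree.insert(key, val)
    assert tree.get(u'c') == 3
    assert u'e' in tree
    assert tree.keys() == [u'a', u'b', u'c', u'd', u'e']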
| apache-2.0 | -7,392,944,706,407,312,000 | 32.207721 | 114 | 0.543814 | false |
authman/Python201609 | Nguyen_Ken/Assignments/Flask with MySQL/full_friends/server.py | 1 | 2098 | from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
app = Flask(__name__)
app.secret_key = 'secretsquirrel'
mysql = MySQLConnector(app, 'friendsdb')
@app.route('/')
def index():
showQuery = 'SELECT * FROM friends'
friends = mysql.query_db(showQuery)
return render_template('index.html', all_friends = friends)
@app.route('/friends/<friend_id>/edit')
def edit(friend_id):
friend_id = friend_id
return render_template('edit.html', friend_id = friend_id)
@app.route('/friends/<friend_id>', methods=['POST'])
def update(friend_id):
data = {
'first_name' : request.form['first_name'],
'last_name' : request.form['last_name'],
'occupation' : request.form['occupation'],
'id' : friend_id
}
updateQuery = "UPDATE friends SET first_name = :first_name, last_name = :last_name, occupation = :occupation WHERE id = :id"
mysql.query_db(updateQuery, data)
return redirect('/')
@app.route('/friends', methods=['POST'])
def create():
data = {
'first_name' : request.form['first_name'],
'last_name' : request.form['last_name'],
'occupation' : request.form['occupation']
}
createQuery = 'INSERT INTO friends (first_name, last_name, occupation, created_at, updated_at) VALUES (:first_name, :last_name, :occupation, NOW(), NOW())'
mysql.query_db(createQuery, data)
return redirect('/')
@app.route('/friends/<friend_id>/confirm')
def confirm(friend_id):
data = {
'id' : friend_id
}
friend_id = friend_id
singleFriendQuery = 'SELECT * FROM friends WHERE id = :id'
oneFriend = mysql.query_db(singleFriendQuery, data)
return render_template('delete.html', friend_id = friend_id, oneFriend = oneFriend)
@app.route('/friends/<friend_id>/delete', methods=['POST'])
def destroy(friend_id):
data = {'id' : friend_id}
deleteQuery = 'DELETE FROM friends WHERE id = :id'
mysql.query_db(deleteQuery, data)
return redirect('/')
app.run(debug=True)
| mit | -6,934,720,650,099,028,000 | 22.054945 | 159 | 0.63918 | false |
vdv7/stap | tasks/stdio/pvt.py | 1 | 1754 | #!/usr/bin/env python3
'''Psychomotor Vigilance Task'''
#########################################################
# STAP constants and stdio
import json,sys
if 'raw_input' in vars(__builtins__): input = raw_input #Fix for Python 2.x raw_input
def send(d): print(json.dumps(d)); sys.stdout.flush()
def recv(): return json.loads(input())
CLEAR = None
def obj(id=None,content=NotImplemented,**options):
if id is not None: options['id']=id
if content is not NotImplemented: options['v']=content
return options
#########################################################
import random,statistics
TRIALS = 10
INSTRUCTIONS = 'Click a button when one appears here'
BUTTON = obj('Click Me',False,onin={'v':CLEAR})
def main():
log=[]
ums=0
#announce required options
send({'require':{'options':['U','onin']},'template':'[type="bin"][level="1"]{height:200px}'})
#display Trial and instructions containers; let user software know that any buttons inside the instructions container should be deleted once user-input (i.e. click) is detected
send([ obj('Trial',1,max=TRIALS),
obj(INSTRUCTIONS,[]) ])
#do trials
for trial in range(1,TRIALS+1):
#set random time for button appearance
buttonAppearanceTime=ums+random.randrange(2000,10000)
#update trial time, wait till buttonAppearanceTime, then add the 'Click me' button
send([ obj('Trial',trial),
obj(INSTRUCTIONS, [BUTTON], U=buttonAppearanceTime) ])
#get participant action
ums=recv()[0]
log.append(ums-buttonAppearanceTime)
send([ obj('Your response time is',log[-1],unit='ms') ])
#display goodbye message in popup
send([ CLEAR,
obj('Your mean response time is',statistics.mean(log)),
'Thank you for your participation.' ])
if __name__=='__main__': main()
| mit | 8,034,561,188,412,784,000 | 30.321429 | 177 | 0.657925 | false |
cheery/pyllisp | compiler/__init__.py | 1 | 33018 | from rpython.rlib import jit
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import always_inline, specialize
import base
import reader
import space
class ProgramBody:
def __init__(self, blocks, functions, is_generator):
self.blocks = reverse_postorder(blocks[0])
self.functions = functions
self.is_generator = is_generator
self.tmpc = 0
allocate_tmp(self)
@specialize.argtype(0)
def reversed(seq):
for i in range(len(seq), 0, -1):
yield seq[i-1]
# Since the frame is virtualizable now, it copies everything
# from tmp in order to juggle the values.
# Instead of using a separate index for every temporary value,
# we can do some live range analysis and reuse the indices
# for items that are guaranteed not to be simultaneously live.
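# For example (hypothetical ops): if a is live over positions [0, 2],
# b over [4, 6] and c over [1, 5], then a and b can share slot 0 while
# c needs slot 1, so tmpc becomes 2 instead of 3.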
def allocate_tmp(body):
index = 0
base = 0
for block in body.blocks:
block.base = base
block.index = index
block.depends = {}
index += 1
base += len(block)
done = False
while not done:
done = True
for block in reversed(body.blocks):
N = len(block.depends)
for succ in block.succ:
block.depends.update(succ.depends)
for op in reversed(block):
if op in block.depends:
block.depends.pop(op)
for use in op.uses():
block.depends[use] = None
M = len(block.depends)
if N < M:
done = False
live_ranges = {}
for block in body.blocks:
for op in block.depends:
plot_range(live_ranges, op, block.base)
for succ in block.succ:
assert succ.index >= 0
for op in succ.depends:
plot_range(live_ranges, op, block.base + len(block))
i = 0
for op in block:
plot_range(live_ranges, op, block.base+i)
for use in op.uses():
plot_range(live_ranges, use, block.base+i+1)
i += 1
starts = []
stops = []
avail = []
for op, (start, stop) in live_ranges.iteritems():
starts.append((start, stop, op))
sort_starts(starts).sort()
for current, stop, op in starts:
assert current <= stop
if len(avail) > 0:
op.i = avail.pop()
else:
op.i = body.tmpc
body.tmpc += 1
stops.append((stop, op))
sort_ends(stops).sort()
while len(stops) > 0 and stops[0][0] < current:
_, exp = stops.pop(0)
assert exp.i not in avail
avail.append(exp.i)
sort_starts = make_timsort_class(lt=lambda x, y: x[0] < y[0])
sort_ends = make_timsort_class(lt=lambda x, y: x[0] < y[0])
# This is just here in case my register alloc func messes up
# But I will require better tools for debugging my dumps.
# if True:
# tab = {}
# def opval_repr(op):
# return "%s:r%d" % (tab[op], op.i)
# for block in body.blocks:
# i = block.base
# for op in block:
# tab[op] = i
# i += 1
# for block in body.blocks:
# i = block.base
# for op in block:
# if op.start:
# print ("%s:" % op.start.repr()).ljust(8),
# else:
# print "".ljust(8)
# if isinstance(op, Constant):
# print "%4i: r%d = const %s" % (i, op.i, op.value.repr())
# elif isinstance(op, Variable):
# print "%4i: r%d = var %s" % (i, op.i, op.name)
# elif isinstance(op, SetLocal):
# print "%4i: r%d = var %s <- %s" % (i, op.i, op.name, opval_repr(op.value))
# else:
# print "%4i: r%d = %s (%s)" % (i, op.i, op.__class__.__name__, ' '.join(map(opval_repr, op.uses())))
# i += 1
# print "TMPC %d" % body.tmpc
def plot_range(ranges, key, pos):
if key not in ranges:
ranges[key] = (pos, pos)
else:
start, stop = ranges[key]
ranges[key] = (min(start, pos), max(stop, pos))
def reverse_postorder(entry):
seq = postorder_visit([], entry)
seq.reverse()
return seq
def postorder_visit(sequence, block):
if block.visited:
return
block.visited = True
for succ in block.succ:
postorder_visit(sequence, succ)
sequence.append(block)
return sequence
class ActivationRecord:
_immutable_fields_ = ['module', 'parent']
def __init__(self, module, parent):
self.var = {}
self.module = module
self.parent = parent
class Program(space.Object):
_immutable_fields_ = ['body']
def __init__(self, body):
self.body = body
def call(self, argv):
if len(argv) != 1:
raise space.Error(u"program expects module as an argument")
module = argv[0]
assert isinstance(module, space.Module)
frame = ActivationRecord(module, None)
return interpret(self.body, frame)
class Closure(space.Object):
_immutable_fields_ = ['frame', 'func']
def __init__(self, frame, func):
self.frame = frame
self.func = func
def call(self, argv):
argc = len(self.func.args)
if len(argv) < argc:
raise space.Error(u"closure requires %d arguments" % argc)
frame = ActivationRecord(self.frame.module, self.frame)
for i in range(argc):
frame.var[self.func.args[i]] = argv[i]
return interpret(self.func.body, frame)
class Generator(space.Object):
_immutable_fields_ = ['tmp', 'frame']
def __init__(self, block, tmp, frame, loop_break, op_i):
self.block = block
self.tmp = tmp
self.frame = frame
self.loop_break = loop_break
self.op_i = op_i
def iter(self):
return self
@Generator.builtin_method
def next(argv):
self = argv[0]
assert isinstance(self, Generator)
if len(argv) > 1:
self.tmp[self.op_i] = argv[1]
else:
self.tmp[self.op_i] = space.null
try:
interpret_body(self.block, self.tmp, self.frame, self.loop_break)
raise StopIteration()
except YieldIteration as yi:
self.block = yi.block
self.loop_break = yi.loop_break
self.op_i = yi.op_i
return yi.value
class YieldIteration(Exception):
_immutable_fields_ = ['block', 'loop_break', 'op_i', 'value']
def __init__(self, block, loop_break, op_i, value):
self.block = block
self.loop_break = loop_break
self.op_i = op_i
self.value = value
class Block:
_immutable_fields_ = ['index', 'contents[*]', 'succ']
def __init__(self, index, contents, succ):
self.index = index
self.contents = None
self.contents_mut = []
self.succ = succ
self.visited = False
def __iter__(self):
return iter(self.contents)
def __getitem__(self, index):
return self.contents[index]
def __len__(self):
return len(self.contents)
def append(self, op):
assert isinstance(op, Op)
self.contents_mut.append(op)
def freeze(self):
self.contents = self.contents_mut[:]
self.contents_mut = None
# def label(self):
# return "b" + str(self.index)
#
# def repr(self):
# out = "b" + str(self.index) + ":"
# for op in self:
# out += '\n '
# out += op.repr()
# return out
class Scope:
def __init__(self, parent=None):
self.blocks = []
self.block = self.new_block()
self.capture_catch = []
self.functions = []
self.bodies = []
self.chain = []
self.start = None
self.stop = None
self.is_generator = False
self.loop_stack = []
def new_block(self):
block = Block(-1, [], [])
self.blocks.append(block)
return block
def new_function(self, argv, body):
func = Function(argv)
self.functions.append(func)
self.bodies.append(body)
return self.add(func)
def new_label(self):
if len(self.block.contents_mut) > 0:
exit = self.new_block()
self.add(Jump(exit))
self.block = exit
return self.block
def add(self, op):
self.block.append(op)
op.start = self.start
op.stop = self.stop
return op
def capture(self, exp):
if len(self.capture_catch) == 0:
raise space.Error(u"%s: expecting capture" % exp.start.repr())
cap = self.capture_catch
self.capture_catch = []
return cap
def pull_chain(self):
chain = self.chain
self.chain = []
return chain
def close(self):
for block in self.blocks:
block.freeze()
for op in block:
if isinstance(op, Cond):
block.succ.extend([op.then, op.exit])
if isinstance(op, Jump):
block.succ.extend([op.exit])
if isinstance(op, SetBreak):
block.succ.extend([op.block])
if isinstance(op, Yield):
block.succ.extend([op.block])
return ProgramBody(self.blocks, self.functions, self.is_generator)
class Op:
_immutable_fields_ = ['i', 'start', 'stop', 'then', 'exit', 'value', 'body', 'args[*]', 'values[*]', 'name', 'cond', 'dst', 'src', 'it', 'block', 'upscope', 'ref']
i = 0
start = None
stop = None
# def repr(self):
# return str(self.__class__.__name__) + " " + self.args_str()
#
# def args_str(self):
# return "..."
def uses(self):
return []
class Assert(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
def uses(self):
return [self.value]
class ValuedOp(Op):
pass
# def repr(self):
# return str(self.i) + " = " + str(self.__class__.__name__) + " " + self.args_str()
class Function(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'args', 'body']
def __init__(self, args):
self.args = args
self.body = None
class Call(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'callee', 'args[*]']
def __init__(self, callee, args):
self.callee = callee
self.args = args[:]
def uses(self):
return [self.callee] + self.args
#
# def args_str(self):
# out = str(self.callee.i)
# for a in self.args:
# out += ", " + str(a.i)
# return out
class Cond(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'cond', 'then', 'exit']
def __init__(self, cond):
self.cond = cond
self.then = None
self.exit = None
def uses(self):
return [self.cond]
#
# def args_str(self):
# return str(self.cond.i) + ", " + self.then.label() + ", " + self.exit.label()
class Merge(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'dst', 'src']
def __init__(self, dst, src):
self.dst = dst
self.src = src
def uses(self):
return [self.dst, self.src]
#
# def args_str(self):
# return str(self.dst.i) + ", " + str(self.src.i)
class Jump(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'exit']
def __init__(self, exit):
self.exit = exit
class Iter(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
def uses(self):
return [self.value]
# It could be that the 'next' should be like 'iter', and that this
# operation should supply contents of SetBreak instead.
class Next(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'it']
def __init__(self, it):
self.it = it
def uses(self):
return [self.it]
class SetBreak(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'block']
def __init__(self, block):
self.block = block
# def args_str(self):
# return self.exit.label()
class Constant(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value']
def __init__(self, value):
self.value = value
class MakeList(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'values[*]']
def __init__(self, values):
self.values = values[:]
def uses(self):
return self.values
class GetAttr(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'name']
def __init__(self, value, name):
self.value = value
self.name = name
def uses(self):
return [self.value]
class GetItem(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'index']
def __init__(self, value, index):
self.value = value
self.index = index
def uses(self):
return [self.value, self.index]
class Variable(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name']
def __init__(self, name):
self.name = name
# def args_str(self):
# return self.name
class Yield(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'value', 'block']
def __init__(self, value, block):
self.value = value
self.block = block
def uses(self):
return [self.value]
class SetAttr(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name', 'value']
def __init__(self, obj, name, value):
self.obj = obj
self.name = name
self.value = value
def uses(self):
return [self.obj, self.value]
class SetItem(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'index', 'value']
def __init__(self, obj, index, value):
self.obj = obj
self.index = index
self.value = value
def uses(self):
return [self.obj, self.index, self.value]
class SetLocal(ValuedOp):
_immutable_fields_ = ['i', 'start', 'stop', 'name', 'value', 'upscope']
def __init__(self, name, value, upscope):
assert isinstance(name, unicode)
assert isinstance(value, ValuedOp)
self.name = name
self.value = value
self.upscope = upscope
def uses(self):
return [self.value]
class Return(Op):
_immutable_fields_ = ['i', 'start', 'stop', 'ref']
def __init__(self, ref):
self.ref = ref
def uses(self):
return [self.ref]
class Frame:
_virtualizable_ = ['tmp[*]'] # XXX
def __init__(self, tmp):
self = jit.hint(self, access_directly=True, fresh_virtualizable=True)
self.tmp = tmp
@always_inline
def store(self, index, value):
assert index >= 0
self.tmp[index] = value
@always_inline
def load(self, index):
assert index >= 0
return self.tmp[index]
def interpret(prog, frame):
block = prog.blocks[0]
tmp = []
for i in range(prog.tmpc):
tmp.append(space.null)
#for blk in prog.blocks:
# print blk.repr()
if prog.is_generator:
return Generator(block, tmp, frame, None, 0)
return interpret_body(block, tmp, frame, None)
def get_printable_location(pc, block, loop_break, cl_frame_module):
if loop_break is None:
return "pc=%d block=%d cl_frame_module=%s" % (pc, block.index, cl_frame_module.repr().encode('utf-8'))
return "pc=%d block=%d loop_break=%d cl_frame_module=%s" % (pc, block.index, loop_break.index, cl_frame_module.repr().encode('utf-8'))
#
#def get_printable_location(pc, block, loop_break):
# if loop_break is None:
# return "pc=%d block=%d" % (pc, block.index)
# return "pc=%d block=%d loop_break=%d" % (pc, block.index, loop_break.index)
jitdriver = jit.JitDriver(
greens=['pc', 'block', 'loop_break', 'module'],
reds=['cl_frame', 'frame'],
virtualizables = ['frame'], # XXX
get_printable_location=get_printable_location)
def interpret_body(block, t, cl_frame, loop_break):
frame = Frame(t)
pc = 0
module = jit.promote(cl_frame.module)
try:
while pc < len(block):
try:
jitdriver.jit_merge_point(
pc=pc, block=block, loop_break=loop_break, module=module,
cl_frame=cl_frame, frame=frame)
op = block[pc]
pc += 1
if isinstance(op, Call):
do_call(frame, op)
elif isinstance(op, Assert):
if space.is_false(frame.load(op.value.i)):
raise space.Error(u"Assertion error")
elif isinstance(op, Cond):
pc = 0
if space.is_false(frame.load(op.cond.i)):
block = op.exit
else:
block = op.then
elif isinstance(op, Jump):
pc = 0
block = op.exit
elif isinstance(op, Next):
frame.store(op.i, frame.load(op.it.i).callattr(u'next', []))
elif isinstance(op, Yield):
raise YieldIteration(op.block, loop_break, op.i, frame.load(op.value.i))
elif isinstance(op, SetBreak):
loop_break = op.block
elif isinstance(op, Iter):
frame.store(op.i, frame.load(op.value.i).iter())
elif isinstance(op, Constant):
frame.store(op.i, op.value)
elif isinstance(op, Variable):
frame.store(op.i, lookup(module, cl_frame, op.name))
elif isinstance(op, Merge):
frame.store(op.dst.i, frame.load(op.src.i))
elif isinstance(op, Function):
frame.store(op.i, Closure(cl_frame, op))
elif isinstance(op, MakeList):
contents = []
for val in op.values:
contents.append(frame.load(val.i))
frame.store(op.i, space.List(contents))
elif isinstance(op, GetAttr):
frame.store(op.i, frame.load(op.value.i).getattr(op.name))
elif isinstance(op, GetItem):
frame.store(op.i, frame.load(op.value.i).getitem(frame.load(op.index.i)))
elif isinstance(op, SetAttr):
frame.store(op.i, frame.load(op.obj.i).setattr(op.name, frame.load(op.value.i)))
elif isinstance(op, SetItem):
frame.store(op.i, frame.load(op.obj.i).setitem(
frame.load(op.index.i),
frame.load(op.value.i)))
elif isinstance(op, SetLocal):
frame.store(op.i, set_local(module, cl_frame, op.name, frame.load(op.value.i), op.upscope))
elif isinstance(op, Return):
return frame.load(op.ref.i)
else:
raise space.Error(u"spaced out")
except StopIteration as stopiter:
if loop_break is not None:
block = loop_break
loop_break = None
continue
op = block[pc-1]
error = space.Error(u"stop iteration")
error.stacktrace.append((cl_frame, op.start, op.stop))
raise error
raise space.Error(u"crappy compiler")
except space.Error as e:
op = block[pc-1]
e.stacktrace.append((cl_frame, op.start, op.stop))
raise e
@jit.unroll_safe
def do_call(frame, op):
callee = frame.load(op.callee.i)
argv = []
for arg in op.args:
argv.append(frame.load(arg.i))
frame.store(op.i, callee.call(argv))
def lookup(module, frame, name):
if frame.parent is None:
return module.getattr(name)
if name in frame.var:
return frame.var[name]
return lookup(module, frame.parent, name)
def set_local(module, frame, name, value, upscope):
if frame.parent is None:
return module.setattr(name, value)
elif upscope:
if name in frame.var:
frame.var[name] = value
return value
else:
return set_local(module, frame.parent, name, value, upscope)
else:
frame.var[name] = value
return value
def assert_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
val = translate(env, exp.exps[1])
env.add(Assert(val))
return val
def func_macro(env, exp):
argv = []
for i in range(1, len(exp.exps)):
arg = exp.exps[i]
if isinstance(arg, reader.Literal) and arg.name == u'symbol':
argv.append(arg.value)
else:
raise space.Error(u"%s: expected symbol inside func" % arg.start.repr())
body = env.capture(exp)
return env.new_function(argv, body)
def if_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
chain = env.pull_chain()
cond = Cond(translate(env, exp.exps[1]))
env.add(cond)
cond.then = env.block = env.new_block()
cond.exit = exit = env.new_block()
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(exit))
if len(chain) > 0:
first = chain[0]
if len(chain) > 1 and macro_name(first.exps[0]) != u'else' and len(first.exps) != 1:
raise space.Error(u"%s: non-else longer chains not supported" % exp.start.repr())
env.block, exit = exit, env.new_block()
val = translate_flow(env, first.capture)
env.add(Merge(cond, val))
env.add(Jump(exit))
env.block = exit
return cond
def return_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
return env.add(Return(translate(env, exp.exps[1])))
def while_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
current_loop = (loop, exit, _) = (env.new_label(), env.new_block(), False)
env.loop_stack.append(current_loop)
loop = env.new_label()
cond = env.add(Cond(translate(env, exp.exps[1])))
cond.then = env.block = env.new_block()
cond.exit = env.new_block()
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(loop))
env.block = cond.exit
loop_exit(env)
return cond
def and_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 3" % exp.name)
val = translate(env, exp.exps[1])
cond = env.add(Cond(val))
cond.then = env.block = env.new_block()
cond.exit = env.new_block()
env.add(Merge(val, translate(env, exp.exps[2])))
env.add(Jump(cond.exit))
env.block = cond.exit
return val
def or_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 3" % exp.name)
val = translate(env, exp.exps[1])
cond = env.add(Cond(val))
cond.exit = env.block = env.new_block()
cond.then = env.new_block()
env.add(Merge(val, translate(env, exp.exps[2])))
env.add(Jump(cond.then))
env.block = cond.then
return val
def syntax_chain(env, exp):
if len(exp.exps) < 3:
raise space.Error(u"no translation for %s with length < 3" % exp.name)
and_ = Variable(u'and')
if len(exp.exps) > 3:
env.add(and_)
lhs = translate(env, exp.exps[0])
op = translate(env, exp.exps[1])
rhs = translate(env, exp.exps[2])
res = env.add(Call(op, [lhs, rhs]))
i = 3
while i < len(exp.exps):
lhs = rhs
op = translate(env, exp.exps[i])
rhs = translate(env, exp.exps[i+1])
res = env.add(Call(and_, [
res,
env.add(Call(op, [lhs, rhs]))]))
i += 2
return res
def for_macro(env, exp):
if len(exp.exps) != 3:
raise space.Error(u"no translation for %s with length != 2" % exp.name)
var = exp.exps[1]
if not isinstance(var, reader.Literal):
raise space.Error(u"%s: format: for variable exp" % exp.start.repr())
it = env.add(Iter(translate(env, exp.exps[2])))
current_loop = (loop, exit, _) = (env.new_block(), env.new_block(), True)
env.loop_stack.append(current_loop)
cond = env.add(SetBreak(exit))
env.add(Jump(loop))
env.block = loop
env.add(SetLocal(var.value, env.add(Next(it)), False))
val = translate_flow(env, env.capture(exp))
env.add(Merge(cond, val))
env.add(Jump(loop))
env.block = exit
loop_exit(env)
return cond
def loop_exit(env):
_, exit, _ = env.loop_stack.pop(-1)
if len(env.loop_stack) > 0 and env.loop_stack[-1][2]:
env.add(SetBreak(env.loop_stack[-1][1]))
def break_macro(env, exp):
if len(exp.exps) != 1:
raise space.Error(u"%s: format: break" % exp.start.repr())
if len(env.loop_stack) == 0:
raise space.Error(u"%s: not inside a loop" % exp.start.repr())
return env.add(Jump(env.loop_stack[-1][1]))
def continue_macro(env, exp):
if len(exp.exps) != 1:
raise space.Error(u"%s: format: continue" % exp.start.repr())
if len(env.loop_stack) == 0:
raise space.Error(u"%s: not inside a loop" % exp.start.repr())
return env.add(Jump(env.loop_stack[-1][0]))
def yield_macro(env, exp):
if len(exp.exps) != 2:
raise space.Error(u"%s: format: yield expr" % exp.start.repr())
env.is_generator = True
val = translate(env, exp.exps[1])
yield_ = env.add(Yield(val, env.new_block()))
env.block = yield_.block
return yield_
def from_macro(env, exp):
if len(exp.exps) == 2:
exp.exps.extend(env.capture(exp))
if len(exp.exps) <= 2:
raise space.Error(u"%s: format: from expr symbol..." % exp.start.repr())
val = translate(env, exp.exps[1])
for attr in exp.exps[2:]:
if isinstance(attr, reader.Literal) and attr.name == u'symbol':
var = env.add(GetAttr(val, attr.value))
env.add(SetLocal(attr.value, var, False))
else:
raise space.Error(u"%s: expected symbol" % attr.start.repr())
return val
macros = {
u'break': break_macro,
u'continue': continue_macro,
u'assert': assert_macro,
u'func': func_macro,
u'for': for_macro,
u'if': if_macro,
u'return': return_macro,
u'while': while_macro,
u'and': and_macro,
u'or': or_macro,
u'yield': yield_macro,
u'from': from_macro,
}
chain_macros = [u'else']
def macro_name(exp):
if isinstance(exp, reader.Expr):
if exp.name == u'form' and len(exp.exps) > 0:
first = exp.exps[0]
if isinstance(first, reader.Literal) and first.name == u'symbol':
return first.value
return u""
def translate_flow(env, exps):
val = None
for chain in chains(exps):
val = translate_chain(env, chain)
assert val is not None
return val
def translate_map(env, exps):
res = []
for chain in chains(exps):
res.append(translate_chain(env, chain))
return res
def chains(exps):
out = []
chain = None
for exp in exps:
if chain is None:
chain = [exp]
elif macro_name(exp) in chain_macros:
chain.append(exp)
else:
out.append(chain)
chain = [exp]
if chain is not None:
out.append(chain)
return out
def translate_chain(env, chain):
chain_above = env.chain
exp = chain.pop(0)
env.chain = chain
val = translate(env, exp)
if len(env.chain) > 0:
raise space.Error(u"%s: chain without receiver" % exp.start.repr())
env.chain = chain_above
return val
def translate(env, exp):
start, stop = env.start, env.stop
env.start, env.stop = exp.start, exp.stop
res = translate_(env, exp)
env.start, env.stop = start, stop
return res
def translate_(env, exp):
if isinstance(exp, reader.Literal):
if exp.name == u'string':
return env.add(Constant(space.from_ustring(exp.value)))
elif exp.name == u'int':
return env.add(Constant(space.Integer(int(exp.value.encode('utf-8')))))
elif exp.name == u'hex':
return env.add(Constant(space.Integer(int(exp.value[2:].encode('utf-8'), 16))))
elif exp.name == u'float':
return env.add(Constant(space.Float(float(exp.value.encode('utf-8')))))
elif exp.name == u'symbol':
return env.add(Variable(exp.value))
raise space.Error(u"no translation for " + exp.name)
assert isinstance(exp, reader.Expr), exp.__class__.__name__
if exp.name == u'form' and len(exp.exps) > 0:
if macro_name(exp) in macros:
cc = env.capture_catch
if len(exp.capture) > 0:
env.capture_catch = exp.capture
res = macros[macro_name(exp)](env, exp)
if len(exp.capture) > 0 and len(env.capture_catch) > 0:
raise space.Error(u"%s: capture without receiver" % exp.start.repr())
env.capture_catch = cc
return res
# callattr goes here, if it'll be needed
args = translate_map(env, exp.exps)
callee = args.pop(0)
args.extend(translate_map(env, exp.capture))
return env.add(Call(callee, args))
elif exp.name == u'list':
return env.add(MakeList(translate_map(env, exp.exps)))
elif exp.name == u'attr' and len(exp.exps) == 2:
lhs, name = exp.exps
lhs = translate(env, lhs)
if not isinstance(name, reader.Literal):
raise space.Error(u"%s: bad attribute expr" % exp.repr())
return env.add(GetAttr(lhs, name.value))
elif exp.name == u'index' and len(exp.exps) == 2:
lhs, rhs = exp.exps
lhs = translate(env, lhs)
rhs = translate(env, rhs)
return env.add(GetItem(lhs, rhs))
elif exp.name == u'let' or exp.name == u'set':
lhs, rhs = exp.exps
rhs = translate(env, rhs)
return store_value(env, lhs, rhs, exp.name == u'set')
elif exp.name == u'aug' and len(exp.exps) == 3:
aug, lhs, rhs = exp.exps
if not isinstance(aug, reader.Literal):
raise space.Error(u"%s: bad augmented expr" % exp.repr())
rhs = translate(env, rhs)
return store_aug_value(env, aug, lhs, rhs)
elif exp.name == u'chain':
return syntax_chain(env, exp)
raise space.Error(u"no translation for " + exp.name)
def store_value(env, lhs, value, upscope):
if isinstance(lhs, reader.Literal) and lhs.name == u'symbol':
return env.add(SetLocal(lhs.value, value, upscope))
elif isinstance(lhs, reader.Expr) and lhs.name == u'attr' and len(lhs.exps) == 2:
obj, name = lhs.exps
obj = translate(env, obj)
assert isinstance(name, reader.Literal)
return env.add(SetAttr(obj, name.value, value))
elif isinstance(lhs, reader.Expr) and lhs.name == u'index' and len(lhs.exps) == 2:
obj, index = lhs.exps
obj = translate(env, obj)
index = translate(env, index)
return env.add(SetItem(obj, index, value))
else:
raise space.Error(u"no translation for " + lhs.name)
def store_aug_value(env, aug, lhs, value):
aug = env.add(Variable(aug.value))
if isinstance(lhs, reader.Literal) and lhs.name == u'symbol':
name = lhs.value
value = env.add(Call(aug, [env.add(Variable(name)), value]))
return env.add(SetLocal(name, value, True))
elif isinstance(lhs, reader.Expr) and lhs.name == u'attr' and len(lhs.exps) == 2:
obj, name = lhs.exps
assert isinstance(name, reader.Literal)
obj = translate(env, obj)
value = env.add(Call(aug, [env.add(GetAttr(obj, name.value)), value]))
return env.add(SetAttr(obj, name.value, value))
elif isinstance(lhs, reader.Expr) and lhs.name == u'index' and len(lhs.exps) == 2:
obj, index = lhs.exps
obj = translate(env, obj)
index = translate(env, index)
value = env.add(Call(aug, [env.add(GetItem(obj, index)), value]))
return env.add(SetItem(obj, index, value))
else:
raise space.Error(u"no translation for " + lhs.name)
def build_closures(parent):
for i in range(len(parent.functions)):
env = Scope(parent)
func = parent.functions[i]
translate_flow(env, parent.bodies[i])
w = env.add(Constant(space.null))
env.add(Return(w))
build_closures(env)
func.body = env.close()
def to_program(exps):
env = Scope()
if len(exps) == 0:
env.add(Return(env.add(Constant(space.null))))
return Program(env.close())
value = translate_flow(env, exps)
env.add(Return(value))
build_closures(env)
return Program(env.close())
| agpl-3.0 | -1,058,595,052,669,584,400 | 31.952096 | 167 | 0.557847 | false |
start-jsk/jsk_apc | demos/baxtergv6_apc2016/node_scripts/json_saver.py | 1 | 7469 | #!/usr/bin/env python
import datetime
from jsk_arc2017_common.msg import Content
from jsk_arc2017_common.msg import ContentArray
from jsk_arc2017_common.srv import UpdateJSON
from jsk_arc2017_common.srv import UpdateJSONResponse
import json
import os
import os.path as osp
import rospy
import shutil
from std_msgs.msg import String
from std_srvs.srv import Trigger
from std_srvs.srv import TriggerResponse
import threading
class JSONSaver(threading.Thread):
def __init__(self):
super(JSONSaver, self).__init__(target=self._run_services)
json_dir = rospy.get_param('~json_dir', None)
output_dir = rospy.get_param('~output_dir', None)
if json_dir is None:
rospy.logerr('must set json dir path to ~json_dir')
return
if output_dir is None:
rospy.logerr('must set output dir path to ~output_dir')
return
now = datetime.datetime.now()
output_dir = osp.join(output_dir, now.strftime('%Y%m%d_%H%M%S'))
if not osp.exists(output_dir):
os.makedirs(output_dir)
location_path = osp.join(json_dir, 'item_location_file.json')
self.output_json_path = osp.join(
output_dir, 'item_location_file.json')
if osp.exists(location_path):
shutil.copy(location_path, self.output_json_path)
with open(location_path) as location_f:
data = json.load(location_f)
else:
rospy.logerr(
                'item_location_file.json does not exist in {}'.format(location_path))
self.bin_contents = {}
for bin_ in data['bins']:
self.bin_contents[bin_['bin_id']] = bin_['contents']
self.tote_contents = data['tote']['contents']
self.cardboard_contents = {}
self.cardboard_ids = {}
# this is for pick task
# order file is only used in pick task
order_path = osp.join(json_dir, 'order_file.json')
if osp.exists(order_path):
output_order_path = osp.join(output_dir, 'order_file.json')
shutil.copy(order_path, output_order_path)
order_path = osp.join(json_dir, 'order_file.json')
with open(order_path) as order_f:
orders = json.load(order_f)['orders']
for order in orders:
size_id = order['size_id']
if len(order['contents']) == 2:
cardboard_id = 'A'
elif len(order['contents']) == 3:
cardboard_id = 'B'
else: # len(order['contents']) == 5
cardboard_id = 'C'
self.cardboard_ids[cardboard_id] = size_id
cardboard_contents = {}
for box in data['boxes']:
size_id = box['size_id']
cardboard_contents[size_id] = box['contents']
for key in 'ABC':
size_id = self.cardboard_ids[key]
self.cardboard_contents[key] = cardboard_contents[size_id]
# publish stamped json_dir
self.pub = rospy.Publisher('~output/json_dir', String, queue_size=1)
self.pub_bin = rospy.Publisher(
'~output/bin_contents',
ContentArray,
queue_size=1)
rate = rospy.get_param('~rate', 1)
self.timer_pub = rospy.Timer(rospy.Duration(1. / rate), self._cb_pub)
self.lock = threading.Lock()
self.daemon = True
def _cb_pub(self, event):
self.pub.publish(String(data=osp.dirname(self.output_json_path)))
contents_msg = ContentArray()
contents = []
for idx_ in range(0, len(self.bin_contents)):
bin_ = chr(ord('A') + idx_)
msg = Content()
msg.bin = bin_
msg.items = self.bin_contents[bin_]
contents.append(msg)
contents_msg.header.stamp = rospy.Time.now()
contents_msg.contents = contents
self.pub_bin.publish(contents_msg)
def _run_services(self):
self.services = []
self.services.append(rospy.Service(
'~update_json', UpdateJSON, self._update))
self.services.append(rospy.Service(
'~save_json', Trigger, self._save))
def _update(self, req):
is_updated = self._update_location(req)
is_saved = self._save_json()
is_updated = is_saved and is_updated
return UpdateJSONResponse(updated=is_updated)
def _save(self, req):
is_saved = self._save_json()
return TriggerResponse(success=is_saved)
def _save_json(self):
separators = (',', ': ')
self.lock.acquire()
is_saved = True
boxes = []
bins = []
if len(self.cardboard_contents.keys()) > 0:
for key in 'ABC':
boxes.append({
'size_id': self.cardboard_ids[key],
'contents': self.cardboard_contents[key]
})
for idx_ in range(0, len(self.bin_contents)):
bin_ = chr(ord('A') + idx_)
bins.append({
'bin_id': bin_,
'contents': self.bin_contents[bin_]
})
location = {
'bins': bins,
'boxes': boxes,
'tote': {
'contents': self.tote_contents,
}
}
try:
with open(self.output_json_path, 'w+') as f:
json.dump(
location, f, sort_keys=True,
indent=4, separators=separators)
except Exception:
rospy.logerr('could not save json in {}'
.format(self.output_json_path))
is_saved = False
self.lock.release()
return is_saved
def _update_location(self, req):
is_updated = True
self.lock.acquire()
item = req.item
src = req.src
dst = req.dst
if src[:3] == 'bin':
src = src[4]
try:
self.bin_contents[src].remove(item)
except Exception:
rospy.logerr('{0} does not exist in bin {1}'.format(item, src))
self.lock.release()
return False
elif src[:9] == 'cardboard':
src = src[10]
try:
self.cardboard_contents[src].remove(item)
except Exception:
rospy.logerr('{0} does not exist in bin {1}'.format(item, src))
self.lock.release()
return False
elif src == 'tote':
try:
self.tote_contents.remove(item)
except Exception:
rospy.logerr('{} does not exist in tote'.format(item))
self.lock.release()
return False
else:
            rospy.logerr('Invalid src request {}'.format(src))
is_updated = False
if dst[:3] == 'bin':
dst = dst[4]
self.bin_contents[dst].append(item)
elif dst[:9] == 'cardboard':
dst = dst[10]
self.cardboard_contents[dst].append(item)
elif dst == 'tote':
self.tote_contents.append(item)
else:
            rospy.logerr('Invalid dst request {}'.format(dst))
is_updated = False
self.lock.release()
return is_updated
if __name__ == '__main__':
rospy.init_node('json_saver')
json_saver = JSONSaver()
json_saver.start()
rospy.spin()
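
# Illustrative command-line usage (service and field names inferred from the
# UpdateJSON/Trigger handlers above; item and location names are hypothetical):
#
#   rosservice call /json_saver/update_json "{item: 'expo_eraser', src: 'bin_A', dst: 'tote'}"
#   rosservice call /json_saver/save_json "{}"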
| bsd-3-clause | 6,995,504,888,770,529,000 | 33.419355 | 79 | 0.528585 | false |
iw3hxn/LibrERP | account_financial_report_aeroo_xls/report/common_report_header.py | 1 | 6668 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) BrowseInfo (http://browseinfo.in)
# Copyright (C) Didotech SRL
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
# Mixin to use with rml_parse, so self.pool will be defined.
class common_report_header(object):
def _sum_debit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
'WHERE period_id IN %s AND journal_id IN %s ' + self.query_get_clause + ' ',
(tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, period_id=False, journal_id=False):
if journal_id and isinstance(journal_id, int):
journal_id = [journal_id]
if period_id and isinstance(period_id, int):
period_id = [period_id]
if not journal_id:
journal_id = self.journal_ids
if not period_id:
period_id = self.period_ids
if not (period_id and journal_id):
return 0.0
self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
'WHERE period_id IN %s AND journal_id IN %s ' + self.query_get_clause + '',
(tuple(period_id), tuple(journal_id)))
return self.cr.fetchone()[0] or 0.0
def _get_start_date(self, data):
if data.get('form', False) and data['form'].get('date_from', False):
return data['form']['date_from']
return ''
def _get_target_move(self, data):
if data.get('form', False) and data['form'].get('target_move', False):
if data['form']['target_move'] == 'all':
return _('All Entries')
return _('All Posted Entries')
return ''
def _get_end_date(self, data):
if data.get('form', False) and data['form'].get('date_to', False):
return data['form']['date_to']
return ''
def get_start_period(self, data):
if data.get('form', False) and data['form'].get('period_from', False):
return self.pool['account.period'].browse(self.cr, self.uid, data['form']['period_from'][0], self.context).name
return ''
def get_end_period(self, data):
if data.get('form', False) and data['form'].get('period_to', False):
return self.pool['account.period'].browse(self.cr, self.uid, data['form']['period_to'][0], self.context).name
return ''
def _get_account(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid, data['form']['chart_account_id'][0], self.context).name
return ''
def _get_sortby(self, data):
        raise NotImplementedError(_('Not implemented.'))
def _get_filter(self, data):
if data.get('form', False) and data['form'].get('filter', False):
if data['form']['filter'] == 'filter_date':
return self._translate('Date')
elif data['form']['filter'] == 'filter_period':
return self._translate('Periods')
return self._translate('No Filters')
def _sum_debit_period(self, period_id, journal_id=None):
journals = journal_id or self.journal_ids
if not journals:
return 0.0
self.cr.execute('SELECT SUM(debit) FROM account_move_line l '
'WHERE period_id=%s AND journal_id IN %s ' + self.query_get_clause + '',
(period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit_period(self, period_id, journal_id=None):
journals = journal_id or self.journal_ids
if not journals:
return 0.0
self.cr.execute('SELECT SUM(credit) FROM account_move_line l '
'WHERE period_id=%s AND journal_id IN %s ' + self.query_get_clause + ' ',
(period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _get_fiscalyear(self, data):
if data.get('form', False) and data['form'].get('fiscalyear_id', False):
return self.pool['account.fiscalyear'].browse(self.cr, self.uid, data['form']['fiscalyear_id'][0], self.context).name
return ''
def _get_company(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid,
data['form']['chart_account_id'][0], self.context).company_id.name
return ''
def _get_journal(self, data):
codes = []
if data.get('form', False) and data['form'].get('journal_ids', False):
self.cr.execute('select code from account_journal where id IN %s', (tuple(data['form']['journal_ids']),))
codes = [x for x, in self.cr.fetchall()]
return codes
def _get_currency(self, data):
if data.get('form', False) and data['form'].get('chart_account_id', False):
return self.pool['account.account'].browse(self.cr, self.uid, data['form'][
'chart_account_id'][0], self.context).company_id.currency_id.symbol
return ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
endrebak/epic | tests/blacklist/test_blacklist.py | 1 | 1241 | import pytest
from io import StringIO
from collections import namedtuple
import pandas as pd
from epic.blacklist.compute_poisson import compute_poisson
@pytest.fixture()
def matrix():
return pd.read_table(StringIO(u"""Chromosome Bin f1.bed f2.bed
chr1 0 1.0 1.0
chr1 200 1.0 1.0
chr1 400 1.0 10.0
chr1 600 1.0 1.0
chr1 800 10.0 500.0
chr1 1000 1.0 2.0
chr1 2000 1.0 2.0
chr1 2200 1.0 600.0
chr1 2400 1.0 2.0"""), sep=" ", header=0, index_col=[0, 1])
MockArgs = namedtuple("MockNamespace",
["number_cores", "genome", "keep_duplicates", "window_size",
"fragment_size", "bonferroni", "effective_genome_fraction", "chromosome_sizes"])
@pytest.fixture()
def effective_genome_size_dict():
return {"chr1": 2000}
@pytest.fixture()
def mock_args():
return MockArgs(1, "hg38", True, 200, 150, 0.05, 2000, None)
@pytest.fixture()
def expected_result():
return pd.read_table(StringIO(u"""Chromosome Bin End
0 chr1 800 999
1 chr1 2200 2399"""), index_col=0, header=0, sep="\s+")
def test_compute_poisson(matrix, mock_args, expected_result):
print(matrix)
result = compute_poisson(matrix, mock_args)
print(result)
print(expected_result)
assert result.equals(expected_result)
| mit | 3,399,528,446,603,903,000 | 20.77193 | 89 | 0.684126 | false |
goodmami/pydelphin | delphin/mrs/__init__.py | 1 | 4262 | # -*- coding: utf-8 -*-
"""
This module contains classes and methods related to Minimal Recursion
Semantics [MRS]_. In addition to MRS, there are the related formalisms
Robust Minimal Recursion Semantics [RMRS]_, Elementary Dependency
Structures [EDS]_, and Dependency Minimal Recursion Semantics [DMRS]_.
As a convenience, \*MRS refers to the collection of MRS and related
formalisms (so "MRS" then refers to the original formalism), and
PyDelphin accordingly defines :class:`~delphin.mrs.xmrs.Xmrs` as the
common subclass for the various formalisms.
Users will interact mostly with :class:`~delphin.mrs.xmrs.Xmrs`
objects, but will not often instantiate them directly. Instead, they
are created by serializing one of the various formats (such as
:mod:`delphin.mrs.simplemrs`, :mod:`delphin.mrs.mrx`, or
:mod:`delphin.mrs.dmrx`). No matter what serialization format (or
formalism) is used to load a \*MRS structure, it will be stored the
same way in memory, so any queries or actions taken on these structures
will use the same methods.
.. [MRS] Copestake, Ann, Dan Flickinger, Carl Pollard,
and Ivan A. Sag. "Minimal recursion semantics: An introduction."
Research on language and computation 3, no. 2-3 (2005): 281-332.
.. [RMRS] Copestake, Ann. "Report on the design of RMRS."
DeepThought project deliverable (2003).
.. [EDS] Stephan Oepen, Dan Flickinger, Kristina Toutanova, and
Christopher D Manning. Lingo Redwoods. Research on Language and
Computation, 2(4):575–596, 2004.;
Stephan Oepen and Jan Tore Lønning. Discriminant-based MRS
banking. In Proceedings of the 5th International Conference on
Language Resources and Evaluation, pages 1250–1255, 2006.
.. [DMRS] Copestake, Ann. Slacker Semantics: Why superficiality,
dependency and avoidance of commitment can be the right way to go.
In Proceedings of the 12th Conference of the European Chapter of
the Association for Computational Linguistics, pages 1–9.
Association for Computational Linguistics, 2009.
"""
# these may be order-sensitive
from .components import (
Lnk, Node, ElementaryPredication,
HandleConstraint, Pred, Link
)
from .xmrs import Xmrs, Mrs, Rmrs, Dmrs
from delphin.util import deprecated
__all__ = ['Lnk', 'Node', 'ElementaryPredication',
'HandleConstraint', 'Pred', 'Link', 'Xmrs', 'Mrs', 'Dmrs']
@deprecated(final_version='1.0.0', alternative='delphin.commands.convert()')
def convert(txt, src_fmt, tgt_fmt, single=True, **kwargs):
"""
    Convert a textual representation of \*MRS from the src_fmt
    representation to the tgt_fmt representation. By default, only
read and convert a single \*MRS object (e.g. for `mrx` this
starts at <mrs> and not <mrs-list>), but changing the `mode`
argument to `corpus` (alternatively: `list`) reads and converts
multiple \*MRSs.
Args:
txt: A string of semantic data.
src_fmt: The original representation format of txt.
tgt_fmt: The representation format to convert to.
single: If True, assume txt represents a single \*MRS, otherwise
read it as a corpus (or list) of \*MRSs.
kwargs: Any other keyword arguments to pass to the serializer
of the target format. See Notes.
Returns:
A string in the target format.
Notes:
src_fmt and tgt_fmt may be one of the following:
| format | description |
| --------- | ---------------------------- |
| simplemrs | The popular SimpleMRS format |
| mrx | The XML format of MRS |
| dmrx | The XML format of DMRS |
Additional keyword arguments for the serializer may include:
| option | description |
| ------------ | ----------------------------------- |
| pretty_print | print with newlines and indentation |
| color | print with syntax highlighting |
"""
from importlib import import_module
reader = import_module('{}.{}'.format('delphin.mrs', src_fmt.lower()))
writer = import_module('{}.{}'.format('delphin.mrs', tgt_fmt.lower()))
return writer.dumps(
reader.loads(txt, single=single),
single=single,
**kwargs
)
| mit | -4,533,146,085,247,655,000 | 43.789474 | 76 | 0.671445 | false |
DavidPurcell/murano_temp | murano/cfapi/cfapi.py | 1 | 13455 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_config import cfg
from oslo_log import log as logging
import retrying
import six
from webob import response
from murano.common.i18n import _LI, _LW
from murano.common import auth_utils # noqa
from murano.common import wsgi
from murano.db.services import cf_connections as db_cf
import muranoclient.client as muranoclient
from muranoclient.common import exceptions
from muranoclient.glance import client as glare_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Controller(object):
"""WSGI controller for application catalog resource in Murano v1 API"""
def _package_to_service(self, package):
srv = {}
srv['id'] = package.id
srv['name'] = package.name
if len(package.description) > 256:
srv['description'] = u"{0} ...".format(package.description[:253])
else:
srv['description'] = package.description
srv['bindable'] = True
srv['tags'] = []
for tag in package.tags:
srv['tags'].append(tag)
plan = {'id': package.id + '-1',
'name': 'default',
'description': 'Default plan for the service {name}'.format(
name=package.name)}
srv['plans'] = [plan]
return srv
def _make_service(self, name, package, plan_id):
id = uuid.uuid4().hex
return {"name": name,
"?": {plan_id: {"name": package.name},
"type": package.fully_qualified_name,
"id": id}}
def _get_service(self, env, service_id):
for service in env.services:
if service['?']['id'] == service_id:
return service
return None
def list(self, req):
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
kwargs = {'type': 'Application'}
packages = m_cli.packages.filter(**kwargs)
services = []
for package in packages:
services.append(self._package_to_service(package))
resp = {'services': services}
return resp
def provision(self, req, body, instance_id):
"""Here is the example of request body given us from Cloud Foundry:
{
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"organization_guid": "org-guid-here",
"space_guid": "space-guid-here",
"parameters": {"param1": "value1",
"param2": "value2"}
}
"""
data = json.loads(req.body)
space_guid = data['space_guid']
org_guid = data['organization_guid']
plan_id = data['plan_id']
service_id = data['service_id']
parameters = data['parameters']
self.current_session = None
# Here we'll take an entry for CF org and space from db. If we
# don't have any entries we will create it from scratch.
try:
tenant = db_cf.get_tenant_for_org(org_guid)
except AttributeError:
tenant = req.headers['X-Project-Id']
db_cf.set_tenant_for_org(org_guid, tenant)
LOG.info(_LI("Cloud Foundry {org_id} mapped to tenant "
"{tenant_name}").format(org_id=org_guid,
tenant_name=tenant))
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
def _set_new_environment_for_space(space_guid, log_msg):
body = {'name': 'my_{uuid}'.format(uuid=uuid.uuid4().hex)}
env = m_cli.environments.create(body)
db_cf.set_environment_for_space(space_guid, env.id)
LOG.info(log_msg.format(space_id=space_guid,
environment_id=env.id))
return env.id
try:
environment_id = db_cf.get_environment_for_space(space_guid)
# NOTE: Check that environment which was previously linked with
# CF space still exist, reset a new environment for space.
try:
env = m_cli.environments.get(environment_id)
except exceptions.HTTPNotFound:
msg = (_LI("Can not find environment_id {environment_id}, "
"will create a new one."
).format(environment_id=environment_id))
LOG.info(msg)
env = {}
if not env:
log_msg = (_LI("Cloud Foundry {space_id} remapped to "
"{environment_id}"))
environment_id = _set_new_environment_for_space(
space_guid, log_msg)
except AttributeError:
log_msg = (_LI("Cloud Foundry {space_id} mapped to "
"{environment_id}"))
environment_id = _set_new_environment_for_space(
space_guid, log_msg)
package = m_cli.packages.get(service_id)
LOG.debug('Adding service {name}'.format(name=package.name))
service = self._make_service(space_guid, package, plan_id)
db_cf.set_instance_for_service(instance_id, service['?']['id'],
environment_id, tenant)
# NOTE(Kezar): Here we are going through JSON and add ids where
# it's necessary. Before that we need to drop '?' key from parameters
# dictionary as far it contains murano package related info which is
# necessary in our scenario
if '?' in parameters.keys():
parameters.pop('?', None)
LOG.warning(_LW("Incorrect input parameters. Package related "
"parameters shouldn't be passed through Cloud "
"Foundry"))
params = [parameters]
while params:
a = params.pop()
for k, v in six.iteritems(a):
if isinstance(v, dict):
params.append(v)
if k == '?':
v['id'] = uuid.uuid4().hex
service.update(parameters)
# Now we need to obtain session to modify the env
session_id = create_session(m_cli, environment_id)
m_cli.services.post(environment_id,
path='/',
data=service,
session_id=session_id)
m_cli.sessions.deploy(environment_id, session_id)
self.current_session = session_id
return response.Response(status=202, json_body={})
def deprovision(self, req, instance_id):
service = db_cf.get_service_for_instance(instance_id)
if not service:
return {}
service_id = service.service_id
environment_id = service.environment_id
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
session_id = create_session(m_cli, environment_id)
m_cli.services.delete(environment_id, '/' + service_id, session_id)
m_cli.sessions.deploy(environment_id, session_id)
return response.Response(status=202, json_body={})
def bind(self, req, body, instance_id, app_id):
db_service = db_cf.get_service_for_instance(instance_id)
if not db_service:
return {}
service_id = db_service.service_id
environment_id = db_service.environment_id
token = req.headers['X-Auth-Token']
m_cli = _get_muranoclient(token, req)
session_id = create_session(m_cli, environment_id)
env = m_cli.environments.get(environment_id, session_id)
LOG.debug('Got environment {0}'.format(env))
service = self._get_service(env, service_id)
LOG.debug('Got service {0}'.format(service))
# NOTE(starodubcevna): Here we need to find an action which will return
# us needed credentials. By default we will looking for getCredentials
# action.
result = {}
try:
actions = service['?']['_actions']
for action_id in list(actions):
if 'getCredentials' in action_id:
@retrying.retry(retry_on_exception=lambda e: isinstance(e,
TypeError),
wait_random_min=1000,
wait_random_max=10000,
stop_max_delay=30000)
def _get_creds(client, task_id, environment_id):
result = m_cli.actions.get_result(environment_id,
task_id)['result']
return result
task_id = m_cli.actions.call(environment_id, action_id)
result = _get_creds(m_cli, task_id, environment_id)
if not result:
LOG.warning(_LW("This application doesn't have action "
"getCredentials"))
return response.Response(status=500)
except KeyError:
# NOTE(starodubcevna): In CF service broker API spec return
# code for failed bind is not present, so we will return 500.
LOG.warning(_LW("This application doesn't have actions at all"))
return response.Response(status=500)
if 'credentials' in list(result):
return result
else:
return {'credentials': result}
def unbind(self, req, instance_id, app_id):
"""Unsupported functionality
murano doesn't support this kind of functionality, so we just need
to create a stub where the call will come. We can't raise something
like NotImplementedError because we will have problems on Cloud Foundry
side. The best way now it to return empty dict which will be correct
answer for Cloud Foundry.
"""
return {}
def get_last_operation(self, req, instance_id):
service = db_cf.get_service_for_instance(instance_id)
# NOTE(freerunner): Prevent code 500 if requested environment
# already doesn't exist.
if not service:
LOG.warning(_LW('Requested service for instance {} is not found'))
body = {}
resp = response.Response(status=410, json_body=body)
return resp
env_id = service.environment_id
token = req.headers["X-Auth-Token"]
m_cli = _get_muranoclient(token, req)
# NOTE(starodubcevna): we can track only environment status. it's
# murano API limitation.
m_environment = m_cli.environments.get(env_id)
if m_environment.status == 'ready':
body = {'state': 'succeeded',
'description': 'operation succeed'}
resp = response.Response(status=200, json_body=body)
elif m_environment.status in ['pending', 'deleting', 'deploying']:
body = {'state': 'in progress',
'description': 'operation in progress'}
resp = response.Response(status=202, json_body=body)
elif m_environment.status in ['deploy failure', 'delete failure']:
body = {'state': 'failed',
'description': '{0}. Please correct it manually'.format(
m_environment.status)}
resp = response.Response(status=200, json_body=body)
return resp
def _get_muranoclient(token_id, req):
artifacts_client = None
if CONF.cfapi.packages_service in ['glance', 'glare']:
artifacts_client = _get_glareclient(token_id, req)
murano_url = CONF.murano.url or req.endpoints.get('murano')
if not murano_url:
LOG.error('No murano url is specified and no "application-catalog" '
'service is registered in keystone.')
return muranoclient.Client(1, murano_url, token=token_id,
artifacts_client=artifacts_client)
def _get_glareclient(token_id, req):
glare_settings = CONF.glare
url = glare_settings.url or req.endpoints.get('glare')
if not url:
LOG.error('No glare url is specified and no "artifact" '
'service is registered in keystone.')
return glare_client.Client(
endpoint=url, token=token_id,
insecure=glare_settings.insecure,
key_file=glare_settings.key_file or None,
ca_file=glare_settings.ca_file or None,
cert_file=glare_settings.cert_file or None,
type_name='murano',
type_version=1)
def create_session(client, environment_id):
id = client.sessions.configure(environment_id).id
return id
def create_resource():
return wsgi.Resource(Controller(),
serializer=wsgi.ServiceBrokerResponseSerializer())
| apache-2.0 | -3,214,018,305,001,191,000 | 38.690265 | 79 | 0.570197 | false |
mjbrownie/django-cloudmailin | cloudmailin/views.py | 1 | 2088 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden, HttpResponseServerError
import hashlib
def generate_signature(params, secret):
sig = "".join(params[k].encode('utf-8') for k in sorted(params.keys()) if k != "signature")
sig = hashlib.md5(sig + secret).hexdigest()
return sig
class MailHandler(object):
csrf_exempt = True
def __init__(self, *args, **kwargs):
super(MailHandler, self).__init__(*args, **kwargs)
self._addresses = {}
def __call__(self, request, *args, **kwargs):
params = dict((k, v) for k, v in request.POST.iteritems())
to = params.get('to', None)
if '+' in to:
lto = to.split('+')
to = lto[0] + "@" + lto[1].split('@')[1]
addr = self._addresses.get(to, None)
if addr is None:
return HttpResponseNotFound("recipient address is not found", mimetype="text/plain")
try:
if not self.is_valid_signature(params, addr['secret']):
return HttpResponseForbidden("invalid message signature", mimetype="text/plain")
addr['callback'](**params)
except Exception, e:
return HttpResponseServerError(e.message, mimetype="text/plain")
resp = HttpResponse("")
resp.csrf_exempt = True
return resp
def is_valid_signature(self, params, secret):
if 'signature' in params:
sig = generate_signature(params, secret)
return params['signature'] == sig
def register_address(self, address, secret, callback):
self._addresses["<%s>" % address] = {
'secret': secret,
'callback': callback,
}
return True
def unregister_address(self, address):
if address in self._addresses:
del self._addresses[address]
return True
return False
| bsd-3-clause | 6,135,572,548,960,023,000 | 31.625 | 106 | 0.573755 | false |
youknowone/instantauth | python/instantauthtests/test_verifier.py | 1 | 1247 |
import pytest
from instantauth.verifiers import BypassVerifier, DataKeyVerifier
from instantauth.verifiers.timehash import TimeHashVerifier
from instantauth.coders.json import JsonCoder
bypass = BypassVerifier('pubkey')
datakey = DataKeyVerifier(JsonCoder(), 'key')
timehash = TimeHashVerifier(now=lambda : 1000000000)
@pytest.mark.parametrize(('verifier', 'testcase'), [
# mod private_key public_key input output
(bypass, [('pvkey', 'pubkey', 'testdata', 'testdata')]),
(datakey, [('pvkey', 'pubkey', {"key": "pubkey"}, '{"key":"pubkey"}')]), # need better test, not round-trip one
(timehash,[('pvkey', 'pubkey', 'testdata', 'pubkey$3b9aca00f63f9ab09b4ea4b5e17e3fde03024c9d598e52ce$testdata')]),
])
def test_verifier(verifier, testcase, secret='SECRET'):
"""Round-trip test"""
for private, public, input, expected_output in testcase:
output = verifier.construct_data(input, private, public, secret)
assert output == expected_output
destructed = verifier.destruct_data(output, secret)
assert destructed.public_key == public
assert destructed.data == input
test = verifier.verify(destructed, private, secret)
assert test
| bsd-2-clause | 4,985,760,965,628,746,000 | 42 | 126 | 0.68324 | false |
costastf/toonlib | _CI/bin/semver.py | 1 | 6205 | # -*- coding: utf-8 -*-
import re
_REGEX = re.compile('^(?P<major>(?:0|[1-9][0-9]*))'
'\.(?P<minor>(?:0|[1-9][0-9]*))'
'\.(?P<patch>(?:0|[1-9][0-9]*))'
'(\-(?P<prerelease>[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?'
'(\+(?P<build>[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$')
_LAST_NUMBER = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')
if not hasattr(__builtins__, 'cmp'):
cmp = lambda a, b: (a > b) - (a < b)
def parse(version):
"""
Parse version to major, minor, patch, pre-release, build parts.
"""
match = _REGEX.match(version)
if match is None:
raise ValueError('%s is not valid SemVer string' % version)
verinfo = match.groupdict()
verinfo['major'] = int(verinfo['major'])
verinfo['minor'] = int(verinfo['minor'])
verinfo['patch'] = int(verinfo['patch'])
return verinfo
def compare(ver1, ver2):
def nat_cmp(a, b):
a, b = a or '', b or ''
convert = lambda text: (2, int(text)) if re.match('[0-9]+', text) else (1, text)
split_key = lambda key: [convert(c) for c in key.split('.')]
return cmp(split_key(a), split_key(b))
def compare_by_keys(d1, d2):
for key in ['major', 'minor', 'patch']:
v = cmp(d1.get(key), d2.get(key))
if v:
return v
rc1, rc2 = d1.get('prerelease'), d2.get('prerelease')
rccmp = nat_cmp(rc1, rc2)
build_1, build_2 = d1.get('build'), d2.get('build')
build_cmp = nat_cmp(build_1, build_2)
if not rccmp and not build_cmp:
return 0
if not rc1 and not build_1:
return 1
elif not rc2 and not build_2:
return -1
return rccmp or build_cmp
v1, v2 = parse(ver1), parse(ver2)
return compare_by_keys(v1, v2)
def match(version, match_expr):
prefix = match_expr[:2]
if prefix in ('>=', '<=', '=='):
match_version = match_expr[2:]
elif prefix and prefix[0] in ('>', '<', '='):
prefix = prefix[0]
match_version = match_expr[1:]
else:
raise ValueError("match_expr parameter should be in format <op><ver>, "
"where <op> is one of ['<', '>', '==', '<=', '>=']. "
"You provided: %r" % match_expr)
possibilities_dict = {
'>': (1,),
'<': (-1,),
'==': (0,),
'>=': (0, 1),
'<=': (-1, 0)
}
possibilities = possibilities_dict[prefix]
cmp_res = compare(version, match_version)
return cmp_res in possibilities
def max_ver(ver1, ver2):
cmp_res = compare(ver1, ver2)
if cmp_res == 0 or cmp_res == 1:
return ver1
else:
return ver2
def min_ver(ver1, ver2):
cmp_res = compare(ver1, ver2)
if cmp_res == 0 or cmp_res == -1:
return ver1
else:
return ver2
def format_version(major, minor, patch, prerelease=None, build=None):
version = "%d.%d.%d" % (major, minor, patch)
if prerelease is not None:
version = version + "-%s" % prerelease
if build is not None:
version = version + "+%s" % build
return version
def _increment_string(string):
# look for the last sequence of number(s) in a string and increment, from:
# http://code.activestate.com/recipes/442460-increment-numbers-in-a-string/#c1
match = _LAST_NUMBER.search(string)
if match:
next_ = str(int(match.group(1))+1)
start, end = match.span(1)
string = string[:max(end - len(next_), start)] + next_ + string[end:]
return string
def bump_major(version):
verinfo = parse(version)
return format_version(verinfo['major'] + 1, 0, 0)
def bump_minor(version):
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'] + 1, 0)
def bump_patch(version):
verinfo = parse(version)
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'] + 1)
def bump_prerelease(version):
verinfo = parse(version)
verinfo['prerelease'] = _increment_string(verinfo['prerelease'] or 'rc.0')
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'],
verinfo['prerelease'])
def bump_build(version):
verinfo = parse(version)
verinfo['build'] = _increment_string(verinfo['build'] or 'build.0')
return format_version(verinfo['major'], verinfo['minor'], verinfo['patch'],
verinfo['prerelease'], verinfo['build'])
# https://github.com/k-bx/python-semver/blob/master/LICENSE.txt
# Downloaded: 20160406 from https://pypi.python.org/pypi/semver version 2.4.1
# Copyright (c) 2013, Konstantine Rybnikov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| mit | 1,343,708,063,086,418,700 | 32.722826 | 88 | 0.607736 | false |
adriennekarnoski/data-structures | python/data_structures/test_priorityq.py | 1 | 1646 | """Test the functions of priorityq module."""
import pytest
def test_pop_removes_highest_priority():
"""Test pop method removed first value in highest priority."""
from priorityq import PriorityQueue
p = PriorityQueue()
for i in range(3):
p.insert(i)
p.insert(8, 2)
p.insert(10, 2)
assert p.pop() == 8
def test_pop_removes_raises_error_after_popping_all_values():
"""Test pop method removed first value in highest priority."""
from priorityq import PriorityQueue
p = PriorityQueue()
p.insert(8, 2)
p.insert(10, 2)
p.pop()
p.pop()
with pytest.raises(IndexError):
p.pop()
def test_pop_on_new_instance_raises_error():
"""Test pop method on new priority queue raises error."""
from priorityq import PriorityQueue
p = PriorityQueue()
with pytest.raises(IndexError):
p.pop()
def test_pop_on_emptied_queue_raises_error():
"""Test pop method on empties priority queue raises error."""
from priorityq import PriorityQueue
p = PriorityQueue()
p.insert(2)
p.pop()
with pytest.raises(IndexError):
p.pop()
def test_peek_shows_highest_priority():
"""Test the peek method to show highest priority value."""
from priorityq import PriorityQueue
p = PriorityQueue()
for i in range(3):
p.insert(i)
p.insert(8, 2)
p.insert(10, 2)
p.insert(0, 33)
assert p.peek() == 0
def test_peek_on_empty_priority_queue_returns_none():
"""Test the peek method returns none if no values available."""
from priorityq import PriorityQueue
p = PriorityQueue()
assert p.peek() is None
| mit | 834,841,151,389,414,300 | 25.126984 | 67 | 0.651883 | false |
athena-voice/athena-voice-client | athena/apis.py | 1 | 1210 | """
Finds and stores APIs in the 'api_lib' global variable
"""
import pkgutil
import inspect
import traceback
from athena import settings
api_lib = None
def find_apis():
""" Find APIs """
global api_lib
api_lib = {}
print('~ Looking for APIs in:', settings.API_DIRS)
for finder, name, _ in pkgutil.iter_modules(settings.API_DIRS):
try:
file = finder.find_module(name).load_module(name)
for member in dir(file):
obj = getattr(file, member)
if inspect.isclass(obj):
for parent in obj.__bases__:
if 'Api' is parent.__name__:
api = obj()
api_lib[api.key] = api
except Exception as e:
print(traceback.format_exc())
print('\n~ Error loading \''+name+'\' '+str(e))
def verify_apis(user):
""" Verify APIs """
global api_lib
api_lib = dict(api for api in api_lib.items() if api[1].verify_data(user))
def list_apis():
""" List APIs """
global api_lib
print('\n~ APIs: ', end='')
print(str(list(api_lib.keys()))[1:-1]+'\n')
| gpl-3.0 | 3,669,009,364,997,560,000 | 25.5 | 78 | 0.513223 | false |
wkiri/DEMUD | demud/demud.py | 1 | 92208 | #!/usr/bin/env python
# File: demud.py
# Author: Kiri Wagstaff, 2/28/13; James Bedell, summer 2013
#
# Implementation of DEMUD (Discovery through Eigenbasis Modeling of
# Uninteresting Data). See Wagstaff et al., AAAI 2013.
#
# Copyright 2013-2015, by the California Institute of Technology. ALL
# RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all applicable U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
import sys, os
import numpy as np
from numpy import linalg
from numpy import nanmean
import math
import copy, base64, time
import csv
import matplotlib
matplotlib.use('Agg')
import pylab
from dataset_uci_classes import GlassData, IrisData, EcoliData, AbaloneData, IsoletData
from dataset_float import FloatDataset
from dataset_float_classes import *
#from dataset_decals import DECaLSData
from dataset_des import DESData
#from dataset_gbtfil import GBTFilterbankData
#from dataset_misr import MISRDataTime
#from dataset_libs import LIBSData
#from dataset_finesse import FINESSEData
#from dataset_envi import ENVIData
#from dataset_envi import SegENVIData
#from dataset_irs import IRSData
#from dataset_kepler import KeplerData
#from dataset_mastcam import MastcamData
#from dataset_tc import TCData
#from dataset_navcam import NavcamData
from dataset_images import ImageData
#from exoplanet_lookup import ExoplanetLookup
#import kepler_lookup
import log
from log import printt
#from PIL import Image
import pickle
import optparse
from optparse import *
__VERSION__ = "1.7" # Adds compatibility for Windows filenames
default_k_values = {}
default_n_value = 10
use_max_n = False
#______________________________score_items_missing__________________________
def compute_error_with_missing(X, U, mu):
"""compute_error_with_missing(X, U, mu, missingmethod):
Calculate the score (reconstruction error) for every item in X,
with respect to the SVD model in U and mean mu for uninteresting items,
when there could be missing values in X (indicated with NaN).
If an item contains entirely NaNs (no good values),
its error will be 0. Maybe should be NaN instead?
Return an array of item reconstruction errors and their reprojections.
"""
# We want to ignore (work around) NaNs, without imputing.
# This is less efficient than with no NaNs:
# we have to process each item individually
# since they might have different missing values.
#diagS = np.diag(S).reshape(len(S), len(S))
reproj = np.zeros(X.shape) * np.nan
err = np.zeros(X.shape)
for i in range(X.shape[1]):
x = X[:,i].reshape(-1, 1)
# Usable features are not NaN in x nor in mu
isgood = ~np.logical_or(np.isnan(x), np.isnan(mu))
goodinds = np.where(isgood)[0]
numgood = len(goodinds)
if numgood == 0: # No good data! Do nothing (err for this item is 0)
pass
elif numgood == x.shape[0]: # All good -- normal processing.
proj = np.dot(U.T, x - mu)
reproj[:,i] = (np.dot(U, proj) + mu).squeeze()
err[:,i] = x.squeeze() - reproj[:,i]
else:
# Imputation/modeling method from Brand 2002
# X = U*(S*(((U*S)+)*X)) (eqn 11)
# Should we be using S? We aren't at the moment.
# Selectively use/fill only goodinds:
proj = np.dot(U[goodinds,:].T,
x[goodinds,0] - mu[goodinds,0])
reproj[goodinds,i] = np.dot(U[goodinds,:], proj) + mu[goodinds,0]
err[goodinds,i] = x[goodinds,0] - reproj[goodinds,i]
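      # Features that are NaN in x (or in mu) keep reproj = NaN and err = 0,
      # so they contribute nothing to this item's reconstruction error.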
return (err, reproj)
#______________________________score_items_________________________________
def score_items(X, U, mu,
scoremethod='lowhigh',
missingmethod='none',
feature_weights=[]):
"""score_items(X, U, scoremethod, missingmethod, feature_weights)
Calculate the score (reconstruction error) for every item in X,
with respect to the SVD model in U and mean mu for uninteresting items.
'scoremethod' indicates which residual values count towards
the interestingness score of each item:
- 'low': negative residuals
- 'high': positive residuals
- 'lowhigh': both
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values following Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true.
'feature_weights' influence how much each feature contributes to the score.
Return an array of item reconstruction scores and their reprojections.
"""
# Use U to model and then reconstruct the data in X.
# 1. Project all data in X into space defined by U,
# then reconstruct it.
if missingmethod.lower() != 'ignore':
# All missing values should have been replaced with 0,
# or non-existent.
# 1a. Subtract the mean and project onto U
proj = np.dot(U.T, (X - mu))
# 1b. Reconstruct by projecting back up and adding mean
reproj = np.dot(U, proj) + mu
# 1c. Compute the residual
#print 'X:', X.T
#print 'reproj:', reproj.T
err = X - reproj
#print 'err:', err.T
#raw_input()
else:
# Missing method must be 'ignore' (Brand 2002)
(err, reproj) = compute_error_with_missing(X, U, mu)
# 2. Compute reconstruction error
if scoremethod == 'low': # Blank out all errors > 0
err[err>0] = 0
elif scoremethod == 'high': # Blank out all errors < 0
err[err<0] = 0
else: # default, count everything
pass
# Weight features if requested
if feature_weights != []:
for i in range(len(feature_weights)):
err[i,:] = err[i,:] * feature_weights[i]
if missingmethod.lower() == 'ignore':
# Only tally error for observed features.
# This means that items with missing values are not penalized
# for those features, which is probably the best we can do.
scores = np.nansum(np.array(np.power(err, 2)), axis=0)
else:
scores = np.sum(np.array(np.power(err, 2)), axis=0)
#print 'scores:', scores
#print 'reproj:', reproj
#raw_input()
return (scores, reproj)
#______________________________select_next_________________________________
def select_next(X, U, mu,
scoremethod='lowhigh',
missingmethod='none',
feature_weights=[],
oldscores=[], oldreproj=[]):
"""select_next(X, U, mu, scoremethod, missingmethod, feature_weights)
Select the next most-interesting item in X,
given model U, singular values S, and mean mu for uninteresting items.
'scoremethod' indicates which residual values count towards
the interestingness score of each item:
- 'low': negative residuals
- 'high': positive residuals
- 'lowhigh': both
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values following Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true.
'feature_weights' influence how much each feature contributes to the score.
'oldscores' provides the scores calculated in the previous iteration;
if not empty, skip scoring and just return the next best.
Likewise, 'oldreproj' is needed if we do this shortcut.
Return the index of the selected item, its reconstruction,
its reconstruction score, and all items' reconstruction scores.
"""
print "------------ SELECTING --------------"
if U == []:
printt("Empty DEMUD model: selecting item number %d from data set" % \
(log.opts['iitem']))
return log.opts['iitem'], [], []
if X.shape[1] < 1 or U == [] or mu == []:
printt("Error: No data in X and/or U and/or mu.")
return None, [], []
if X.shape[0] != U.shape[0] or X.shape[0] != mu.shape[0]:
printt("Mismatch in dimensions; must have X mxn, U mxk, mu mx1.")
return None, [], []
# If oldscores is empty, compute the score for each item
if oldscores == []:
(scores, reproj) = score_items(X, U, mu, scoremethod, missingmethod)
elif oldreproj == []:
printt("Error: oldscores provided, but not oldreproj.")
return None, [], []
else: # both are valid, so use them here
(scores, reproj) = (oldscores, oldreproj)
# Select and return index of item with max reconstruction error,
# plus the updated scores and reproj
m = scores.argmax()
#print 'mu:',mu
#print 'selected:', X[:,m]
#print 'selected-mu:', (X-mu)[:,m]
#print 'reproj:', reproj[:,m]
#print 'reproj-mu:', (reproj-mu)[:,m]
#raw_input()
return m, scores, reproj
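# Example use of select_next (illustrative only): three 2-D items, 1-D model.
#   U  = np.array([[1.], [0.]])            # single basis vector along feature 0
#   mu = np.array([[0.], [0.]])            # model mean
#   X  = np.array([[1., 0., 0.5],
#                  [0., 2., 0.5]])
#   m, scores, reproj = select_next(X, U, mu)
# Item 1 (second column) is selected: its residual lies entirely outside the
# span of U, giving the largest reconstruction error (scores = [0, 4, 0.25]).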
#______________________________select_next_NN______________________________
def select_next_NN(X, x):
"""select_next_NN(X, x)
Select the nearest neighbor to x in X.
Return the index of the selected item.
"""
if X == [] or x == []:
printt("Error: No data in X and/or x.")
return None
if X.shape[0] != x.shape[0]:
printt("Mismatch in dimensions; must have X mxn, x mx1.")
return None
# Compute the (Euclidean) distance from x to all items in X
scores = np.apply_along_axis(linalg.norm, 0, X - x[:,np.newaxis])
# Select and return item with min distance to x
m = scores.argmin()
return m
#______________________________update_model________________________________
def update_model(X, U, S, k, n, mu,
svdmethod='full',
missingmethod='zero'):
"""update_model(X, U, S, k, n, mu, svdmethod, missingmethod)
Update SVD model U,S (dimensionality k)
by either adding items in X to it,
or regenerating a new model from X,
assuming U already models n items with mean mu.
Technically we should have V as well, but it's not needed.
'svdmethod' indicates type of update to do:
- 'full': Recompute SVD from scratch. Discards current U, S.
- 'increm-ross': Ross et al.'s method for incremental update,
with mean tracking.
- 'increm-brand': Brand's incremental SVD method
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values inspired by Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true.
Return new U, S, mu, n, and percent variances.
"""
if X == []:
printt("Error: No data in X.")
return None, None, None, -1, None
#print '%d items in X' % X.shape[1]
#print 'init U:', U
# If there is no previous U, and we just got a single item in X,
# set U to all 0's (degenerate SVD),
# and return it with mu.
# (PR #22 sets first value to 1; see decals implementation)
if U == [] and X.shape[1] == 1:
mu = X
# Do this no matter what. Let mu get NaNs in it as needed.
U = np.zeros_like(mu)
U[0] = 1
S = np.array([0])
n = 1
pcts = [1.0]
return U, S, mu, n, pcts
###########################################################################
# Do full SVD of X if this is requested, regardless of what is in U
# Also, if n = 0 or U is empty, start from scratch
output_k = False
if svdmethod == 'full' or U == [] or n == 0:
if n == 0:
if U == []:
printt("----- initial SVD -----")
output_k = True
else:
# Reshape so we don't have an empty dimension (yay python)
U = U.reshape(-1, 1)
elif U == []:
printt("WARNING: N (number of items modeled by U) is %d, not zero, but U is empty!" % n)
# Bootstrap
if missingmethod == 'ignore':
printt("ERROR: ignore with full is not possible under ordinary circumstances.")
printt("Use --increm-brand to impute for NaNs.")
printt("For now, we are filling NaNs with 0.")
X = copy.deepcopy(X)
z = np.where(np.isnan(X))
X[z] = 0
mu = np.mean(X, axis=1).reshape(-1,1)
X = X - mu
U, S, V = linalg.svd(X, full_matrices=False)
printt('Just did full SVD on %d items.' % X.shape[1])
#print 'X:',X
#print 'U:',U
# Reset U to all 0's if we only have one item in X (degenerate SVD)
if X.shape[1] == 1:
U = np.zeros_like(U)
# Keep only the first k components
S_full = S
S = S[0:k]
U = U[:,0:k]
# Update n to number of new items in X
n = X.shape[1]
###########################################################################
# Incremental SVD from Ross
elif svdmethod == 'increm-ross':
# Incremental SVD from Ross et al. 2008
# "Incremental Learning for Robust Visual Tracking"
# based on Lim and Ross's sklm.m implementation in MATLAB.
# This method DOES NOT handle missing values.
if missingmethod == 'ignore':
print 'ERROR: increm-ross cannot handle missing values.'
print 'If they are present, try svdmethod=increm-brand'
print ' or use missingmethod=zero to zero-fill.'
print 'If there are no missing values, specify missingmethod=none.'
sys.exit(1)
n_new = X.shape[1]
# Compute mean
# Weirdly, the later 'X-mu_new' is MUCH faster if you reshape as shown.
# This is because of differences in the way numpy treats a 1d array versus a 2d column.
mu_new = np.mean(X, axis=1).reshape(-1,1)
# Subtract the mean, append it as a column vector, and update mu
# X - mu_new will be zero if X has only 1 item
mu_old = mu
# New mu is a weighted sum of old and new mus
mu = (n * mu_old + n_new * mu_new) / (n + n_new)
B = np.hstack((X - mu,
math.sqrt(n_new * n/float(n_new+n)) * \
(mu_old - mu_new)))
printt("Now tracking mean for %d -> %d items; mu.min %f, mu.max %f " % \
(n, n+n_new, np.nanmin(mu), np.nanmax(mu)))
n = n + n_new
if S.all() == 0:
npcs = U.shape[1]
diagS = np.zeros((npcs, npcs))
else:
diagS = np.diag(S)
# I don't think this is right. At this point B is the augmented
# matrix rather than the single observation.
proj = np.dot(U.T, B)
reproj_err = B - np.dot(U, proj)
# to get orthogonal form of reproj_err
# This should return q with dimensions [d(X) by n_new+1], square
q, dummy = linalg.qr(reproj_err, mode='full')
# print 'q.shape should be 7x2: ', q.shape
Q = np.hstack((U, q))
# From Ross and Lim, 2008
# R = [ [ Sigma, U.T * X ] [ 0, orthog. component of reproj error ] ]
k_now = diagS.shape[0]
new_dim = k_now + n_new + 1
R = np.zeros((new_dim, new_dim))
R[0:k_now,0:k_now] = diagS
R[0:k_now,k_now:] = proj
orthog_reproj_err = np.dot(q.T, reproj_err)
R[k_now:, k_now:] = orthog_reproj_err
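    # R is square with side (k_now + n_new + 1) and block structure
    #   [[ diagS, proj ], [ 0, orthog_reproj_err ]].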
# Perform SVD of R. Then finally update U.
U, S, V = linalg.svd(R, full_matrices=False)
printt('Just did increm-ross SVD on %d items.' % n)
U = np.dot(Q, U)
# Keep only the first k components
U = U[:,0:min([n,k])]
S_full = S
S = S[0:min([n,k])]
###########################################################################
# Incremental SVD from Brand
elif svdmethod == 'increm-brand':
# Pulled out James's attempt to handle NaNs into
# increm-brand-james.py. Starting over from scratch here.
n_new = X.shape[1]
if n_new != 1:
print "WARNING: increm-brand will probably only work by adding one item at a time."
raw_input('\nPress enter to continue or ^C/EOF to exit. ')
if missingmethod == 'ignore':
# 1. Update mu
mu_old = mu
mu_new = X
# Be careful! For any pre-existing NaNs in mu,
# let mu_new fill them in. Can't get any worse!
naninds = np.where(np.isnan(mu_old))[0]
if naninds.size > 0:
mu_old[naninds,0] = mu_new[naninds,0]
# And likewise for mu_new -- fill with good values from mu_old.
naninds = np.where(np.isnan(mu_new))[0]
if naninds.size > 0:
mu_new[naninds,0] = mu_old[naninds,0]
# At this point, the only NaNs that should appear are
# values that were NaN for both mu and X to start with.
# They will stay NaN and that's okay.
# New mu is a weighted sum of old and new mus
mu = (n * mu_old + n_new * mu_new) / (n + n_new)
printt("Now tracking mean for %d -> %d items; mu.min %f, mu.max %f " % \
(n, n+n_new, np.nanmin(mu), np.nanmax(mu)))
n = n + n_new
# 2. Subtract off the mean
X = X - mu
# 3. Compute L, the projection of X onto U
# Note: this will only work for a single item in X
goodinds = np.where(~np.isnan(X))[0]
#print 'X: %d of %d are good.' % (len(goodinds), X.shape[0])
diagS = np.diag(S)
# This is Brand's method, which involves S:
L = np.dot(diagS,
np.dot(np.linalg.pinv(np.dot(U[goodinds,:],
diagS)),
X[goodinds,:]))
# Simplified version that does not use S (but is probably wrong):
#L = np.dot(U[goodinds,:].T,
# X[goodinds,:])
# Top row of the Q matrix (eqn 12, Brand 2002)
Q1 = np.hstack([diagS, L])
# 4. Compute J, the orthogonal basis of H, which is
# the component of X orthog to U (i.e., unrepresentable direction)
# 5. Compute K, the projection of X onto J (i.e., unrep. content)
K = linalg.norm(X[goodinds,:] - np.dot(U[goodinds,:],
np.dot(U[goodinds,:].T,
X[goodinds,:])))
# H = X - UL
J = np.zeros((U.shape[0], 1))
J[goodinds] = np.dot(K,
np.linalg.pinv(X[goodinds,:] -
np.dot(U[goodinds,:],
L))).T
# Bottom row of Q matrix (eqn 12, Brand 2002)
Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])
Q = np.vstack([Q1, Q2])
# 6. Take the SVD of Q
Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)
# 7. Update U and S (eqn 4, Brand 2002)
# Note: Since J is zero-filled for badinds, now U is too.
# Alternatively, we give J NaNs and let them get into U as well.
# I think that is a worse idea though.
U = np.dot(np.hstack([U, J]), Uq)
S = Sq
# Updating V requires knowing old V,
# but we don't need the new one either so it's okay to skip.
printt('Just did increm-brand SVD on %d items.' % n)
############# end ###########
else: # No missing values (or not 'ignore')
# 1. Update mu
mu_old = mu
mu_new = X
# New mu is a weighted sum of old and new mus
mu = (n * mu_old + n_new * mu_new) / (n + n_new)
n = n + n_new
# 2. Subtract off the mean
X = X - mu
# 3. Compute L, the projection of X onto U
L = np.dot(U.T, X)
Q1 = np.hstack([np.diag(S), L])
# 4. Compute J, the orthogonal basis of H, which is
# the component of X orthog to U (i.e., unrepresentable direction)
# 5. Compute K, the projection of X onto J (i.e., unrep. content)
JK = X - np.dot(U, L)
(J, K) = linalg.qr(JK)
Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])
Q = np.vstack([Q1, Q2])
# 6. Take the SVD of Q
Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)
# 7. Update U and S (eqn 4, Brand 2002)
U = np.dot(np.hstack([U, J]), Uq)
S = Sq
# V requires knowing old V,
# but we don't need the new one either so it's okay.
printt('Just did regular increm SVD on %d items.' % n)
# Keep only the first k components
U = U[:,0:min([n,k])]
S = S[0:min([n,k])]
Usum = U.sum(1)
###########################################################################
# We have a bad svdmethod, but somehow didn't catch it earlier.
else:
printt("504: Bad Gateway in protocol <Skynet_authentication.exe>")
return None, None, None, None, None
indivpcts = None
# This only works if a full SVD was done
if (svdmethod == 'full' and output_k and log.opts['k_var'] == -773038.0):
# Calculate percent variance captured by each
cumsum = np.cumsum(S_full)
#print cumsum.shape
if cumsum[-1] != 0:
indivpcts = S / cumsum[-1]
indivpcts = indivpcts[0:k] # truncate to first k
cumpercents = cumsum / cumsum[-1]
else:
indivpcts = []
# Calculate percent variance captured
if k >= cumsum.shape[0]:
printt('Cannot estimate data variance; specified k (%d) exceeds the number of SVs (%d).' % (k, cumsum.shape[0]))
else:
printt("Selected value of k=%d captures %5.2f%% of the data variance" % \
(k, cumpercents[k-1] * 100))
if log.opts['pause']: raw_input("Press enter to continue\n")
#print 'U:', U
#print 'mu:', mu
return U, S, mu, n, indivpcts
#______________________________demud_______________________________________
def demud(ds, k, nsel, scoremethod='lowhigh', svdmethod='full',
missingmethod='none', feature_weights=[],
start_sol=None, end_sol=None, flush_parameters=False):
"""demud(ds, k, nsel, scoremethod, svdmethod, missingmethod, feature_weights):
Iteratively select nsel items from data set ds,
using an incremental SVD model of already-seen items
with dimensionality k.
'scoremethod' indicates which residual values count towards
the interestingness score of each item:
- 'low': negative residuals
- 'high': positive residuals
- 'lowhigh': both
'svdmethod' indicates type of update to do:
- 'full': Recompute SVD from scratch.
- 'increm-ross': Ross et al.'s method for incremental update,
with mean tracking.
- 'increm-brand': Brand's incremental SVD method
'missingmethod' indicates how to handle missing (NaN) values:
- 'zero': set missing values to zero
- 'ignore': ignore missing values following Brand (2002)
- 'none': assert nothing is missing (NaN). Die horribly if not true. (Default)
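  Return (sels, sels_idx): the indices of the selected items in the original
  data set, and their indices into the working copy of the data at the time
  each selection was made. Returns None (or nothing) on error or when the
  n_coi limit stops the run early.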
"""
# Sanity-check/fix nsel
if nsel > ds.data.shape[1]:
nsel = ds.data.shape[1]
printt("Running DEMUD version %s for %d iterations using k=%d" %
(__VERSION__, nsel, k))
###########################################################################
# Check to ensure that parameters are valid
if ds.data == []:
printt("Error: No data in ds.data.")
return
if k < 1:
printt("Error: k must be at least 1.")
return
if nsel < 1:
printt("Error: nsel must be at least 1.")
return
elif nsel == 0:
printt("Warning: nsel = 0. This means demud will do nothing, slowly.")
if 'iitem' not in log.opts or flush_parameters:
# Temporary hack to allow the demud() method to be called from external scripts.
# Better long-term support for this should probably exist.
log.opts['iitem'] = 'mean'
log.opts['shotfilt'] = 0
log.opts['fft'] = False
log.opts['static'] = False
log.opts['coi'] = None
log.opts['note'] = None
log.opts['coiaction'] = None
log.opts['plotvariance'] = False
log.opts['kepler'] = False
log.opts['plot'] = True
log.opts['mastcam'] = False
log.opts['interactive'] = False
log.opts['alwaysupdate'] = False
log.opts['start_sol'] = start_sol
log.opts['end_sol'] = end_sol
log.opts['log'] = True
log.opts['k'] = k
log.opts['k_var'] = False
log.opts['svdmethod'] = svdmethod
log.opts['missingdatamethod'] = missingmethod
log.opts['svd_print'] = False
log.opts['md_print'] = False
log.opts['clean'] = False
log.opts['printk'] = False
log.opts['score_print'] = False
log.opts['fw_print'] = False
log.opts['fun'] = False # no fun for you!
log.opts['pause'] = False
print "No method of initializing the dataset was chosen. Defaulting to mean."
log.opts['start_sol'] = start_sol
log.opts['end_sol'] = end_sol
###############################################
# Add experiment information to dataset name
# TODO: store this information in a text file within the directory instead,
# and find another way to usefully create distinct directory names (maybe nested)
origname = ds.name
ds.name += '-k=' + str(k)
ds.name += '-dim=' + str(ds.data.shape[0])
ds.name += '-' + svdmethod
if scoremethod != 'lowhigh': ds.name += '-score=' + scoremethod
if missingmethod != "none": ds.name += '-missing=' + missingmethod
if feature_weights != []: ds.name += '-featureweight=' + os.path.basename(log.opts['fw'])
if log.opts['sol'] != -1: ds.name += '-sol%d' % log.opts['sol']
if log.opts['sols'] != None: ds.name += '-sol%d' % log.opts['start_sol']
if log.opts['sols'] != None: ds.name += '-%d' % log.opts['end_sol']
if ds.initfilename != '' and ds.initfilename != None:
ds.name += '-init-file'
if log.opts['init_prior_sols'] == True:
ds.name += '-init-prior'
else:
ds.name += '-init_item=' + str(log.opts['iitem'])
if log.opts['shotfilt'] != 0: ds.name += '-shotfilt=%d' % log.opts['shotfilt']
if log.opts['fft']: ds.name += "-fft"
if log.opts['static']: ds.name += '-static'
if log.opts['coi'] != None: ds.name += '-coi-' + log.opts['coiaction'] \
+ '=' + log.opts['coi']
if log.opts['note'] != None: ds.name += '-' + log.opts['note']
###############################################
# Set up output directories for plots and logging
if not os.path.exists('results'):
os.mkdir('results')
outdir = os.path.join('results', ds.name)
if not os.path.exists(outdir):
os.mkdir(outdir)
log.logfilename = os.path.join(outdir, 'demud.log')
log.logfile = open(log.logfilename, 'w')
# Save RGB visualization, if appropriate
try:
if (isinstance(ds, SegENVIData) or
isinstance(ds, ENVIData)):
ds.write_RGB(os.path.join(outdir,
'%s-rgb-viz.png' % ds.name.split('-')[1]))
except:
printt("SegENVIData and ENVIData not imported.")
###############################################
# Print dataset info
printt("Dataset: " + ds.name)
printt(" Read from " + ds.filename)
printt(" Dimensions (features x items): %d x %d" % ds.data.shape)
###############################################
# Plot variance
if log.opts['plotvariance']:
plot_variance(ds)
###############################################
# Check once and for all if the input data contains NaN's.
X = ds.data
# Warn user what to do if it's true and s/he aren't ready for it
nans = np.isnan(X)
if np.sum(nans) > 0:
#If no missingmethod selected, code will break. Prevent that noisily.
if missingmethod == 'none':
printt('WARNING! Input data contains NaNs but no handling method has been chosen.')
printt('Please use one of the following, knowing ignore is MUCH slower:')
printt('--missingdatamethod=zero/ignore')
sys.exit(-1)
#If we are going to handle NaN's with zeroing, just do it now
if missingmethod == 'zero':
z = np.where(nans)
if len(z) > 0:
printt("Filling NaNs with 0: %d / %d = %0.2f%%" % (np.sum(nans), X.shape[0]*X.shape[1], np.sum(nans)/float(X.shape[0]*X.shape[1])*100))
X[z] = 0
#Let user know we're in ignore NaN mode
if missingmethod == 'ignore':
printt('Missing data (NaNs) will be ignored. This is a very slow operation.')
else:
printt('No NaNs in this data file, rest easy.')
###############################################
# Initialize the model
U = []
# S = np.array([0])
S = np.array([1])
mu = []
n = 0
pcts = []
# Initial dataset is supplied
if ds.initfilename != '' and ds.initfilename != None:
log.opts['iitem'] = -1 # not really, but default is 0
printt('Initializing model with data from %s' % ds.initfilename)
U, S, mu, n, pcts = update_model(ds.initdata, U, S, k, n=0, mu=[],
svdmethod=svdmethod,
missingmethod=missingmethod)
# Doing a full SVD
elif log.opts['static'] or log.opts['iitem'] in ('-1','svd','SVD'):
log.opts['iitem'] = -1
printt('Doing initial SVD to get started.')
U, S, mu, n, pcts = update_model(X, U, S, k, n=0, mu=[],
svdmethod=svdmethod,
missingmethod=missingmethod)
# Select random item
elif log.opts['iitem'] in ('r','R','random','RANDOM'):
randitem = np.random.randint(X.shape[1])
printt('Selecting random item = %d to get started.'%randitem)
log.opts['iitem'] = randitem
# Use dataset mean
elif log.opts['iitem'] in ('mean', 'average', 'mu', '-2', -2):
printt('Initializing model to mean and skipping to selection 1')
mu = nanmean(X, axis=1).reshape(-1,1)
# if we have missingmethod set to 'ignore', and some features are entirely NaNs,
# then there will still be NaNs in mu. We can safely set them to zero for now
# because 'ignore' will mean that these values don't ever get looked at again.
#mu[np.isnan(mu)] = 0
# print "Just calculated mean."
#U, S, V = linalg.svd(mu, full_matrices=False)
# Instead, treat this like we do for first selection normally.
U = np.zeros_like(mu)
U[0] = 1
S = np.array([0])
pcts = [1.0]
log.opts['iitem'] = -2
log.opts['iitem'] = int(log.opts['iitem'])
printt('')
###############################################
# Initialize all of the counters, sums, etc for demud
n_items = X.shape[1]
orig_ind = np.arange(n_items)
seen = ds.initdata
ncois = 0
whencoiswerefound = []
oldscoresum = -1
maxscoresum = -1
sels = []
sels_idx = []
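  # sels accumulates indices into the original data set; sels_idx accumulates
  # indices into the shrinking working copy X at the time of each selection.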
# Create the 'results' directory, if needed
if not os.path.exists('results'):
os.mkdir('results')
###########################################################################
## MAIN ITERATIVE DISCOVERY LOOP
scores = []
reproj = []
for i in range(nsel):
printt("Time elapsed at start of iteration %d/%d:" % (i, nsel-1),
time.clock())
###############################################
# If we just found a COI (class of interest) in the previous round,
# and the coi-action is 'seek',
# then don't use the model to select the next item.
# Instead, do a nearest-neighbor search through X based on sels[-1].
if whencoiswerefound != [] and (whencoiswerefound[-1] == i-1) and \
(log.opts['coiaction'] == 'seek'):
printt("Actively searching for the next COI based on item %d"
% sels[-1])
ind = select_next_NN(X, ds.data[:,sels[-1]])
# There is no reconstruction, r, of the new item.
# But we want something to generate explanations.
# So use the previous selection as the reconstruction.
# Warning: this may be confusing, since this item
# was picked to be SIMILAR to r, not different from it.
r = ds.data[:,sels[-1]]
# We update scores simply by removing this item.
# I think we don't want to do this since scores gets updated
# after plotting info for this choice.
#scores = np.delete(scores, ind)
###############################################
# Get the selection, according to model U
else:
# If using a static model, pass in oldscores and oldreproj
# to avoid re-calculating scores
if log.opts['static']:
ind, scores, reproj = select_next(X, U, mu, scoremethod,
missingmethod, feature_weights,
oldscores=scores, oldreproj=reproj)
else:
ind, scores, reproj = select_next(X, U, mu, scoremethod,
missingmethod, feature_weights)
# If initializing with a specific item,
# then scores and reproj will be empty
if scores == []:
score = 0.0
r = X[:,ind] # reproj is same as item itself
else:
score = scores[ind]
r = reproj[:,ind]
# Update selections
sels += [orig_ind[ind]]
sels_idx += [ind]
printt("%d) Selected item %d (%s), score %g." % \
(i, orig_ind[ind], ds.labels[orig_ind[ind]], score))
###############################################
# Report the fractional change in sum of reconstruction scores
scoresum = sum(scores)
if i > 1:
printt(" Total data set reconstruction error: %g (of %g is %.2f%%)"
% (scoresum, maxscoresum, scoresum/maxscoresum*100))
else:
printt(" Initial total data set reconstruction error: %g" % scoresum)
maxscoresum = scoresum
oldscoresum = scoresum
###############################################
# This selection is x. Print its characteristics
x = X[:,ind]
printt(" Min/max x values: %.2e, %.2e" % (np.nanmin(x),
np.nanmax(x)))
printt(" Min/max r values: %.2e, %.2e" % (np.nanmin(r),
np.nanmax(r)))
diff = x-r
printt(" Min/max/mean residual: %.2f, %.2f, %.2f" % (np.nanmin(diff),
np.nanmax(diff),
np.mean(diff[np.where(~np.isnan(diff))])))
printt(" Class = %s" % ds.labels[orig_ind[ind]])
printt('')
###############################################
# Report on how many nans are in this selection.
if (len(np.where(np.isnan(X))[0]) > 0 or
len(np.where(np.isnan(seen))[0]) > 0):
goodinds = np.where(~np.isnan(x))[0]
print ' Sel. %d: %d (%.2f%%) good indices (not NaN)' % \
(i, len(goodinds), 100*len(goodinds) / float(len(x)))
###############################################
# Plot item using dataset's plotting method.
label = ds.labels[orig_ind[ind]]
if log.opts['kepler']:
dc = log.opts['static'] or (U != [] and U.shape[1] > 1)
      dsvd = (log.opts['static'] and i == 0) or (U != [] and U.shape[1] > 1)
if log.opts['plot']:
ds.plot_item(i, orig_ind[ind], x, r, k, label,
U, mu, S, X, pcts, scores, drawsvd=dsvd, drawcloud=dc)
elif log.opts['navcam']:
if log.opts['plot']:
ds.save_rec(r, orig_ind[ind], X[:,ind], k)
ds.plot_item(i, orig_ind[ind], x, r, k, label)
else:
if log.opts['plot']:
ds.plot_item(i, orig_ind[ind], x, r, k, label,
U, scores, feature_weights)
ds.write_selections_csv(i, k, orig_ind[ind], label, ind, scores)
if log.opts['decals']:
#####################################################
# Write a list of selections that are similar to this selection (x).
# First, score all items with respect to a single-item model of x.
# Create a U the same size as x, first value 1 (rest 0),
# and set mu to be x.
this_U = np.zeros_like(x)
this_U[0] = 1
this_U = this_U.reshape(-1, 1)
this_mu = x
this_mu = this_mu.reshape(-1, 1)
(this_item_scores, reproj) = score_items(X, this_U, this_mu,
scoremethod, missingmethod,
feature_weights)
ds.write_similar_html(10, i, k, ind, this_item_scores)
###############################################
# Setup for checking if to update or not.
do_update = True
###############################################
# Don't update if class of interest is found in item label
if log.opts['coi'] is not None:
if log.opts['coi'] in ds.labels[orig_ind[ind]]:
do_update = False
printt("Not updating, it's the class of interest!")
ncois = ncois + 1
whencoiswerefound += [i]
# If the coi-action is 'keep', proceed normally (do_update is false).
# If the coi-action is 'seek', next selection should be
# the nearest neighbor to the last one. This check is done
# in select_next().
        # If we've hit n_coi selections from the class of interest, exit
if ncois == log.opts['n_coi']:
printt("\nFound class of interest on these picks:\n")
if not log.opts['kepler']:
for ff in whencoiswerefound: printt(ff)
else:
el = ExoplanetLookup()
for fff in whencoiswerefound:
kid = int(ds.labels[sels[fff]].split(':')[0])
kidmin = el.min_period(kid)
if kidmin == "Unknown":
printt("%3d) %8d: min period %7s days" % (fff, kid, kidmin))
else:
printt("%3d) %8d: min period %7.4f days" % (fff, kid, float(kidmin)))
return
return
###############################################
# Be interactive!
if log.opts['interactive']:
userpref = raw_input("Do you want to add this to the model of "
"uninteresting data?\n(Y/N) ")
printt(userpref)
if userpref.lower() in ('y', 'yes', 'yes please'):
printt("You said yes!")
pass
elif userpref.lower() in ('n', 'no', 'no thank you'):
printt("You said no!")
do_update = False
else:
bad_input = True
while bad_input:
printt("Sorry, I don't recognize that input. Please choose yes or no.")
userpref = raw_input("(Y/N) ")
          if userpref.lower() in ('y', 'yes', 'yes please'):
bad_input = False
pass
          elif userpref.lower() in ('n', 'no', 'no thank you'):
bad_input = False
do_update = False
###############################################
# Check for static SVD
if log.opts['alwaysupdate'] and do_update == False:
do_update = True
printt("Doing an update!")
if log.opts['static']: do_update = False
###############################################
# Update the model
if do_update:
# We are doing this check because 'full' will only model what's in seen
# increm-ross will add what's in seen to the model U, S
if svdmethod == 'full':
if seen == []:
seen = x.reshape(-1,1)
else:
seen = np.hstack((seen,x.reshape(-1,1)))
else:
seen = x.reshape(-1,1)
# Reset U to empty if this is the first iteration
# in case an SVD was used to select the first item,
# UNLESS an initial data set was specified.
if (i == 0 and ds.initdata == []):
U = []
U, S, mu, n, pcts = update_model(seen, U, S, k, n, mu,
svdmethod=svdmethod,
missingmethod=missingmethod)
else:
printt("Skipped updating model U because data was interesting or static model was specified.")
###############################################
# Remove this item from X and other variables
keep = range(X.shape[1])
keep.remove(ind)
X = X[:,keep]
orig_ind = orig_ind[keep]
if scores != []:
scores = scores[keep]
reproj = reproj[:,keep]
printt() # spacing
###############################################
# Plot the top 4 principal components of the new model
if U != [] and log.opts['plot'] and log.opts['dan']:
ds.plot_pcs(i, U, mu, k, S)
# if log.opts['misr']:
# pylab.clf()
# pylab.imshow(U.reshape([ds.along_track, -1]))
# pylab.show()
# End loop over selections
###############################################
# Report on when observations from the class of interest were found (if any)
if len(whencoiswerefound) > 0:
printt("\nFound class of interest on these picks:\n")
if not log.opts['kepler']:
for ff in whencoiswerefound: printt(ff)
else:
el = ExoplanetLookup()
for fff in whencoiswerefound:
kid = int(ds.labels[sels[fff]].split(':')[0])
minper = el.min_period(kid)
if minper != 'Unknown':
printt("%3d) %8d: min period %7.4f days" % (fff, kid, float(minper)))
else:
printt("%3d) %8d: min period unknown" % (fff, kid))
###############################################
# Return
return (sels, sels_idx)
#______________________________generate_feature_weights____________________
def generate_feature_weights(d, xvals):
"""generate_feature_weights(d, xvals):
Generate feature weights for a dataset with d items based on the
value of the optparse variable 'fw' specified by --featureweightmethod=
This is the place in the code to add more feature weighting methods.
You do not need to worry about modifying the check_opts() method below,
but you may want to edit fw_print() (--featureweightmethods).
"""
if log.opts['fw'] == 'flat' or log.opts['fw'] == '':
return []
elif log.opts['fw'] == 'boostlow':
return [(1. / (i ** (1. / math.log(d, 2)))) for i in range(1, d+1)]
elif log.opts['fw'] == 'boosthigh':
ocelot = [(1. / (i ** (1. / math.log(d, 2)))) for i in range(1, d+1)]
ocelot.reverse()
return ocelot
else:
# Assume it's a filename and attempt to read weights from the file
return read_feature_weights(log.opts['fw'], xvals)
return []
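# --- Illustrative sketch (added; not part of the original DEMUD source). ---
# A quick sanity check of the 'boostlow' weighting for a hypothetical 4-feature
# data set: with d = 4 the exponent is 1/log2(4) = 0.5, so weights fall off as
# 1/sqrt(i), roughly [1.0, 0.707, 0.577, 0.5]. Relies on the module's existing
# `math` import; never called by DEMUD itself.
def _example_boostlow_weights():
    d = 4
    w = [(1. / (i ** (1. / math.log(d, 2)))) for i in range(1, d + 1)]
    assert abs(w[0] - 1.0) < 1e-9 and abs(w[-1] - 0.5) < 1e-9
    return w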
def read_feature_weights(fwfile, xvals):
"""read_feature_weights(fwfile, xvals)
Read feature weights from the specified file.
The data set's xvals must be provided so this method can
sanity-check the number of weights (and match to wavelengths,
if specified).
"""
# If no file was specified, skip this step.
if fwfile == '': # default
return []
if not os.path.exists(fwfile):
printt(' Could not find feature weight file %s, skipping.' % fwfile)
return []
printt('Treating feature-weight argument as a file name (%s).' % fwfile)
# Read in the feature-weight file
f = open(fwfile)
lines = f.readlines()
if len(lines) != len(xvals):
raise ValueError('Looking for %d weights, but got %d.' % (len(xvals), len(lines)))
wolf = [-1] * len(xvals)
for (i,line) in enumerate(lines):
values = line.split()
# Sanity check that we have one of two cases:
# 1. One weight per line
if len(values) == 1:
wolf[i] = float(values[0])
# 2. Two values per line (feature - matches xvals, weight)
elif len(values) == 2:
values = map(float, values)
feat_ind = np.where(xvals == values[0])[0]
if len(feat_ind) == 0:
raise ValueError('Could not find feature %s.' % values[0])
wolf[feat_ind[0]] = values[1]
# Check that feature weights are valid
for weight in wolf:
if weight < 0 or weight > 1:
raise ValueError("Weights must be between 0 and 1.")
return wolf
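# Illustrative note (added; the values below are hypothetical, not from any
# real data set): the two line formats read_feature_weights() accepts.
#
#   one weight per line            "<feature xval> <weight>" pairs
#   -------------------            -------------------------------
#   0.5                            400.5 0.5
#   1.0                            401.0 1.0
#   0.25                           401.5 0.25
#
# Either way the file must resolve to exactly len(xvals) weights, each in [0, 1].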
#______________________________finish_initialization_______________________
# Print out data shape, and check to set N to max.
def finish_initialization(ds, action='reading in dataset'):
global use_max_n, default_n_value
printt("Time elapsed after " + action + ":", time.clock())
if use_max_n or default_n_value > ds.data.shape[1]:
printt("Iterating through all data vectors.")
default_n_value = ds.data.shape[1]
fw = generate_feature_weights(ds.data.shape[0], ds.xvals)
return fw
#______________________________check_if_files_exist________________________
# Takes a list of files and determines if any of them do not exist.
# If so, exits in disgrace.
def check_if_files_exist(files, ftype='input'):
# First file is the data file and must exist
if files[0] == '':
printt("%s file not specified." % ftype)
return False
for f in files:
# Allow .pkl files to not exist, since they will be generated.
if f == '' or f.endswith('.pkl'):
continue
if not os.path.exists(f):
printt("Error: specified %s file %s does not exist" % (ftype, f))
return False
return True
#______________________________report_classes______________________________
# Reports upon classes found. Suppressed with --no-report option.
def report_classes(ds, sels, sels_idx, data_choice):
# print a list of all classes found in first nsel selections
if not (data_choice == 'navcam'):
found = []
printt("CLASSES FOUND:\n")
for (i, s) in enumerate(sels):
if not ds.labels[s] in found:
printt('Class found on selection %d: %s' % (i, ds.labels[s]))
found.append(ds.labels[s])
printt("\nNumber of classes found: %d\n" % len(found))
else:
file_sels = {};
for files in ds.datafiles:
file_sels[files] = [];
for i,idx in enumerate(sels_idx):
file_idx = np.where(ds.img_label_split > idx)[0][0]-1;
file_sels[ds.datafiles[file_idx]] += [sels[i]];
for key in file_sels:
if file_sels[key]:
print("File: %s" %(key));
for csels in file_sels[key]:
i = sels.index(csels)
printt('Class found on selection %d: %s' % (i,ds.labels[csels]))
#______________________________svd_print___________________________________
# print out SVD options and exit.
def svd_print():
printt("'svdmethod' indicates type of update to do:")
printt("- 'full': Recompute SVD from scratch. (Default)")
printt("- 'increm-ross': Ross et al.'s method for incremental update,")
printt(" with mean tracking. Does not handle missing values.")
printt("- 'increm-brand': Brand's method for incremental update,")
printt(" with mean tracking. Can handle missing values.")
printt("")
printt("--increm is a shortcut for --svdmethod=increm-brand.")
printt("")
exit()
#______________________________score_print_________________________________
# Print out scoring options and exit.
def score_print():
printt("'scoremethod' indicates how to score sources by reconstruction error:")
printt("- 'low': negative residuals")
printt("- 'high': positive residuals")
printt("- 'lowhigh': both (Default)")
exit()
#______________________________md_print____________________________________
# Print out missing data options and exit.
def md_print():
printt("'missingdatamethod' indicates how to handle missing (NaN) values:")
printt("- 'zero': set missing values to zero (Default)")
printt("- 'ignore': ignore missing values following Brand (2002)")
printt("- 'none': assert nothing is missing (NaN). Die horribly if not true.")
exit()
#______________________________fw_print____________________________________
# Print out feature weight options and exit.
def fw_print():
printt("'featureweightmethod' indicates how to weight features given:")
printt("- 'flat': all features weighted 1.0 (default)")
printt("- 'boostlow': boost earlier features more on a sliding scale")
printt("- 'boosthigh': boost higher features the same way")
printt("")
printt("Any other argument will be interpreted as a file name to be read from.")
printt(" The file must contain one weight per line as a float or int.")
printt(" Does not currently accept negative weights, since it would be")
printt(" confusing: weights are applied multiplicatively to residuals")
printt(" immediately before scoring, which uses absolute value.")
printt(" This could however be developed further to interact with scoring")
printt(" methods, such that features would be scored differently.")
printt("")
printt("The function used for the boost methods is as follows:")
printt(" For a dataset with n features:")
printt(" [(1.0 / (i ** (1.0 / math.log(n, 2)))) for i in range(1, n+1)]")
printt(" ^ Reverse for boosthigh")
printt(" This will weight the most important feature at 1.0")
printt(" and the least important at 0.5, with an exponential curve between.")
exit()
#______________________________make_config_________________________________
# Remake demud.config.
def clean():
#global log.opts
# Figure out if we're clobbering an existing config file
if os.path.exists(os.path.join(os.getcwd(), 'demud.config')):
printt("WARNING: This will overwrite your current demud.config file.")
printt(" Do you really want to continue?")
y = raw_input(" Enter Y to continue, anything else to abort: ")
if y.lower() != 'y' and y.lower() != 'yes' and y.lower() != 'yes please':
printt("Aborting.")
exit()
if y.lower() == 'yes please':
if log.opts['fun']:
printt("Certainly! Thank you for saying please!")
printt("\nWriting to demud.config\n")
# Do the actual output
outputfile = open(os.path.join(os.getcwd(), 'demud.config'), 'w+')
outputfile.write("Demud.config\nJames Bedell\n06/26/2013\n"
"########### README #################\n\n"
"Each dependency line takes the format:\n"
"mydatafile = /home/jbedell/research/data/sample.data\n"
"(either relative or absolute path)\n"
"Single and double quotes are okay but not necessary.\n\n"
"Lines which begin with a # like Python comments are "
"ignored (leading whitespace okay)\n"
"(comments which begin in the middle of "
"lines may produce unexpected results)\n\n"
"** Only the last (unignored) assignment to any "
"variable is kept. **\n\n"
"Below are the data sets and their dependencies. "
"Headers are of the format:\n"
"----- Sample data classification set: "
"sampledatafile sampledatamap\n"
" -h --help\n\n"
"Do not delete the headers, or demud.py as is "
"may suffer an ignominious demise by IndexError\n\n")
# this is the part in
# the code where a haiku is
# lurking silently
outputfile.write("############ DATASETS #################\n\n"
"----- Glass classification data set: ucidatafile\n"
" -g --glass\n"
"----- Iris classification data set: ucidatafile\n"
" -i --iris\n"
"----- E. Coli classification data set: ucidatafile\n"
" -e --ecoli\n"
"----- Abalone classification data set: ucidatafile\n"
" -o --abalone\n"
"----- ISOLET letter classification: ucidatafile\n"
" -z --isolet\n"
"ucidatafile = \n\n"
"----- Test data set: floatdatafile\n"
" -x --testdata\n"
"----- Pancam spectra data set: floatdatafile\n"
" -p --pancam\n"
"----- APF spectra data set: floatdatafile\n"
" -b --apf\n"
"----- CNN feature data set: floatdatafile\n"
" -v --cnn\n"
"----- DAN spectra data set: floatdatafile\n"
" --dan\n"
"floatdatafile = \n\n"
"floatinitdatafile = \n\n"
"----- GBT filterbank data set: gbtdirname, catalogfile\n"
" --gbtfil\n"
"gbtdirname = \n\n"
"catalogfile = \n\n"
"----- DECaLS FITS data set: decalsfilename\n"
" --decals\n"
"decalsfilename = \n\n"
"----- DES FITS data set: desfilename\n"
" --des\n"
"desfilename = \n\n"
"---- ChemCam: libsdatafile, libsinitdatafile\n"
" -c --chemcam\n"
"libsdatafile = \n"
"libsinitdatafile = \n\n"
"----- FINESSE: finessedirname\n"
" -f --finesse\n"
"finessedirname = \n\n"
"----- MISR aerosol data: misrAODdirname, misrrawdirname, misrdatafile\n"
" -m --misr\n"
"misrAODdirname = \n"
"misrrawdirname = \n"
"misrdatafile = \n\n"
"----- AVIRIS data: avirisdirname, avirisrawfile, avirissegmap\n"
" -a --aviris\n"
"avirisdirname = \n"
"avirisrawfile = \n"
"avirissegmap = \n\n"
"----- IRS Spitzer exoplanet atmosphere data: irsdatafile, irsdatefile, irsmodelfile, irswavecalfile\n"
" -s --spitzer\n"
"irsdatafile = \n"
"irsdatefile = \n"
"irsmodelfile = \n"
"irswavecalfile = \n\n"
"----- Kepler light curve data: keplerdatafolder, keplerdataextension\n"
" -k --kepler\n"
"keplerdatafolder = \n"
"keplerdataextension = \n\n"
"----- TextureCam image data: tcfilename, tcpklname\n"
" -t --texturecam\n"
"tcfilename = \n"
"tcpklname = \n\n"
"----- UCIS hyperspectral image cube: ucisrawfile\n"
" -u --ucis\n"
"ucisrawfile = \n\n"
"----- Mastcam images: mastcamdatafolder\n"
" -j --mastcam\n"
"mastcamdatafolder = \n\n"
"----- Images: imagedatafolder, imageinitdatafolder\n"
" -I --images\n"
"imagedatafolder = \n"
"imageinitdatafolder = \n\n"
"----- Image Sequence data: datafolder, solnumber, initdatasols\n"
"-q --navcam\n"
"datafolder = \n"
"solnumber = \n"
"scaleInvariant = \n"
"initdatasols = \n")
outputfile.close()
exit()
#______________________________parse_args__________________________________
# Set up option parser and parse command-line args
def parse_args():
###########################################################################
# Add command-line options. Store their values.
#
global __VERSION__
vers = __VERSION__
parser = optparse.OptionParser(usage="python %prog [-gecmasktofuzj] [options]",
version=vers)
# Datatype options
dtypes = OptionGroup(parser, "Datatype Options",
"Exactly one must be selected.")
dtypes.add_option('-g', '--glass', help='Glass classification',
default=False, action='store_true', dest='glass')
dtypes.add_option('--iris', help='Iris classification',
default=False, action='store_true', dest='iris')
dtypes.add_option('-e', '--ecoli', help='E. coli classification',
default=False, action='store_true', dest='ecoli')
dtypes.add_option('-o', '--abalone', help='Abalone classification',
default=False, action='store_true', dest='abalone')
dtypes.add_option('-p', '--pancam', help='Pancam spectra',
default=False, action='store_true', dest='pancam')
dtypes.add_option('-b', '--apf', help='APF spectra',
default=False, action='store_true', dest='apf')
dtypes.add_option('-v', '--cnn', help='CNN feature vectors',
default=False, action='store_true', dest='cnn')
dtypes.add_option('--dan', help='DAN spectra',
default=False, action='store_true', dest='dan')
dtypes.add_option('--gbt', help='GBT spectra',
default=False, action='store_true', dest='gbt')
dtypes.add_option('--gbtfil', help='GBT filterbank',
default=False, action='store_true', dest='gbtfil')
dtypes.add_option('--decals', help='DECaLS FITS file',
default=False, action='store_true', dest='decals')
dtypes.add_option('--des', help='DES FITS file',
default=False, action='store_true', dest='des')
dtypes.add_option('-x', '--testdata', help='Test data',
default=False, action='store_true', dest='testdata')
dtypes.add_option('-c', '--chemcam', help='ChemCam data', default=False,
action='store_true', dest='chemcam')
dtypes.add_option('-f', '--finesse', help='FINESSE data', default=False,
action='store_true', dest='finesse')
dtypes.add_option('-m', '--misr', help='MISR aerosol data', default=False,
action='store_true', dest='misr')
dtypes.add_option('-a', '--aviris', help='AVIRIS data', default=False,
action='store_true', dest='aviris')
dtypes.add_option('-s', '--spitzer', help='Spitzer IRS exoplanet atmosphere data',
default=False, action='store_true', dest='irs')
dtypes.add_option('-k', '--kepler', help='Kepler exoplanet data',
default=False, action='store_true', dest='kepler')
dtypes.add_option('-t', '--texturecam', help='TextureCam image data',
default=False, action='store_true', dest='texturecam')
dtypes.add_option('-u', '--ucis', help='UCIS hyperspectral data',
default=False, action='store_true', dest='ucis')
dtypes.add_option('-z', '--isolet', help='ISOLET letter recognition data',
default=False, action='store_true', dest='isolet')
dtypes.add_option('-q', '--navcam', help='Run for sequence of Images (MER purpose)',
default=False, action='store_true', dest='navcam')
dtypes.add_option('-j', '--mastcam', help='MSL Mastcam image data',
default=False, action='store_true', dest='mastcam')
dtypes.add_option('-I', '--images', help='Image data in a directory',
default=False, action='store_true', dest='images')
parser.add_option_group(dtypes)
# Parameter options
params = OptionGroup(parser, "Parameter Options",
"Specify DEMUD parameters."
" Override the defaults in demud.config.")
params.add_option('--k', help='Number of principal components for reconstructing data',
default=-773038, type=int, action='store', dest='k')
params.add_option('--n', '--iters', help='Number of iterations of SVD and selection; default 10',
default=-773038, type=int, action='store', dest='n')
params.add_option('--all', help="Iterate through all data items",
default=False, action='store_true', dest='all')
params.add_option('--svdmethod', help="SVD method to use on each iteration (see --svdmethods for a list)",
default='default', type=str, action='store', dest='svdmethod')
params.add_option('--increm', help="Same as --svdmethod=increm-brand",
default=False, action='store_true', dest='increm')
params.add_option('--missingdatamethod', help="How to handle missing data (see --missingdatamethods for a list)",
default='none', type=str, action='store', dest='missingdatamethod')
params.add_option('--scoremethod', help="How to score data residuals (see --scoremethods for a list)",
default='lowhigh', type=str, action='store', dest='scoremethod')
params.add_option('--featureweightmethod', help="How to weight features for scoring (see --featureweightmethods for a list)",
default='', type=str, action='store', dest='fw')
params.add_option('--static', help='Static SVD: truncate to k vectors and never update again',
default=False, action='store_true', dest='static')
params.add_option('--sol', help="Analyze data from this sol. Use with -c.",
default=-1, type=int, action='store', dest='sol')
params.add_option('--sols', help="Analyze data from this sol range (<int>-<int>). Use with -c.",
default=None, type=str, action='store', dest='sols')
params.add_option('--initpriorsols', help="Initialize with data from sols prior to the specified sol or sol range. Use with -c.",
default=False, action='store_true', dest='init_prior_sols')
params.add_option('--note', help='Note to append to output directory name',
default=None, action='store', dest='note')
parser.add_option_group(params)
# Data processing and output options
dataops = OptionGroup(parser, "Data processing and output options",
"Specify additional preprocessing or postprocessing options.")
dataops.add_option('--init-item', help='Index of initialization item (default: 0; -1 or svd for full-data SVD; r for random; mean for mean)',
default=0, type=str, action='store', dest='iitem')
dataops.add_option('-i', '--interactive', help='Ask for feedback on adding selection to U',
default=False, action='store_true', dest='interactive')
dataops.add_option('--variance', help="Optimize --k to capture this much data variance\n Range: [0.0 1.0]",
default=-773038.0, type=float, action='store', dest='k_var')
dataops.add_option('--shotnoisefilter', help='Apply median filter of specified width. Used with [-cu].',
default=0, type=int, action='store', dest='shotfilt')
dataops.add_option('--fft', help='Perform FFT on data first. Only supported by [-k].',
default=False, action='store_true', dest='fft')
dataops.add_option('--lookup', help="Look up sources' status tags from MAST. Used with [-k].",
default=False, action='store_true', dest='lookup')
dataops.add_option('--no-report', help="Don't report on classes found. Used with [-ego].",
default=True, action='store_false', dest='report')
dataops.add_option('--no-plot', help="Don't plot any output.",
default=True, action='store_false', dest='plot')
dataops.add_option('--no-log', help="Don't log text output.",
default=True, action='store_false', dest='log')
dataops.add_option('--pause', help='Pause after calculating k-variance',
default=False, action='store_true', dest='pause')
# When specifying a class of interest (COI)
dataops.add_option('--coi', help='Class of interest.',
default=None, type=str, action='store', dest='coi')
dataops.add_option('--coi-action', help='What to do when a COI is found (keep or seek).',
default=None, type=str, action='store', dest='coiaction')
dataops.add_option('--n-coi', help='Exit after n items of class of interest found. \nUsed with coi-keep or coi-seek.',
default=-773038, type=int, action='store', dest='n_coi')
dataops.add_option('--always-update', help='Always update model, ignoring COI. Still use COI for output. Total hack.',
default=False, action='store_true', dest='alwaysupdate')
parser.add_option_group(dataops)
# Other options
parser.add_option('--config', help='Specify config file other than demud.config',
default=None, type='string', action='store', dest='config')
parser.add_option('--make-config', help='Re-create demud.config with empty variables; exit.',
default=False, action='store_true', dest='clean')
parser.add_option('--easter-eggs', '--fun', help=optparse.SUPPRESS_HELP, default=False,
action='store_true', dest='fun')
parser.add_option('--svdmethods', help='Provide details on available SVD methods and exit',
default=False, action='store_true', dest='svd_print')
parser.add_option('--scoremethods', help='Provide details on available scoring methods and exit',
default=False, action='store_true', dest='score_print')
parser.add_option('--missingdatamethods', help='Provide details on missing data methods and exit',
default=False, action='store_true', dest='md_print')
parser.add_option('--featureweightmethods', help='Provide details on feature weight methods and exit',
default=False, action='store_true', dest='fw_print')
parser.add_option('--print-default-k', help='Provide details on default values for --k and exit',
default=False, action='store_true', dest='printk')
parser.add_option('--plot-variance', help='Make a plot of k vs. variance explained.',
default=False, action='store_true', dest='plotvariance')
(options, args) = parser.parse_args()
return vars(options)
#______________________________check_opts__________________________________
# Ensure that the arguments supplied make sense
def check_opts(datatypes):
# Check if a function argument was supplied
global opts, use_max_n
if (log.opts['svd_print'] or log.opts['md_print'] or log.opts['clean']
or log.opts['printk'] or log.opts['score_print'] or log.opts['fw_print']):
if len(sys.argv) == 3 and log.opts['fun']:
pass
elif len(sys.argv) > 2:
printt("Error: conflicting arguments. Use -h for help.")
exit()
if log.opts['svd_print']:
svd_print()
elif log.opts['printk']:
print_default_k_values()
elif log.opts['md_print']:
md_print()
elif log.opts['fw_print']:
fw_print()
elif log.opts['clean']:
clean()
elif log.opts['score_print']:
score_print()
else:
printt("Python is tired now. Go bother somebody else.")
exit()
# Check to make sure that exactly one datatype argument was supplied.
sum = 0
selected = None
for k in log.opts:
if log.opts[k] == True and k in datatypes:
sum += 1
selected = k
if sum != 1:
printt("Error: Exactly one datatype argument must be supplied. Use -h for help.")
exit()
# Check to make sure that --k and --variance are not both specified
if log.opts['k'] != -773038 and log.opts['k_var'] != -773038.0:
printt("Error: conflicting arguments: --k and --variance. Use -h for help.")
exit()
# Check to make sure that --missingdatamethod has an appropriate argument
mdmethods = ('none', 'ignore', 'zero')
if (log.opts['missingdatamethod'] != None):
if (log.opts['missingdatamethod'] not in mdmethods):
printt("Error: missing data method %s not supported." %
log.opts['missingdatamethod'])
printt("Choose between 'zero', 'ignore', and 'none'.")
printt("Use --missingdatamethods for more info.")
exit()
# Check to make sure that --svdmethod has an appropriate argument
if log.opts['svdmethod'] != 'increm-brand' and log.opts['svdmethod'] != 'default' and log.opts['increm']:
printt("Error: cannot specify --increm along with different svdmethod.")
printt("Use --svdmethods for more info.")
exit()
if log.opts['svdmethod'] == 'default': log.opts['svdmethod'] = 'full'
if log.opts['increm']:
log.opts['svdmethod'] = 'increm-brand'
printt("Using increm")
svdmethods = ('full', 'increm-ross', 'increm-brand')
if (log.opts['svdmethod'] != None):
if (log.opts['svdmethod'] not in svdmethods):
printt("Error: SVD method %s not supported." % log.opts['svdmethod'])
printt("Choose between 'full', 'increm-ross', and 'increm-brand'.")
printt("Use --svdmethods for more info.")
exit()
# Check to make sure that --scoremethod has an appropriate argument
scoremethods = ('low', 'high', 'lowhigh')
if (log.opts['scoremethod'] != None):
if (log.opts['scoremethod'] not in scoremethods):
printt("Error: score method %s not supported." % log.opts['scoremethod'])
printt("Choose between 'low', 'high', and 'lowhigh'.")
printt("Use --scoremethods for more info.")
exit()
# Check to make sure that --shotnoisefilt was supplied only with a valid argument
if log.opts['shotfilt'] > 0:
if log.opts['shotfilt'] < 3:
printt('Error: Shot noise filter is only meaningful for values >= 3. Odd values are best.')
exit()
if not log.opts['chemcam'] and not log.opts['ucis']:
printt('Error: Shot noise filter is only used for ChemCam (-c) or UCIS (-u) data.')
exit()
# Check to make sure that --fft was supplied only with a valid argument
if (log.opts['fft']):
if not (log.opts['kepler']):
printt("Error: FFT not supported with datatype %s" % selected)
exit()
# Check to make sure that --lookup was only supplied with a valid argument
if (log.opts['lookup']):
if not (log.opts['kepler']):
printt("Error: --lookup supplied with invalid datatype. Use -h for help.")
exit()
# Check to make sure that --no-report was only supplied with a valid argument
if not (log.opts['report']):
if not (log.opts['glass'] or log.opts['iris'] or log.opts['ecoli'] or
log.opts['abalone'] or log.opts['isolet']):
printt("Error: --no-report supplied with invalid datatype. Use -h for help.")
exit()
if selected not in ['glass', 'iris', 'ecoli', 'abalone', 'isolet']:
log.opts['report'] = False
# Check to make sure that a valid value of k or k_var and n were given
if (log.opts['k'] != -773038 and log.opts['k'] < 1):
printt("Error: bad argument to --k. Number of PCs must be at least 1.")
exit()
if (log.opts['k_var'] != -773038.0 and (log.opts['k_var'] < 0 or log.opts['k_var'] > 1)):
printt("Error: bad argument to --variance. Must be between 0.0 and 1.0.")
exit()
if (log.opts['n'] != -773038 and log.opts['n'] < 1):
printt("Error: bad argument to --n. Number of iterations must be at least 1.")
exit()
# Check specified sol number for nonnegative and appropriate data type
if log.opts['sol'] > -1 and log.opts['sols'] != None:
printt("Error: Can only use either -sol or -sols, not both.")
exit()
elif log.opts['sol'] > -1:
if not log.opts['chemcam']:
printt("Error: Sol number specification is only supported for ChemCam (-c).")
exit()
else:
log.opts['start_sol'] = log.opts['sol']
log.opts['end_sol'] = log.opts['sol']
# If a sol range was specified, use that
elif log.opts['sols'] != None:
svals = log.opts['sols'].split('-')
if len(svals) != 2:
printt("Error parsing start and end sols from %s (format: s1-s2)." % log.opts['sols'])
exit()
(start, end) = map(int, svals)
if start >= 0 and end >= 0 and start <= end:
printt("Analyzing data from sols %d-%d, inclusive." % (start, end))
log.opts['start_sol'] = start
log.opts['end_sol'] = end
else:
printt("Error parsing start and end sols from %s." % log.opts['sols'])
exit()
# Check to see if n-coi was given
if (log.opts['n_coi'] != -773038 and log.opts['coi'] == None):
printt("Error: cannot supply --n-coi without specifying --coi.")
exit()
if (log.opts['n_coi'] > 0 and (log.opts['n_coi'] >= log.opts['n'])):
use_max_n = True
# Check the coiaction
if (log.opts['coiaction'] and log.opts['coi'] == None):
printt("Eror: cannot specify --coi-action without specifying --coi.")
exit()
if (log.opts['coiaction'] not in [None, 'keep', 'seek']):
printt("Error: --coi-action must be specified as 'keep' or 'seek'.")
exit()
# Check to see if all was given
if (log.opts['all']):
printt("Using the whole data set.")
use_max_n = True
return selected
#______________________________parse_config_term___________________________
def parse_config_term(config, term):
"""parse_config_term(config, term)
Search for term in config content and return its value (after = sign).
- config: result returned by readlines() applied to config file
- term: a string
"""
# Matching lines
lines = [line for line in config if line.startswith(term)]
# This term may not be defined in the config file
if lines == []:
return ''
# If the term is used multiple times, it uses the last one
return lines[-1].split('=')[-1].strip().replace("'", "").replace('"', '')
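# --- Illustrative sketch (added; not part of the original file). ---
# Shows how parse_config_term() keeps only the last assignment and strips
# quotes; the sample lines and path are hypothetical. Never called by DEMUD.
def _example_parse_config_term():
    sample = ["# a comment\n",
              "ucidatafile = /tmp/old.data\n",
              "ucidatafile = '/tmp/new.data'\n"]
    return parse_config_term(sample, 'ucidatafile')  # -> '/tmp/new.data'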
#______________________________parse_config________________________________
def parse_config(config, data_choice):
"""parse_config(config, data_choice):
Parse out the filenames needed for the data set of choice.
- config: result returned by readlines() applied to config file
- data_choice: string such as 'glass' or 'kepler'
(assume already validated; returned by check_opts())
"""
# UCI data
ucidatafile = parse_config_term(config, 'ucidatafile')
# Floating point data (or Pancam, APF, GBT, CNN, or DAN)
floatdatafile = parse_config_term(config, 'floatdatafile')
floatinitdatafile = parse_config_term(config, 'floatinitdatafile')
# GBT filterbank
gbtdirname = parse_config_term(config, 'gbtdirname')
catalogfile = parse_config_term(config, 'catalogfile')
# DECaLS
decalsfilename = parse_config_term(config, 'decalsfilename')
# DES
desfilename = parse_config_term(config, 'desfilename')
# ChemCam
libsdatafile = parse_config_term(config, 'libsdatafile')
libsinitdatafile = parse_config_term(config, 'libsinitdatafile')
# FINESSE
finessedirname = parse_config_term(config, 'finessedirname')
# MISR
misrrawdirname = parse_config_term(config, 'misrrawdirname')
misrAODdirname = parse_config_term(config, 'misrAODdirname')
misrdatafile = parse_config_term(config, 'misrdatafile')
# AVIRIS
avirisdirname = parse_config_term(config, 'avirisdirname')
avirisrawfile = parse_config_term(config, 'avirisrawfile')
avirissegmap = parse_config_term(config, 'avirissegmap')
# IRS (Spitzer)
irsdatafile = parse_config_term(config, 'irsdatafile')
irsdatefile = parse_config_term(config, 'irsdatefile')
irsmodelfile = parse_config_term(config, 'irsmodelfile')
irswavecalfile = parse_config_term(config, 'irswavecalfile')
# Kepler
keplerdatafolder = parse_config_term(config, 'keplerdatafolder')
keplerdataextension = parse_config_term(config, 'keplerdataextension')
# Mastcam
mastcamdatafolder = parse_config_term(config, 'mastcamdatafolder')
# Images
imagedatafolder = parse_config_term(config, 'imagedatafolder')
imageinitdatafolder = parse_config_term(config, 'imageinitdatafolder')
# Texturecam (image)
tcfilename = parse_config_term(config, 'tcfilename')
tcpklname = parse_config_term(config, 'tcpklname')
# Navcam
datafolder = parse_config_term(config, 'datafolder')
solnumber = parse_config_term(config, 'solnumber')
initdatasols = parse_config_term(config, 'initdatasols')
scaleInvariant = parse_config_term(config, 'scaleInvariant')
if scaleInvariant != '':
scaleInvariant = int(scaleInvariant)
# UCIS
ucisrawfile = parse_config_term(config, 'ucisrawfile')
if (data_choice == 'glass' or
data_choice == 'iris' or
data_choice == 'abalone' or
data_choice == 'isolet' or
data_choice == 'ecoli'):
return ([ucidatafile],'')
elif data_choice in ['pancam', 'testdata', 'apf', 'dan', 'gbt', 'cnn']:
return ([floatdatafile, floatinitdatafile],'')
elif data_choice == 'gbtfil':
return ([gbtdirname, catalogfile],'')
elif data_choice == 'decals':
return ([decalsfilename],'')
elif data_choice == 'des':
return ([desfilename],'')
elif data_choice == 'chemcam' or data_choice.startswith('libs'):
return ([libsdatafile, libsinitdatafile],'')
elif data_choice == 'finesse':
return ([finessedirname],'')
elif data_choice == 'misr':
return ([misrrawdirname, misrAODdirname, misrdatafile],'')
elif data_choice == 'aviris':
return ([avirisdirname, avirisrawfile, avirissegmap],'')
elif data_choice == 'irs':
return ([irsdatafile, irsdatefile, irsmodelfile, irswavecalfile],'')
elif data_choice == 'kepler':
return ([keplerdatafolder], keplerdataextension)
elif data_choice == 'texturecam':
return ([tcfilename, tcpklname],'')
elif data_choice == 'mastcam':
return ([mastcamdatafolder],'')
elif data_choice == 'images':
return ([imagedatafolder, imageinitdatafolder],'')
elif data_choice == 'navcam':
# Parse through initdatasols to convert it into a tuple
initsols = [];
start_sols = [];
end_sols = [];
if len(initdatasols):
for sols in initdatasols.split(','):
sols = sols.replace(" ", "")
if(len(start_sols) > len(end_sols)):
if not (sols[-1] == ')'):
printt('Error: unrecognized data set %s.' % data_choice)
printt('Incorrect initdatasols format.')
printt("Example initdatasols: '(1950,1955),1959'")
end_sols.append(int(sols[:4]));
continue;
if sols[0] == '(':
start_sols.append(int(sols[1:]));
else:
initsols.append(int(sols));
for start,end in zip(start_sols, end_sols):
initsols.extend(range(start,end + 1));
return ([datafolder], (int(solnumber), initsols, scaleInvariant))
elif data_choice == 'ucis':
return ([ucisrawfile],'')
printt('Error: unrecognized data set %s.' % data_choice)
return ()
#______________________________optimize_k__________________________________
def optimize_k(ds, v):
"""optimize_k(ds, v):
choose k intelligently to capture v% of the data variance.
Does a full SVD (inefficient; redundant with first call to update_model).
Assumes missingmethod = zero.
"""
assert v >= 0.0 and v <= 1.0
# If initialization data is present, optimize k using that data
if len(ds.initdata) > 0:
X = ds.initdata
# Otherwise analyze the new data
else:
X = ds.data
if X == []:
printt("Error: No data in input.")
exit()
# Handle NaNs with zeros for this lookup
z = np.where(np.isnan(X))
if z[0] != []:
printt("Filling NaNs with 0: %d of %d total." % \
(z[0].shape[0], X.shape[0] * X.shape[1]))
X = copy.deepcopy(X)
X[z] = 0
# Set up svd so we can operate on S
mu = np.mean(X, axis=1).reshape(-1,1)
X = X - mu
U, S, V = linalg.svd(X, full_matrices=False)
# Calculate percent variance captured
cumsum = np.cumsum(S)
cumpercents = cumsum / cumsum[-1]
if len(cumpercents) < 22:
printt("Cumulative percents:\n%s" % cumpercents)
else:
printt("Cumulative percents:\n%s" % cumpercents[:20])
percents = S / cumsum[-1]
#minind = np.argmin(abs(cumpercents - v))
minind = [i for i, x in enumerate(cumpercents) if x > v][0]
printt("\nChose k = %d, capturing %5.2f%% of the data variance\n" % \
(minind + 1, 100 * cumpercents[minind]))
if log.opts['pause']: raw_input("Press enter to continue\n")
return minind + 1
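# Worked illustration (added; the numbers are made up): if the cumulative
# variance percents come out as [0.62, 0.81, 0.92, 0.97, 1.00] and v = 0.90,
# the first index exceeding v is 2 (0-based), so optimize_k() returns k = 3,
# i.e. three principal components capture ~92% of the data variance.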
#______________________________plot_variance_______________________________
def plot_variance(ds):
X = ds.data
U, S, V = linalg.svd(X, full_matrices=False)
pylab.plot([qq+1 for qq in range(S.shape[0])], [sum(S[:a+1]) / float(sum(S)) for a in range(len(S))], '-r')
pylab.xlabel('Number of PCs')
pylab.ylabel('Percentage of variance explained')
pylab.title('PCs vs. variance for dataset %s' % ds.name.split('-')[0])
outdir = os.path.join('results', ds.name)
if not os.path.exists(outdir):
os.mkdir(outdir)
pylab.savefig(os.path.join(outdir, '__variance.pdf'))
#______________________________init_default_k_values_______________________
def init_default_k_values():
global default_k_values
default_k_values = {
'glass' : 5,
'iris' : 3,
'ecoli' : 6,
'pancam' : 2,
'apf' : 2,
'cnn' : 50,
'dan' : 2,
'gbt' : 10,
'gbtfil' : 10,
'decals' : 10,
'des' : 10,
'testdata' : 2,
'chemcam' : 10,
'finesse' : 10,
'misr' : 2, #10,
'abalone' : 4,
'isolet' : 20,
'aviris' : 10,
'irs' : 10,
'kepler' : 50,
'texturecam' : 10,
'navcam' : 10,
'mastcam' : 2,
'images' : 10,
'ucis' : 10,
}
#______________________________print_default_k_values______________________
def print_default_k_values():
init_default_k_values()
global default_k_values
printt(default_k_values)
exit()
#______________________________load_data___________________________________
def load_data(data_choice, data_files, sol_number = None, initsols = None, scaleInvariant = None, etc = None):
###########################################################################
## GLASS DATA SET (classification)
if data_choice == 'glass':
ds = GlassData(data_files[0])
## IRIS DATA SET (classification)
elif data_choice == 'iris':
ds = IrisData(data_files[0])
## ABALONE DATA SET (classification)
elif data_choice == 'abalone':
ds = AbaloneData(data_files[0])
## E. COLI DATA SET (classification)
elif data_choice == 'ecoli':
ds = EcoliData(data_files[0])
## ISOLET DATA SET (classification)
elif data_choice == 'isolet':
ds = IsoletData(data_files[0])
## PANCAM SPECTRA DATA SET
elif data_choice == 'pancam':
ds = PancamSpectra(data_files[0])
## APF SPECTRA DATA SET
elif data_choice == 'apf':
ds = APFSpectra(data_files[0])
## CNN FEATURE DATA SET
elif data_choice == 'cnn':
ds = CNNFeat(data_files[0], data_files[1])
## DAN SPECTRA DATA SET
elif data_choice == 'dan':
ds = DANSpectra(data_files[0])
## GBT SPECTRA DATA SET
elif data_choice == 'gbt':
ds = GBTSpectra(data_files[0])
## GBT FILTERBANK DATA SET
elif data_choice == 'gbtfil':
ds = GBTFilterbankData(data_files[0], data_files[1])
## DECALS FITS DATA SET
elif data_choice == 'decals':
ds = DECaLSData(data_files[0])
## DES FITS DATA SET
elif data_choice == 'des':
ds = DESData(data_files[0])
## TEST DATA SET
elif data_choice == 'testdata':
ds = Floats(data_files[0], data_files[1])
## CHEMCAM DATA SET
elif data_choice == 'chemcam' or data_choice.startswith('libs'):
ds = LIBSData(data_files[0], data_files[1],
startsol = log.opts['start_sol'],
endsol = log.opts['end_sol'],
initpriorsols = log.opts['init_prior_sols'],
shotnoisefilt = log.opts['shotfilt'])
## FINESSE DATA SET
elif data_choice == 'finesse':
ds = FINESSEData(data_files[0])
## MISR AEROSOL DATA SET
elif data_choice == 'misr':
printt("I see that you want to look at MISR data! Very cool.")
printt("I highly recommend using the following parameters:")
printt(" --missingdatamethod=ignore")
printt(" --svdmethod=increm-brand")
printt(" --init-item=mean")
printt("Using all defaults (zero, full, 0) will also work with worse results.")
printt("Behavior on other parameter combinations is not predictable.")
printt("")
printt("Continuing with execution...")
printt("")
# raw_input('Press enter to continue or enter ^C/EOF to abort.')
ds = MISRDataTime(data_files[0], data_files[1], data_files[2])
'''
#
# THIS IS ALL OLD STUFF FROM KIRI WHICH IS NOW DEAD.
# keeping it for posterity and because things might be informative.
#
# The following calls bypass command-line options given
origname = ds.name
#sels = demud(ds, k=k, nsel=10)
# Selections (for k=10) should be:
# [168, 128, 24, 127, 153, 108, 188, 103, 0, 64]
ds.name = origname
sels , sels_idx = demud(ds, k=k, nsel=1, svdmethod='increm-ross')
# Selections should be:
# k=10 [168, 128, 24, 159, 127, 153, 47, 108, 188, 64]
# k=5 [128, 159, 24, 127, 47, 153, 188, 108, 64, 0]
ds.name = origname
sels , sels_idx = demud(ds, k=k, nsel=1,
svdmethod='increm-ross',
missingmethod='ignore')
# Selections should be:
# k=10 [128, 169, 130, 150, 40, 195, 84, 70, 194, 175]
# k=5 [128, 169, 24, 40, 135, 185, 139, 127, 16, 36]
# [7, 0, 3, 2, 4, 5, 1, 6, 9, 8] # for 10
ds.name = origname
sels, sels_idx = demud(ds, k=k, nsel=1,
scoremethod='high',
svdmethod='increm-ross',
missingmethod='ignore')
# Selections should be:
# k=10
# k=5 [128, 24, 127, 0, 152, 153, 159, 120, 46, 52]
ds.name = origname
'''
## AVIRIS DATA SET
elif data_choice == 'aviris':
#ds = ENVIData(avirisrawfile)
ds = SegENVIData(data_files[1], data_files[2])
## SPITZER IRS DATA SET
elif data_choice == 'irs':
ds = IRSData(data_files[0], data_files[1], data_files[2], data_files[3])
## KEPLER DATA SET (accepts fft)
elif data_choice == 'kepler':
ds = KeplerData(data_files[0], etc)
## MASTCAM DATA SET (accepts fft)
elif data_choice == 'mastcam':
ds = MastcamData(data_files[0])
## IMAGE DATA SET
elif data_choice == 'images':
ds = ImageData(data_files[0], data_files[1])
## TEXTURECAM DATA SET
elif data_choice == 'texturecam':
ds = TCData(data_files[0], data_files[1])
ds.selections = np.zeros((ds.height, ds.width))
## NAVCAM
elif data_choice == 'navcam':
ds = NavcamData(data_files[0], sol_number, initsols, scaleInvariant);
## UCIS (ENVI) DATA SET
elif data_choice == 'ucis':
ds = ENVIData(data_files[0],
shotnoisefilt = log.opts['shotfilt'],
fwfile = log.opts['fw'])
else:
## should never get here
printt("Invalid data set choice.")
exit()
printt("datatype ", type(ds.data))
if ds.data.shape[1] != len(ds.labels):
printt("Error: %d items but %d labels!" % (ds.data.shape[1],
len(ds.labels)))
exit()
return ds
#______________________________main________________________________________
# Main execution
def main():
printt("DEMUD version " + __VERSION__ + "\n")
log.opts = parse_args()
log.opts['start_sol'] = None
log.opts['end_sol'] = None
###########################################################################
## Check to ensure a valid set of arguments was given.
datatypes = ('glass', 'iris', 'ecoli', 'abalone', 'isolet',
'chemcam', 'finesse', 'misr', 'aviris',
'irs', 'kepler', 'texturecam', 'navcam',
'pancam', 'apf', 'dan', 'gbt', 'gbtfil', 'decals',
'mastcam', 'images', 'ucis', 'testdata', 'cnn', 'des')
data_choice = check_opts(datatypes)
(config, fft) = (log.opts['config'], log.opts['fft'])
(lookup, report) = (log.opts['lookup'], log.opts['report'])
(q, sm, mm) = (log.opts['svdmethod'],
log.opts['scoremethod'], log.opts['missingdatamethod'])
###########################################################################
## Check for config file and read it in
# Read in config file
config = 'demud.config' if log.opts['config'] == None else log.opts['config']
if check_if_files_exist([config], 'config') == False:
sys.exit(1)
with open(config) as config_file:
content = config_file.readlines()
data_files,etc = parse_config(content, data_choice)
printt("Elapsed time after parsing args and reading config file:", time.clock())
###########################################################################
## Now we are moving on to the cases which handle each data set.
###########################################################################
init_default_k_values()
global ds
if check_if_files_exist(data_files) == False:
sys.exit(1)
if data_choice == 'navcam':
sol_number, initsols, scaleInvariant = etc
ds = load_data(data_choice, data_files, sol_number, initsols, scaleInvariant)
else:
ds = load_data(data_choice, data_files, etc=etc)
# Proceed with final setup
fw = finish_initialization(ds)
if data_choice == 'kepler' and fft:
ds.fftransform()
fw = finish_initialization(ds, action="performing FFT")
k = log.opts['k'] if log.opts['k'] != -773038 else default_k_values[data_choice]
if log.opts['k_var'] != -773038.0: k = optimize_k(ds, log.opts['k_var'])
n = log.opts['n'] if log.opts['n'] != -773038 else default_n_value
if log.opts['n'] == -1: n = ds.data.shape[1]
# Run DEMUD!
sels, sels_idx = demud(ds, k=k, nsel=n, scoremethod=sm, svdmethod=q,
missingmethod=mm, feature_weights=fw,
start_sol=log.opts['start_sol'],
end_sol=log.opts['end_sol'])
# Report the results
if report:
report_classes(ds, sels, sels_idx, data_choice)
if (data_choice == 'decals' or
data_choice == 'des'):
# Perform final cleanup of HTML selections file
outdir = os.path.join('results', ds.name)
htmlselfile = os.path.join(outdir, 'selections-k%d.html' % k)
htmlfid = open(htmlselfile, 'a')
htmlfid.write('</body>\n</html>\n')
htmlfid.close()
if data_choice == 'mastcam':
for i in ds.segmentation:
seg = ds.segmentation[i]
break
image = len(sels) * np.ones(seg.shape)
for i in range(len(sels)):
image[seg == sels[i]+1] = i
pylab.imshow(image)
pylab.colorbar()
pylab.savefig(os.path.join('results',
'%s-n=%d-segmentation.pdf' %
(ds.name, len(sels))))
pylab.close()
for l in ds.labels:
img = ds.fullimages[l.split('_')[0]]
break
with open(os.path.join('results',
'%s-n=%d-segmentation.pkl' %
(ds.name, len(sels))), 'w') as f:
pickle.dump((sels, seg, img), f)
Image.fromarray(img).save(os.path.join('results',
'%s-n=%d-segmentation.png' %
(ds.name, len(sels))), 'PNG')
if data_choice == 'kepler' and lookup:
# Output a list of selections, and then lookup their labels
found = [int(ds.labels[x].split('-')[0].split(':')[0]) for x in sels]
printt(found)
flags = kepler_lookup.lookup(found)
for i in range(len(found)):
printt("%3d) %d: %s" % (i, found[i], flags[str(found[i])]))
if data_choice == 'navcam':
# Saves the scores to the output folder
outdir = os.path.join('results', ds.name)
if not os.path.exists(outdir):
os.mkdir(outdir)
ds.plot_score(outdir)
###########################################################################
printt("Total elapsed processor time:", time.clock())
log.logfile.close()
print "Wrote log to %s\n" % log.logfilename
if (log.opts['fun']):
print base64.b64decode('VGhhbmsgeW91LCBjb21lIGFnYWlu')
print
if __name__ == "__main__":
main()
#####
# CHANGELOG
#
# 1.2: Interactive feedback and class of interest
# 1.3: First element chosen is data[:,0] unless static SVD
# 1.4: Incremental SVD fully functional; choice of first element
# 1.5: Feature weighting included; full SVD option for init-item=-1 back in
# 1.6: Start of summer 2014, added to include Mastcam support
# 1.7: [inprogress] implementing image processing w/ CNN
#
#####
#####
# To add a new dataset:
#
# - add the appropriate case in the main method
# - add argument to list of datatypes at beginning of main method
# - in the same place, make the local boolean variable
# - add command-line flag
# - update check_opts as appropriate (ie, for lookup, fft, etc)
# - add import
# - add default k value
# - add required files to make-config
#
#####
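#####
# Hypothetical sketch (added for illustration; the 'mydata' names are made up
# and not part of DEMUD): the pieces a new data set would touch, following the
# checklist above.
#
#   in parse_args():    dtypes.add_option('--mydata', help='My data set',
#                                         default=False, action='store_true',
#                                         dest='mydata')
#   in main():          add 'mydata' to the datatypes tuple
#   in init_default_k_values():  'mydata' : 10,
#   in parse_config():  mydatafile = parse_config_term(config, 'mydatafile')
#                       ... and return ([mydatafile], '') for data_choice == 'mydata'
#   in load_data():     elif data_choice == 'mydata':
#                           ds = MyData(data_files[0])
#   in clean():         add a "----- My data set: mydatafile" block to demud.config
#
#####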
| apache-2.0 | 4,071,230,157,630,002,700 | 37.759142 | 143 | 0.569658 | false |
Blueshoe/djangocms-workflows | workflows/forms.py | 1 | 2853 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from .models import Action
class ActionForm(forms.Form):
message_ = forms.CharField(
label=_('Message'),
required=False,
help_text=_('You may provide some more information.'),
widget=forms.Textarea
)
editor_ = forms.ModelChoiceField(
label=_('Editor'),
queryset=get_user_model().objects.none(),
help_text=_('Only notify a specific user?'),
required=False
)
def __init__(self, *args, **kwargs):
self.stage = kwargs.pop('stage', None)
self.title = kwargs.pop('title')
self.request = kwargs.pop('request')
self.workflow = kwargs.pop('workflow')
self.action_type = kwargs.pop('action_type') # {open, approve, reject, cancel}
self.next_stage = self.workflow.next_mandatory_stage(self.stage)
self.group = getattr(self.stage, 'group', None)
cr = Action.get_current_request(self.title)
self.current_action = None if (not cr or cr.is_closed()) else cr.last_action()
self.user = self.request.user
super(ActionForm, self).__init__(*args, **kwargs)
self.adjust_editor()
@property
def message(self):
"""
:rtype: str
"""
return self.cleaned_data.get('message_', '')
@property
def editor(self):
"""
:rtype: django.contrib.auth.models.User
"""
return self.cleaned_data.get('editor')
@property
def editors(self):
if self.next_stage is None:
raise ValueError('No next stage!')
if self.editor:
return get_user_model().objects.filter(pk=self.editor.pk)
return self.next_stage.group.user_set.all()
def adjust_editor(self):
if self.action_type in (Action.CANCEL, Action.REJECT) or self.next_stage is None:
self.fields.pop('editor_', None) # no editor can be chosen
return
group = self.next_stage.group
self.fields['editor_'].queryset = group.user_set.all()
self.fields['editor_'].empty_label = _('Any {}').format(group.name)
def save(self):
"""
:rtype: Action
"""
init_kwargs = {
attr: getattr(self, attr) for attr in
('message', 'user', 'title', 'workflow', 'stage', 'action_type', 'group')
}
if self.current_action is None:
assert self.action_type == Action.REQUEST # root must be request
return Action.add_root(**init_kwargs)
else:
assert self.action_type != Action.REQUEST # non-root must not be request
return self.current_action.add_child(**init_kwargs)
| mit | 9,073,147,331,475,379,000 | 33.373494 | 89 | 0.592709 | false |
osroom/osroom | apps/modules/theme_setting/process/nav_setting.py | 1 | 5260 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2019/12/2 14:43
# @Author : Allen Woo
from bson import ObjectId
from flask import request, g
from flask_babel import gettext
from apps.app import mdbs, cache
from apps.core.flask.reqparse import arg_verify
from apps.utils.format.obj_format import json_to_pyseq, objid_to_str, str_to_num
@cache.cached(timeout=86400, key_base64=False, db_type="redis")
def get_global_theme_navs(theme_name, lang):
langs = g.site_global["language"]["all_language"].keys()
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": lang,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
else:
for la in langs:
if la == lang:
continue
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{
"language": la,
"theme_name": theme_name
},
{"_id": 0}
).sort([("order", 1)])
if navs.count(True):
return list(navs)
return []
def get_navs():
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
s, r = arg_verify(
[
(gettext("theme name"), theme_name),
(gettext("language"), lang)
],
required=True
)
if not s:
return r
navs = mdbs["sys"].dbs["theme_nav_setting"].find(
{"language": lang, "theme_name": theme_name}
).sort([("order", 1)])
navs = objid_to_str(navs)
data = {
"navs": navs
}
return data
def nav_setting():
"""
Update
:RETURN:
"""
cid = request.argget.all("id")
theme_name = request.argget.all("theme_name")
lang = request.argget.all("language")
display_name = request.argget.all("display_name")
order = str_to_num(request.argget.all("order", 99))
json_data = json_to_pyseq(request.argget.all("json_data"))
s, r = arg_verify(
[(gettext("Display name"), display_name),
(gettext("theme name"), theme_name),
(gettext("language"), lang),
(gettext("Json data"), json_data)
],
required=True
)
if not s:
return r
if not isinstance(json_data, dict):
data = {
"msg": gettext('Value must be of type json'),
"msg_type": "e",
"custom_status": 400
}
return data
if not cid:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].insert_one(updata)
if r.inserted_id:
data = {
"msg": gettext("Navigation added successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Failed to add navigation"),
"msg_type": "w",
"custom_status": 400
}
else:
updata = {
'theme_name': theme_name,
'display_name': display_name,
'language': lang,
'json_data': json_data,
"order": order
}
r = mdbs["sys"].dbs["theme_nav_setting"].update_one(
{"_id": ObjectId(cid)},
{"$set": updata}
)
if r.modified_count:
data = {
"msg": gettext("Updated successfully"),
"msg_type": "s",
"custom_status": 200
}
elif r.matched_count:
data = {
"msg": gettext("Unmodified"),
"msg_type": "w",
"custom_status": 200
}
else:
data = {
"msg": gettext("Update failed"),
"msg_type": "w",
"custom_status": 400
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
def del_navs():
ids = json_to_pyseq(request.argget.all("ids"))
s, r = arg_verify(
[(gettext("ids"), ids)],
required=True
)
if not s:
return r
del_ids = []
for id in ids:
del_ids.append(ObjectId(id))
r = mdbs["sys"].dbs["theme_nav_setting"].delete_many({"_id": {"$in": del_ids}})
if r.deleted_count:
data = {
"msg": gettext("Deleted successfully"),
"msg_type": "s",
"custom_status": 200
}
else:
data = {
"msg": gettext("Delete failed"),
"msg_type": "s",
"custom_status": 200
}
cache.delete_autokey(
fun="get_global_theme_navs",
theme_name=".*",
lang=".*",
db_type="redis",
key_regex=True
)
return data
| bsd-2-clause | 6,717,044,610,144,772,000 | 26.128342 | 83 | 0.459125 | false |
RuthAngus/chronometer | chronometer/compare.py | 1 | 3001 | """
Compare the properties injected to the properties recovered.
Particularly the Ages.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def get_stats_from_samples(samples):
"""
Take a 2d array of samples and produce medians and confidence intervals.
"""
meds = np.array([np.median(samples[:, i]) for i in
range(np.shape(samples)[1])])
lower = np.array([np.percentile(samples[:, i], 16) for i in
range(np.shape(samples)[1])])
upper = np.array([np.percentile(samples[:, i], 84) for i in
range(np.shape(samples)[1])])
errm, errp = meds - lower, upper - meds
return meds, errm, errp
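# --- Illustrative sketch (added; not part of the original script). ---
# For a toy samples array the medians and asymmetric errors reduce to simple
# percentiles: column 0 of `toy` gives a median of ~0.5 with errm = errp ~0.34.
# Never called by the script itself.
def _example_get_stats():
    toy = np.vstack([np.linspace(0., 1., 101), np.linspace(10., 20., 101)]).T
    return get_stats_from_samples(toy)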
def make_comparison_plot(true, recovered, errp, errm, iso, ierrp, ierrm,
xlabel, ylabel, fn):
"""
Compare the true property with the injected property.
"""
xs = np.linspace(min(true), max(true))
plt.clf()
plt.errorbar(true, recovered, yerr=[errm, errp], fmt="k.")
plt.errorbar(true, iso, yerr=[ierrm, ierrp], fmt="m.", alpha=.5)
plt.plot(xs, xs, "--", color=".5")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.subplots_adjust(bottom=.15)
plt.savefig(fn)
print(np.median(errm), np.median(errp), np.median(ierrm),
np.median(ierrp))
print(np.mean([np.median(ierrm), np.median(ierrp)])
/np.mean([np.median(errm), np.median(errp)]))
if __name__ == "__main__":
cwd = os.getcwd()
RESULTS_DIR = "/Users/ruthangus/projects/chronometer/chronometer/MH"
DATA_DIR = "/Users/ruthangus/projects/chronometer/chronometer/data"
# Load samples
with h5py.File(os.path.join(RESULTS_DIR, "combined_samples.h5"),
"r") as f:
samples = f["samples"][...]
# Find N stars
npar = np.shape(samples)[1]
N = int((npar - 4)/5)
nglob = 4
print(N, "stars")
# Load iso only samples
with h5py.File(os.path.join(RESULTS_DIR, "combined_samples_iso_only.h5"),
"r") as f:
iso_samples = f["samples"][...]
# Calculate medians and errorbars
recovered_age_samples = samples[:, nglob+N:nglob+2*N]
meds, errm, errp = get_stats_from_samples(np.exp(recovered_age_samples))
iso_age_samples = iso_samples[:, nglob+N:nglob+2*N]
iso, ierrm, ierrp = get_stats_from_samples(np.exp(iso_age_samples))
# Load truths
df = pd.read_csv(os.path.join(DATA_DIR, "fake_data.csv"))
true_ages = df.age.values[:N]
# Make plot
make_comparison_plot(true_ages, meds, errp, errm, iso, ierrp, ierrm,
"$\mathrm{True~age~(Gyr)}$",
"$\mathrm{Recovered~age~(Gyr)}$",
"compare_ages_{}".format(N))
| mit | -151,283,496,425,190,530 | 31.978022 | 77 | 0.586138 | false |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/OptRomInfStatement.py | 1 | 5392 | ## @file
# process OptionROM generation from INF statement
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import RuleSimpleFile
import RuleComplexFile
import Section
import OptionRom
import Common.GlobalData as GlobalData
from Common.DataType import *
from Common.String import *
from FfsInfStatement import FfsInfStatement
from GenFdsGlobalVariable import GenFdsGlobalVariable
##
#
#
class OptRomInfStatement (FfsInfStatement):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FfsInfStatement.__init__(self)
self.OverrideAttribs = None
## __GetOptRomParams() method
#
# Parse inf file to get option ROM related parameters
#
# @param self The object pointer
#
def __GetOptRomParams(self):
if self.OverrideAttribs == None:
self.OverrideAttribs = OptionRom.OverrideAttribs()
if self.OverrideAttribs.NeedCompress == None:
self.OverrideAttribs.NeedCompress = self.OptRomDefs.get ('PCI_COMPRESS')
if self.OverrideAttribs.NeedCompress is not None:
if self.OverrideAttribs.NeedCompress.upper() not in ('TRUE', 'FALSE'):
GenFdsGlobalVariable.ErrorLogger( "Expected TRUE/FALSE for PCI_COMPRESS: %s" %self.InfFileName)
self.OverrideAttribs.NeedCompress = \
self.OverrideAttribs.NeedCompress.upper() == 'TRUE'
if self.OverrideAttribs.PciVendorId == None:
self.OverrideAttribs.PciVendorId = self.OptRomDefs.get ('PCI_VENDOR_ID')
if self.OverrideAttribs.PciClassCode == None:
self.OverrideAttribs.PciClassCode = self.OptRomDefs.get ('PCI_CLASS_CODE')
if self.OverrideAttribs.PciDeviceId == None:
self.OverrideAttribs.PciDeviceId = self.OptRomDefs.get ('PCI_DEVICE_ID')
if self.OverrideAttribs.PciRevision == None:
self.OverrideAttribs.PciRevision = self.OptRomDefs.get ('PCI_REVISION')
# InfObj = GenFdsGlobalVariable.WorkSpace.BuildObject[self.PathClassObj, self.CurrentArch]
# RecordList = InfObj._RawData[MODEL_META_DATA_HEADER, InfObj._Arch, InfObj._Platform]
# for Record in RecordList:
# Record = ReplaceMacros(Record, GlobalData.gEdkGlobal, False)
# Name = Record[0]
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @retval string Generated .efi file name
#
def GenFfs(self):
#
# Parse Inf file get Module related information
#
self.__InfParse__()
self.__GetOptRomParams()
#
# Get the rule of how to generate Ffs file
#
Rule = self.__GetRule__()
GenFdsGlobalVariable.VerboseLogger( "Packing binaries from inf file : %s" %self.InfFileName)
#FileType = Ffs.Ffs.ModuleTypeToFileType[Rule.ModuleType]
#
# For the rule only has simpleFile
#
if isinstance (Rule, RuleSimpleFile.RuleSimpleFile) :
EfiOutputList = self.__GenSimpleFileSection__(Rule)
return EfiOutputList
#
# For Rule has ComplexFile
#
elif isinstance(Rule, RuleComplexFile.RuleComplexFile):
EfiOutputList = self.__GenComplexFileSection__(Rule)
return EfiOutputList
## __GenSimpleFileSection__() method
#
# Get .efi files according to simple rule.
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenSimpleFileSection__(self, Rule):
#
# Prepare the parameter of GenSection
#
OutputFileList = []
if Rule.FileName != None:
GenSecInputFile = self.__ExtendMacro__(Rule.FileName)
OutputFileList.append(GenSecInputFile)
else:
OutputFileList, IsSect = Section.Section.GetFileList(self, '', Rule.FileExtension)
return OutputFileList
## __GenComplexFileSection__() method
#
# Get .efi by sections in complex Rule
#
# @param self The object pointer
# @param Rule The rule object used to generate section
# @retval string File name of the generated section file
#
def __GenComplexFileSection__(self, Rule):
OutputFileList = []
for Sect in Rule.SectionList:
if Sect.SectionType == 'PE32':
if Sect.FileName != None:
GenSecInputFile = self.__ExtendMacro__(Sect.FileName)
OutputFileList.append(GenSecInputFile)
else:
FileList, IsSect = Section.Section.GetFileList(self, '', Sect.FileExtension)
OutputFileList.extend(FileList)
return OutputFileList
| gpl-2.0 | -6,444,457,340,399,212,000 | 33.793548 | 115 | 0.635386 | false |
hakuna-m/wubiuefi | src/wubi/backends/win32/backend.py | 1 | 37791 | # Copyright (c) 2008 Agostino Russo
#
# Written by Agostino Russo <[email protected]>
#
# This file is part of Wubi the Win32 Ubuntu Installer.
#
# Wubi is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Wubi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import _winreg
import ctypes
#import platform
from drive import Drive
from virtualdisk import create_virtual_disk
from eject import eject_cd
import registry
from memory import get_total_memory_mb
from wubi.backends.common.backend import Backend
from wubi.backends.common.utils import run_command, spawn_command, replace_line_in_file, read_file, write_file, join_path, remove_line_in_file
from wubi.backends.common.mappings import country2tz, name2country, gmt2country, country_gmt2tz, gmt2tz
from os.path import abspath, isfile, isdir
import mappings
import shutil
import logging
import tempfile
import struct
log = logging.getLogger('WindowsBackend')
class WindowsBackend(Backend):
'''
Win32-specific backend
'''
def __init__(self, *args, **kargs):
Backend.__init__(self, *args, **kargs)
self.info.iso_extractor = join_path(self.info.bin_dir, '7z.exe')
self.info.cpuid = join_path(self.info.bin_dir, 'cpuid.dll')
log.debug('7z=%s' % self.info.iso_extractor)
self.cache = {}
def fetch_host_info(self):
log.debug("Fetching host info...")
self.info.registry_key = self.get_registry_key()
self.info.windows_version = self.get_windows_version()
self.info.windows_version2 = self.get_windows_version2()
self.info.windows_sp = self.get_windows_sp()
self.info.windows_build = self.get_windows_build()
self.info.gmt = self.get_gmt()
self.info.country = self.get_country()
self.info.timezone = self.get_timezone()
self.info.host_username = self.get_windows_username()
self.info.user_full_name = self.get_windows_user_full_name()
self.info.user_directory = self.get_windows_user_dir()
self.info.windows_language_code = self.get_windows_language_code()
self.info.windows_language = self.get_windows_language()
self.info.processor_name = self.get_processor_name()
self.info.bootloader = self.get_bootloader(self.info.windows_version)
self.info.system_drive = self.get_system_drive()
self.info.drives = self.get_drives()
drives = [(d.path[:2].lower(), d) for d in self.info.drives]
self.info.drives_dict = dict(drives)
self.info.efi = self.check_EFI()
def select_target_dir(self):
target_dir = join_path(self.info.target_drive.path, self.info.distro.installation_dir)
        target_dir = target_dir.replace(' ', '_')
        target_dir = target_dir.replace('__', '_')
if os.path.exists(target_dir):
raise Exception("Cannot install into %s.\nThere is another file or directory with this name.\nPlease remove it before continuing." % target_dir)
self.info.target_dir = target_dir
log.info('Installing into %s' % target_dir)
self.info.icon = join_path(self.info.target_dir, self.info.distro.name + '.ico')
def uncompress_target_dir(self, associated_task):
if self.info.target_drive.is_fat():
return
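        # 'compact /U' switches NTFS compression off for the target directory;
        # compressed host files are assumed to be unsafe to loop-mount from Linux.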
try:
command = ['compact', self.info.target_dir, '/U', '/A', '/F']
run_command(command)
command = ['compact', join_path(self.info.target_dir,'*.*'), '/U', '/A', '/F']
run_command(command)
except Exception, err:
log.error(err)
def uncompress_files(self, associated_task):
if self.info.target_drive.is_fat():
return
command1 = ['compact', join_path(self.info.install_boot_dir), '/U', '/A', '/F']
command2 = ['compact', join_path(self.info.install_boot_dir,'*.*'), '/U', '/A', '/F']
for command in [command1,command2]:
log.debug(" ".join(command))
try:
run_command(command)
except Exception, err:
log.error(err)
def create_uninstaller(self, associated_task):
uninstaller_name = 'uninstall-%s.exe' % self.info.application_name
        uninstaller_name = uninstaller_name.replace(' ', '_')
        uninstaller_name = uninstaller_name.replace('__', '_')
uninstaller_path = join_path(self.info.target_dir, uninstaller_name)
if os.path.splitext(self.info.original_exe)[-1] == '.exe':
log.debug('Copying uninstaller %s -> %s' % (self.info.original_exe, uninstaller_path))
shutil.copyfile(self.info.original_exe, uninstaller_path)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'UninstallString', uninstaller_path)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'InstallationDir', self.info.target_dir)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'DisplayName', self.info.distro.name)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'DisplayIcon', self.info.icon)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'DisplayVersion', self.info.version_revision)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'Publisher', self.info.distro.name)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'URLInfoAbout', self.info.distro.website)
registry.set_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'HelpLink', self.info.distro.support)
def create_virtual_disks(self, associated_task):
self.info.disks_dir
for disk in ["root", "home", "usr", "swap"]:
path = join_path(self.info.disks_dir, disk + ".disk")
size_mb = int(getattr(self.info, disk + "_size_mb"))
if size_mb:
create_virtual_disk(path, size_mb)
def reboot(self):
command = ['shutdown', '-r', '-t', '00']
run_command(command) #TBD make async
def copy_installation_files(self, associated_task):
self.info.custominstall = join_path(self.info.install_dir, 'custom-installation')
src = join_path(self.info.data_dir, 'custom-installation')
dest = self.info.custominstall
log.debug('Copying %s -> %s' % (src, dest))
shutil.copytree(src, dest)
src = join_path(self.info.root_dir, 'winboot')
        if isdir(src): # 'make runpy' will fail otherwise as winboot will not be there
dest = join_path(self.info.target_dir, 'winboot')
log.debug('Copying %s -> %s' % (src, dest))
shutil.copytree(src, dest)
dest = join_path(self.info.custominstall, 'hooks', 'failure-command.sh')
msg=_('The installation failed. Logs have been saved in: %s.' \
'\n\nNote that in verbose mode, the logs may include the password.' \
'\n\nThe system will now reboot.')
msg = msg % join_path(self.info.install_dir, 'installation-logs.zip')
msg = "msg=\"%s\"" % msg
msg = str(msg.encode('utf8'))
replace_line_in_file(dest, 'msg=', msg)
src = join_path(self.info.image_dir, self.info.distro.name + '.ico')
dest = self.info.icon
log.debug('Copying %s -> %s' % (src, dest))
shutil.copyfile(src, dest)
def remove_existing_binary(self):
try:
binary = os.path.join(self.get_startup_folder(), 'wubi.exe')
except: # if the startup folder is missing, there is nothing to remove
return
if os.path.exists(binary):
try:
MOVEFILE_DELAY_UNTIL_REBOOT = 4
ctypes.windll.kernel32.MoveFileExW(binary, None,
MOVEFILE_DELAY_UNTIL_REBOOT)
except (OSError, IOError):
log.exception("Couldn't remove Wubi from startup:")
def get_startup_folder(self):
startup_folder = registry.get_value(
'HKEY_LOCAL_MACHINE',
'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
'\\Explorer\\Shell Folders',
'Common Startup')
log.debug('startup_folder=%s' % startup_folder)
return startup_folder
def get_windows_version2(self):
windows_version2 = registry.get_value(
'HKEY_LOCAL_MACHINE',
'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion',
'ProductName')
log.debug('windows_version2=%s' % windows_version2)
return windows_version2
def get_windows_sp(self):
windows_sp = registry.get_value(
'HKEY_LOCAL_MACHINE',
'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion',
'CSDVersion')
log.debug('windows_sp=%s' % windows_sp)
return windows_sp
def get_windows_build(self):
windows_build = registry.get_value(
'HKEY_LOCAL_MACHINE',
'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion',
'CurrentBuildNumber')
log.debug('windows_build=%s' % windows_build)
return windows_build
def get_processor_name(self):
processor_name = registry.get_value(
'HKEY_LOCAL_MACHINE',
'HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0',
'ProcessorNameString')
log.debug('processor_name=%s' %processor_name)
return processor_name
def get_gmt(self):
gmt = registry.get_value('HKEY_LOCAL_MACHINE', 'SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation', 'Bias')
if gmt:
gmt = -gmt/60
if not gmt \
or gmt > 12 \
or gmt < -12:
gmt = 0
log.debug('gmt=%s' %gmt)
return gmt
def get_country(self):
icountry = registry.get_value('HKEY_CURRENT_USER', 'Control Panel\\International', 'iCountry')
try:
icountry = int(icountry)
except:
pass
country = mappings.icountry2country.get(icountry)
if not country:
scountry = registry.get_value('HKEY_CURRENT_USER', 'Control Panel\\International', 'sCountry')
country = name2country.get(scountry)
if not country:
country = gmt2country.get(self.info.gmt)
if not country:
country = "US"
log.debug('country=%s' %country)
return country
def get_timezone(self):
timezone = country2tz.get(self.info.country)
timezone = country_gmt2tz.get((self.info.country, self.info.gmt), timezone)
if not timezone:
timezone = gmt2tz.get(self.info.gmt)
if not timezone:
timezone = "America/New_York"
log.debug('timezone=%s' % timezone)
return timezone
def eject_cd(self):
eject_cd(self.info.cd_path)
def get_windows_version(self):
full_version = sys.getwindowsversion()
major, minor, build, platform, txt = full_version
#platform.platform(), platform.system(), platform.release(), platform.version()
if platform == 0:
version = 'win32'
elif platform == 1:
if major == 4:
if minor == 0:
version = '95'
elif minor == 10:
version = '98'
elif minor == 90:
version = 'me'
elif platform == 2:
if major == 4:
version = 'nt'
elif major == 5:
if minor == 0:
version = '2000'
elif minor == 1:
version = 'xp'
elif minor == 2:
version = '2003'
elif major == 6:
version = 'vista'
log.debug('windows version=%s' % version)
return version
def get_bootloader(self, windows_version):
if windows_version in ['vista', '2008']:
bootloader = 'vista'
elif windows_version in ['nt', 'xp', '2000', '2003']:
bootloader = 'xp'
elif windows_version in ['95', '98']:
bootloader = '98'
else:
bootloader = None
log.debug('bootloader=%s' % bootloader)
return bootloader
def get_networking_info(self):
return NotImplemented
#~ win32com.client.Dispatch('WbemScripting.SWbemLocator') but it doesn't
#~ seem to function on win 9x. This script is intended to detect the
#~ computer's network configuration (gateway, dns, ip addr, subnet mask).
#~ Does someone know how to obtain those informations on a win 9x ?
#~ Windows 9x came without support for WMI. You can download WMI Core from
#~ http://www.microsoft.com/downloads/details.aspx?FamilyId=98A4C5BA-337B-4E92-8C18-A63847760EA5&displaylang=en
#~ although the implementation is quite limited
def get_drives(self):
drives = []
for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
drive = Drive(letter)
if drive.type:
log.debug('drive=%s'% str(drive))
drives.append(drive)
return drives
def get_uninstaller_path(self):
uninstaller_path = registry.get_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'UninstallString')
log.debug('uninstaller_path=%s' % uninstaller_path)
return uninstaller_path
def get_previous_target_dir(self):
previous_target_dir = registry.get_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'InstallationDir')
log.debug("previous_target_dir=%s" % previous_target_dir)
return previous_target_dir
def get_previous_distro_name(self):
previous_distro_name = registry.get_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'DisplayName')
log.debug("previous_distro_name=%s" % previous_distro_name)
return previous_distro_name
def get_registry_key(self):
registry_key = 'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\' + self.info.application_name.capitalize()
log.debug('registry_key=%s' % registry_key)
return registry_key
def get_windows_language_code(self):
#~ windows_language_code = registry.get_value(
#~ 'HKEY_CURRENT_USER',
#~ '\\Control Panel\\International',
#~ 'sLanguage')
windows_language_code = mappings.language2n.get(self.info.language[:2])
log.debug('windows_language_code=%s' % windows_language_code)
if not windows_language_code:
windows_language_code = 1033 #English
return windows_language_code
def get_windows_language(self):
windows_language = mappings.n2fulllanguage.get(self.info.windows_language_code)
log.debug('windows_language=%s' % windows_language)
if not windows_language:
windows_language = 'English'
return windows_language
def get_total_memory_mb(self):
total_memory_mb = get_total_memory_mb()
log.debug('total_memory_mb=%s' % total_memory_mb)
return total_memory_mb
def get_windows_username(self):
windows_username = os.getenv('username')
windows_username = windows_username.decode('ascii', 'ignore')
log.debug('windows_username=%s' % windows_username)
return windows_username
def get_windows_user_full_name(self):
user_full_name = os.getenv('username') #TBD
user_full_name = user_full_name.decode('ascii', 'ignore')
log.debug('user_full_name=%s' % user_full_name)
return user_full_name
def get_windows_user_dir(self):
homedrive = os.getenv('homedrive')
homepath = os.getenv('homepath')
user_directory = ""
if homedrive and homepath:
user_directory = join_path(homedrive, homepath)
user_directory = user_directory.decode('ascii', 'ignore')
log.debug('user_directory=%s' % user_directory)
return user_directory
def get_keyboard_layout(self):
win_keyboard_id = ctypes.windll.user32.GetKeyboardLayout(0)
# lower word is the locale identifier (higher word is a handler to the actual layout)
locale_id = win_keyboard_id & 0x0000FFFF
keyboard_layout = mappings.keymaps.get(locale_id)
if not keyboard_layout:
keyboard_layout = self.info.country.lower()
variant_id = win_keyboard_id & 0xFFFFFFFF
keyboard_variant = mappings.hkl2variant.get(variant_id)
if not keyboard_variant:
keyboard_variant = ""
log.debug('keyboard_id=%s' % win_keyboard_id)
log.debug('keyboard_layout=%s' % keyboard_layout)
log.debug('keyboard_variant=%s' % keyboard_variant)
return keyboard_layout, keyboard_variant
def get_system_drive(self):
system_drive = os.getenv('SystemDrive')
system_drive = Drive(system_drive)
log.debug('system_drive=%s' % system_drive)
return system_drive
def detect_proxy(self):
'''
https://bugs.edge.launchpad.net/wubi/+bug/135815
'''
#TBD
def extract_file_from_iso(self, iso_path, file_path, output_dir=None, overwrite=False):
'''
platform specific
'''
log.debug(" extracting %s from %s" % (file_path, iso_path))
if not iso_path or not os.path.exists(iso_path):
raise Exception('Invalid path %s' % iso_path)
iso_path = abspath(iso_path)
file_path = os.path.normpath(file_path)
if not output_dir:
output_dir = tempfile.gettempdir()
output_file = join_path(output_dir, os.path.basename(file_path))
if os.path.exists(output_file):
if overwrite:
os.unlink(output_file)
else:
raise Exception('Cannot overwrite %s' % output_file)
command = [self.info.iso_extractor, 'e', '-i!' + file_path, '-o' + output_dir, iso_path]
try:
run_command(command)
except Exception, err:
log.exception(err)
output_file = None
if output_file and isfile(output_file):
return output_file
def extract_diskimage(self, associated_task=None):
# TODO: try to pipe download stream into this.
sevenzip = self.info.iso_extractor
xz = self.dimage_path
        # str.strip() removes characters, not a suffix, so cut off '.xz' explicitly.
        tarball = os.path.basename(self.dimage_path)
        if tarball.endswith('.xz'):
            tarball = tarball[:-len('.xz')]
# 7-zip needs 7z.dll to read the xz format.
dec_xz = [sevenzip, 'e', '-i!' + tarball, '-so', xz]
dec_tar = [sevenzip, 'e', '-si', '-ttar', '-o' + self.info.disks_dir]
dec_xz_subp = spawn_command(dec_xz)
dec_tar_subp = spawn_command(dec_tar, stdin=dec_xz_subp.stdout)
dec_xz_subp.stdout.close()
dec_tar_subp.communicate()
if dec_tar_subp.returncode != 0:
raise Exception, ('Extraction failed with code: %d' %
dec_tar_subp.returncode)
# TODO: Checksum: http://tukaani.org/xz/xz-file-format.txt
# Only remove downloaded image
if not self.info.dimage_path:
os.remove(xz)
def expand_diskimage(self, associated_task=None):
# TODO: might use -p to get percentage to feed into progress.
root = join_path(self.info.disks_dir, 'root.disk')
resize2fs = join_path(self.info.bin_dir, 'resize2fs.exe')
resize_cmd = [resize2fs, '-f', root,
'%dM' % self.info.root_size_mb]
run_command(resize_cmd)
def create_swap_diskimage(self, associated_task=None):
path = join_path(self.info.disks_dir, 'swap.disk')
# fsutil works in bytes.
swap_size = '%d' % (self.info.swap_size_mb * 1024 * 1024)
create_cmd = ['fsutil', 'file', 'createnew', path, swap_size]
run_command(create_cmd)
def diskimage_bootloader(self, associated_task=None):
src = join_path(self.info.root_dir, 'winboot')
dest = join_path(self.info.target_dir, 'winboot')
if isdir(src):
log.debug('Copying %s -> %s' % (src, dest))
shutil.copytree(src, dest)
src = join_path(self.info.disks_dir, 'wubildr')
shutil.copyfile(src, join_path(dest, 'wubildr'))
# Overwrite the copy that's in root_dir.
for drive in self.info.drives:
if drive.type not in ('removable', 'hd'):
continue
dest = join_path(drive.path, 'wubildr')
try:
shutil.copyfile(src, dest)
except: # don't need to worry about failure here
pass
os.unlink(src)
def get_usb_search_paths(self):
'''
Used to detect ISOs in USB keys
'''
return [drive.path for drive in self.info.drives] #TBD only look in USB devices
def get_iso_search_paths(self):
'''
Gets default paths scanned for CD and ISOs
'''
paths = []
paths += [os.path.dirname(self.info.original_exe)]
paths += [drive.path for drive in self.info.drives]
paths += [os.environ.get('Desktop', None)]
paths = [abspath(p) for p in paths if p and os.path.isdir(p)]
return paths
def get_cd_search_paths(self):
return [drive.path for drive in self.info.drives] # if drive.type == 'cd']
def get_iso_file_names(self, iso_path):
iso_path = abspath(iso_path)
if iso_path in self.cache:
return self.cache[iso_path]
else:
self.cache[iso_path] = None
command = [self.info.iso_extractor,'l',iso_path]
try:
output = run_command(command)
except Exception, err:
log.exception(err)
log.debug('command >>%s' % ' '.join(command))
output = None
if not output: return []
lines = output.split(os.linesep)
start = None
new_lines = []
for line in lines:
if line.startswith('---'):
if start is None:
start = True
else:
break
elif start:
new_lines.append(line)
if not new_lines:
return []
lines = new_lines
file_info = [line.split() for line in lines]
file_names = [os.path.normpath(x[-1]) for x in file_info]
self.cache[iso_path] = file_names
return file_names
def remove_registry_key(self):
registry.delete_key(
'HKEY_LOCAL_MACHINE',
self.info.registry_key)
def check_EFI(self):
efi = False
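        # Treat the host as EFI-booted if bcdedit's enumeration references the
        # EFI boot manager or OS loader (bootmgfw.efi / winload.efi).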
if self.info.bootloader == 'vista':
bcdedit = join_path(os.getenv('SystemDrive'), 'bcdedit.exe')
if not os.path.isfile(bcdedit):
bcdedit = join_path(os.environ['systemroot'], 'sysnative', 'bcdedit.exe')
if not os.path.isfile(bcdedit):
bcdedit = join_path(os.environ['systemroot'], 'System32', 'bcdedit.exe')
if not os.path.isfile(bcdedit):
log.error("Cannot find bcdedit")
return False
command = [bcdedit, '/enum']
result = run_command(command)
result = result.lower()
if "bootmgfw.efi" in result:
efi = True
if "winload.efi" in result:
efi = True
log.debug('EFI boot = %s' % efi)
return efi
def modify_EFI_folder(self, associated_task,bcdedit):
command = [bcdedit, '/enum', '{bootmgr}']
boot_drive = run_command(command)
if 'partition=' in boot_drive:
boot_drive = boot_drive[boot_drive.index('partition=')+10:]
else:
boot_drive = boot_drive[boot_drive.index('device')+24:]
boot_drive = boot_drive[:boot_drive.index('\r')]
log.debug("EFI boot partition %s" % boot_drive)
# if EFI boot partition is mounted we use it
if boot_drive[1]==':':
efi_drive = boot_drive
else:
for efi_drive in 'HIJKLMNOPQRSTUVWXYZ':
drive = Drive(efi_drive)
if not drive.type:
break
efi_drive = efi_drive + ':'
log.debug("Temporary EFI drive %s" % efi_drive)
if efi_drive != boot_drive:
run_command(['mountvol', efi_drive, '/s'])
src = join_path(self.info.root_dir, 'winboot','EFI')
        src = src.replace(' ', '_')
        src = src.replace('__', '_')
dest = join_path(efi_drive, 'EFI',self.info.target_dir[3:])
        dest = dest.replace(' ', '_')
        dest = dest.replace('__', '_')
if not os.path.exists(dest):
shutil.os.mkdir(dest)
dest = join_path(dest,'wubildr')
if os.path.exists(dest):
shutil.rmtree(dest)
log.debug('Copying EFI folder %s -> %s' % (src, dest))
shutil.copytree(src, dest)
if self.get_efi_arch(associated_task,efi_drive)=="ia32":
efi_path = join_path(dest, 'grubia32.efi')[2:]
else:
efi_path = join_path(dest, 'shimx64.efi')[2:]
if efi_drive != boot_drive:
run_command(['mountvol', efi_drive, '/d'])
return efi_path
def get_efi_arch(self, associated_task, efi_drive):
machine=0
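        # Read the PE header of bootmgfw.efi: offset 60 holds e_lfanew (the PE
        # header offset) and the 2-byte machine field sits 4 bytes into that
        # header (0x014c = x86, 0x8664 = x86-64).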
bootmgfw=join_path(efi_drive,'EFI','Microsoft','Boot','bootmgfw.efi')
if os.path.exists(bootmgfw):
f=open(bootmgfw, 'rb')
s=f.read(2)
if s=='MZ':
f.seek(60)
s=f.read(4)
header_offset=struct.unpack("<L", s)[0]
f.seek(header_offset+4)
s=f.read(2)
machine=struct.unpack("<H", s)[0]
f.close()
if machine==332:
efi_arch = "ia32"
elif machine==34404:
efi_arch = "x64"
else:
efi_arch ="unknown"
log.debug("efi_arch=%s" % efi_arch)
return efi_arch
def undo_EFI_folder(self, associated_task):
for efi_drive in 'HIJKLMNOPQRSTUVWXYZ':
drive = Drive(efi_drive)
if not drive.type:
break
efi_drive = efi_drive + ':'
log.debug("Temporary EFI drive %s" % efi_drive)
try:
run_command(['mountvol', efi_drive, '/s'])
dest = join_path(efi_drive, 'EFI',self.info.previous_target_dir[3:],'wubildr')
            dest = dest.replace(' ', '_')
            dest = dest.replace('__', '_')
if os.path.exists(dest):
log.debug('Removing EFI folder %s' % dest)
shutil.rmtree(dest)
run_command(['mountvol', efi_drive, '/d'])
except Exception, err: #this shouldn't be fatal
log.error(err)
return
def modify_bootloader(self, associated_task):
for drive in self.info.drives:
if drive.type not in ('removable', 'hd'):
continue
mb = None
if self.info.bootloader == 'xp':
mb = associated_task.add_subtask(self.modify_bootini)
elif self.info.bootloader == '98':
mb = associated_task.add_subtask(self.modify_configsys)
elif self.info.bootloader == 'vista':
mb = associated_task.add_subtask(self.modify_bcd)
if mb:
mb(drive)
def undo_bootloader(self, associated_task):
winboot_files = ['wubildr', 'wubildr.mbr', 'wubildr.exe']
self.undo_bcd(associated_task)
for drive in self.info.drives:
if drive.type not in ('removable', 'hd'):
continue
self.undo_bootini(drive, associated_task)
self.undo_configsys(drive, associated_task)
for f in winboot_files:
f = join_path(drive.path, f)
if os.path.isfile(f):
os.unlink(f)
if self.info.efi:
log.debug("Undo EFI boot")
self.undo_EFI_folder(associated_task)
try:
run_command(['powercfg', '/h', 'on'])
except Exception, err: #this shouldn't be fatal
log.error(err)
def modify_bootini(self, drive, associated_task):
log.debug("modify_bootini %s" % drive.path)
bootini = join_path(drive.path, 'boot.ini')
if not os.path.isfile(bootini):
log.debug("Could not find boot.ini %s" % bootini)
return
src = join_path(self.info.root_dir, 'winboot', 'wubildr')
dest = join_path(drive.path, 'wubildr')
shutil.copyfile(src, dest)
src = join_path(self.info.root_dir, 'winboot', 'wubildr.mbr')
dest = join_path(drive.path, 'wubildr.mbr')
shutil.copyfile(src, dest)
run_command(['attrib', '-R', '-S', '-H', bootini])
boot_line = 'C:\wubildr.mbr = "%s"' % self.info.distro.name
old_line = boot_line[:boot_line.index("=")].strip().lower()
# ConfigParser gets confused by the ':' and changes the options order
content = read_file(bootini)
if content[-1] != '\n':
content += '\n'
lines = content.split('\n')
is_section = False
for i,line in enumerate(lines):
if line.strip().lower() == "[operating systems]":
is_section = True
elif line.strip().startswith("["):
is_section = False
if is_section and line.strip().lower().startswith(old_line):
lines[i] = boot_line
break
if is_section and not line.strip():
lines.insert(i, boot_line)
break
content = '\n'.join(lines)
write_file(bootini, content)
run_command(['attrib', '+R', '+S', '+H', bootini])
def undo_bootini(self, drive, associated_task):
log.debug("undo_bootini %s" % drive.path)
bootini = join_path(drive.path, 'boot.ini')
if not os.path.isfile(bootini):
return
run_command(['attrib', '-R', '-S', '-H', bootini])
remove_line_in_file(bootini, 'c:\wubildr.mbr', ignore_case=True)
run_command(['attrib', '+R', '+S', '+H', bootini])
def modify_configsys(self, drive, associated_task):
log.debug("modify_configsys %s" % drive.path)
configsys = join_path(drive.path, 'config.sys')
if not os.path.isfile(configsys):
return
src = join_path(self.info.root_dir, 'winboot', 'wubildr.exe')
dest = join_path(drive.path, 'wubildr.exe')
shutil.copyfile(src, dest)
run_command(['attrib', '-R', '-S', '-H', configsys])
config = read_file(configsys)
if 'REM WUBI MENU START\n' in config:
log.debug("Configsys has already been modified")
return
config += '''
REM WUBI MENU START
[menu]
menucolor=15,0
menuitem=windows,Windows
menuitem=wubildr,$distro
menudefault=windows,10
[wubildr]
device=wubildr.exe
[windows]
REM WUBI MENU END
'''
write_file(configsys, config)
run_command(['attrib', '+R', '+S', '+H', configsys])
def undo_configsys(self, drive, associated_task):
log.debug("undo_configsys %s" % drive)
configsys = join_path(drive.path, 'config.sys')
if not os.path.isfile(configsys):
return
run_command(['attrib', '-R', '-S', '-H', configsys])
config = read_file(configsys)
s = config.find('REM WUBI MENU START\n')
e = config.find('REM WUBI MENU END\n')
if s > 0 and e > 0:
e += len('REM WUBI MENU END')
config = config[:s] + config[e:]
write_file(configsys, config)
run_command(['attrib', '+R', '+S', '+H', configsys])
def modify_bcd(self, drive, associated_task):
log.debug("modify_bcd %s" % drive)
if drive is self.info.system_drive \
or drive.path == "C:" \
or drive.path == os.getenv('SystemDrive').upper() \
or drive.path == self.info.target_drive.path:
src = join_path(self.info.root_dir, 'winboot', 'wubildr')
dest = join_path(drive.path, 'wubildr')
shutil.copyfile(src, dest)
src = join_path(self.info.root_dir, 'winboot', 'wubildr.mbr')
dest = join_path(drive.path, 'wubildr.mbr')
shutil.copyfile(src, dest)
bcdedit = join_path(os.getenv('SystemDrive'), 'bcdedit.exe')
if not os.path.isfile(bcdedit):
bcdedit = join_path(os.environ['systemroot'], 'sysnative', 'bcdedit.exe')
# FIXME: Just test for bcdedit in the PATH. What's the Windows
# equivalent of `type`?
if not os.path.isfile(bcdedit):
bcdedit = join_path(os.environ['systemroot'], 'System32', 'bcdedit.exe')
if not os.path.isfile(bcdedit):
log.error("Cannot find bcdedit")
return
if registry.get_value('HKEY_LOCAL_MACHINE', self.info.registry_key, 'VistaBootDrive'):
log.debug("BCD has already been modified")
return
if self.info.efi:
log.debug("EFI boot")
efi_path = self.modify_EFI_folder(associated_task,bcdedit)
try:
run_command(['powercfg', '/h', 'off'])
except Exception, err: #this shouldn't be fatal
log.error(err)
command = [bcdedit, '/copy', '{bootmgr}', '/d', '%s' % self.info.distro.name]
id = run_command(command)
id = id[id.index('{'):id.index('}')+1]
run_command([bcdedit, '/set', id, 'path', efi_path])
try:
run_command([bcdedit, '/set', '{fwbootmgr}', 'displayorder', id, '/addlast'])
run_command([bcdedit, '/set', '{fwbootmgr}', 'timeout', '10'])
run_command([bcdedit, '/set', '{fwbootmgr}', 'bootsequence', id])
except Exception, err: #this shouldn't be fatal
log.error(err)
registry.set_value(
'HKEY_LOCAL_MACHINE',
self.info.registry_key,
'VistaBootDrive',
id)
return
command = [bcdedit, '/create', '/d', '%s' % self.info.distro.name, '/application', 'bootsector']
id = run_command(command)
id = id[id.index('{'):id.index('}')+1]
mbr_path = join_path(self.info.target_dir, 'winboot', 'wubildr.mbr')[2:]
run_command([bcdedit, '/set', id, 'device', 'partition=%s' % self.info.target_drive.path])
run_command([bcdedit, '/set', id, 'path', mbr_path])
run_command([bcdedit, '/displayorder', id, '/addlast'])
run_command([bcdedit, '/timeout', '10'])
run_command([bcdedit, '/bootsequence', id])
registry.set_value(
'HKEY_LOCAL_MACHINE',
self.info.registry_key,
'VistaBootDrive',
id)
def choose_disk_sizes(self, associated_task):
total_size_mb = self.info.installation_size_mb
home_size_mb = 0
usr_size_mb = 0
swap_size_mb = 256
root_size_mb = total_size_mb - swap_size_mb
if self.info.target_drive.is_fat():
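            # FAT32 cannot hold files larger than 4 GB, so the virtual disks are
            # split so that no single image exceeds roughly 4000 MB.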
if root_size_mb > 8500:
home_size_mb = root_size_mb - 8000
usr_size_mb = 4000
root_size_mb = 4000
elif root_size_mb > 5500:
usr_size_mb = 4000
root_size_mb -= 4000
elif root_size_mb > 4000:
usr_size_mb = root_size_mb - 1500
root_size_mb = 1500
if home_size_mb > 4000:
home_size_mb = 4000
self.info.home_size_mb = home_size_mb
self.info.usr_size_mb = usr_size_mb
self.info.swap_size_mb = swap_size_mb
self.info.root_size_mb = root_size_mb
log.debug("total size=%s\n root=%s\n swap=%s\n home=%s\n usr=%s" % (total_size_mb, root_size_mb, swap_size_mb, home_size_mb, usr_size_mb))
def undo_bcd(self, associated_task):
bcdedit = join_path(os.getenv('SystemDrive'), 'bcdedit.exe')
if not isfile(bcdedit):
bcdedit = join_path(os.getenv('SystemRoot'), 'sysnative', 'bcdedit.exe')
if not os.path.isfile(bcdedit):
bcdedit = join_path(os.environ['systemroot'], 'System32', 'bcdedit.exe')
if not os.path.isfile(bcdedit):
log.error("Cannot find bcdedit")
return
id = registry.get_value(
'HKEY_LOCAL_MACHINE',
self.info.registry_key,
'VistaBootDrive')
if not id:
log.debug("Could not find bcd id")
return
log.debug("Removing bcd entry %s" % id)
command = [bcdedit, '/delete', id , '/f']
try:
run_command(command)
registry.set_value(
'HKEY_LOCAL_MACHINE',
self.info.registry_key,
'VistaBootDrive',
"")
except Exception, err: #this shouldn't be fatal
log.error(err)
def get_arch(self):
cpuid = ctypes.windll.LoadLibrary(self.info.cpuid)
if cpuid.check_64bit():
arch = "amd64"
else:
arch = "i386"
log.debug("arch=%s" % arch)
return arch
| gpl-2.0 | 8,644,267,584,578,374,000 | 40.574257 | 156 | 0.571988 | false |
barnone/EigenD | app_cmdline/script.py | 2 | 3985 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
from pisession import session
from pi import index,async,timeout,proxy,resource
import optparse
import sys
import piw
import picross
import traceback
class Connector(proxy.AtomProxy,async.Deferred):
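    # Proxies a single agent address; the deferred fires once the remote atom
    # reports node_ready, so callers can simply wait on the connection.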
monitor = set()
def __init__(self,address):
async.Deferred.__init__(self)
proxy.AtomProxy.__init__(self)
self.__anchor = piw.canchor()
self.__anchor.set_client(self)
self.__anchor.set_address_str(address)
def close_client(self):
proxy.AtomProxy.close_client(self)
def cancel(self):
self.__anchor.set_address_str('')
self.__anchor.set_client(None)
self.__anchor=None
def node_ready(self):
self.succeeded()
class RpcAdapter(async.DeferredDecoder):
def decode(self):
if self.deferred.status() is False:
return async.Coroutine.failure(self.deferred.args()[0])
return self.deferred.args()[0]
def coroutine(lang,script,ctimeout=3000,rtimeout=3000,verbose=True):
connector = Connector(lang)
timer = timeout.Timeout(connector,ctimeout,False,'cant connect to language agent')
yield timer
if not timer.status():
yield async.Coroutine.failure(*timer.args())
return
if verbose:
print 'connected to',lang,connector.status()
for line in script_reader(script):
rpc = connector.invoke_rpc('exec',line,time=rtimeout)
yield rpc
if not rpc.status():
print line,'failed:',rpc.args()[0]
return
if verbose:
print line,'ok'
def script_reader(fp):
for line in fp:
line = line.strip()
if not line or line.startswith('#'): continue
yield line
def open_script(name):
if name == '-':
return sys.stdin
try:
return resource.file_open(name,"r")
except:
return None
def main():
parser = optparse.OptionParser(usage=sys.argv[0]+' [options] agent script')
parser.add_option('--quiet',action='store_true',dest='quiet',default=False,help='quiet')
parser.add_option('--ctimeout',action='store',type='int',dest='ctimeout',default=5000,help='con timeout (5000 ms)')
parser.add_option('--rtimeout',action='store',type='int',dest='rtimeout',default=300000,help='rpc timeout (300000 ms)')
parser.add_option('--verbose',action='store_true',dest='verbose',default=False,help='verbose')
(opts,args) = parser.parse_args(sys.argv)
if len(args) != 3:
parser.error('wrong number of arguments')
lang = args[1]
script = args[2]
fp = open_script(script)
if fp is None:
parser.error('cant open %s' % script)
def handler(ei):
traceback.print_exception(*ei)
return async.Coroutine.failure('internal error')
def failed(msg):
if opts.verbose:
print 'script failed:',msg
picross.exit(-1)
def succeeded():
if opts.verbose:
print 'script finished'
picross.exit(0)
def startup(dummy):
result = async.Coroutine(coroutine(lang,fp,opts.ctimeout,opts.rtimeout,opts.verbose),handler)
result.setErrback(failed).setCallback(succeeded)
return result
picross.pic_set_interrupt()
session.run_session(startup,clock=False)
| gpl-3.0 | -5,788,715,704,505,319,000 | 28.087591 | 123 | 0.65596 | false |
benrudolph/commcare-hq | custom/ewsghana/tests/handlers/utils.py | 1 | 5950 | import datetime
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.stock.consumption import ConsumptionConfiguration
from couchforms.models import XFormInstance
from corehq import Domain
from corehq.apps.accounting import generator
from corehq.apps.commtrack.models import CommtrackConfig, CommtrackActionConfig, StockState, ConsumptionConfig
from corehq.apps.commtrack.tests.util import TEST_BACKEND, make_loc
from corehq.apps.locations.models import Location, SQLLocation, LocationType
from corehq.apps.products.models import Product, SQLProduct
from corehq.apps.sms.backend import test
from corehq.apps.sms.mixin import MobileBackend
from corehq.apps.users.models import CommCareUser
from custom.ewsghana.models import EWSGhanaConfig
from custom.ewsghana.utils import prepare_domain, bootstrap_user
from custom.logistics.test.test_script import TestScript
from casexml.apps.stock.models import StockReport, StockTransaction
from casexml.apps.stock.models import DocDomainMapping
TEST_DOMAIN = 'ewsghana-test'
class EWSScriptTest(TestScript):
def _create_stock_state(self, product, consumption):
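        # Create two balance reports ten days apart (2*consumption, then
        # consumption) so a daily consumption rate can be derived for the product.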
xform = XFormInstance.get('test-xform')
loc = Location.by_site_code(TEST_DOMAIN, 'garms')
now = datetime.datetime.utcnow()
report = StockReport(
form_id=xform._id,
date=(now - datetime.timedelta(days=10)).replace(second=0, microsecond=0),
type='balance',
domain=TEST_DOMAIN
)
report.save()
stock_transaction = StockTransaction(
case_id=loc.linked_supply_point().get_id,
product_id=product.get_id,
sql_product=SQLProduct.objects.get(product_id=product.get_id),
section_id='stock',
type='stockonhand',
stock_on_hand=2 * consumption,
report=report
)
stock_transaction.save()
report = StockReport(
form_id=xform._id,
date=now.replace(second=0, microsecond=0),
type='balance',
domain=TEST_DOMAIN
)
report.save()
stock_transaction = StockTransaction(
case_id=loc.linked_supply_point().get_id,
product_id=product.get_id,
sql_product=SQLProduct.objects.get(product_id=product.get_id),
section_id='stock',
type='stockonhand',
stock_on_hand=consumption,
report=report
)
stock_transaction.save()
def setUp(self):
p1 = Product.get_by_code(TEST_DOMAIN, 'mc')
p2 = Product.get_by_code(TEST_DOMAIN, 'lf')
p3 = Product.get_by_code(TEST_DOMAIN, 'mg')
self._create_stock_state(p1, 5)
self._create_stock_state(p2, 10)
self._create_stock_state(p3, 5)
def tearDown(self):
StockTransaction.objects.all().delete()
StockReport.objects.all().delete()
StockState.objects.all().delete()
DocDomainMapping.objects.all().delete()
@classmethod
def setUpClass(cls):
domain = prepare_domain(TEST_DOMAIN)
p = Product(domain=domain.name, name='Jadelle', code='jd', unit='each')
p.save()
p2 = Product(domain=domain.name, name='Male Condom', code='mc', unit='each')
p2.save()
p3 = Product(domain=domain.name, name='Lofem', code='lf', unit='each')
p3.save()
p4 = Product(domain=domain.name, name='Ng', code='ng', unit='each')
p4.save()
p5 = Product(domain=domain.name, name='Micro-G', code='mg', unit='each')
p5.save()
loc = make_loc(code="garms", name="Test RMS", type="Regional Medical Store", domain=domain.name)
test.bootstrap(TEST_BACKEND, to_console=True)
bootstrap_user(username='stella', domain=domain.name, home_loc=loc)
bootstrap_user(username='super', domain=domain.name, home_loc=loc,
phone_number='222222', user_data={'role': 'In Charge'})
try:
XFormInstance.get(docid='test-xform')
except ResourceNotFound:
xform = XFormInstance(_id='test-xform')
xform.save()
sql_location = loc.sql_location
sql_location.products = SQLProduct.objects.filter(product_id=p5.get_id)
sql_location.save()
config = CommtrackConfig.for_domain(domain.name)
config.actions.append(
CommtrackActionConfig(
action='receipts',
keyword='rec',
caption='receipts'
)
)
config.consumption_config = ConsumptionConfig(min_transactions=0, min_window=0, optimal_window=60)
config.save()
@classmethod
def tearDownClass(cls):
MobileBackend.load_by_name(TEST_DOMAIN, TEST_BACKEND).delete()
CommCareUser.get_by_username('stella').delete()
CommCareUser.get_by_username('super').delete()
SQLLocation.objects.all().delete()
LocationType.objects.all().delete()
for product in Product.by_domain(TEST_DOMAIN):
product.delete()
SQLProduct.objects.all().delete()
EWSGhanaConfig.for_domain(TEST_DOMAIN).delete()
DocDomainMapping.objects.all().delete()
Location.by_site_code(TEST_DOMAIN, 'garms').delete()
generator.delete_all_subscriptions()
Domain.get_by_name(TEST_DOMAIN).delete()
def assign_products_to_location():
ng = SQLProduct.objects.get(domain=TEST_DOMAIN, code='ng')
jd = SQLProduct.objects.get(domain=TEST_DOMAIN, code='jd')
mg = SQLProduct.objects.get(domain=TEST_DOMAIN, code='mg')
location = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='garms')
location.products = [ng, jd, mg]
location.save()
def restore_location_products():
location = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='garms')
mg = SQLProduct.objects.get(domain=TEST_DOMAIN, code='mg')
location.products = [mg]
location.save()
| bsd-3-clause | -8,109,241,093,053,428,000 | 39.753425 | 110 | 0.647899 | false |
World-Youth-Days/Dictionary | adapter/old_db_inserter.py | 1 | 6193 | # -*- coding: utf-8 -*-
import codecs
from DbAdapter import DbAdapter
from display_dict import display_dict
db = DbAdapter(None) # define db connection
printable = []
# --------------------------------------------------------------------#
# -------------------------- Open file -------------------------#
# --------------------------------------------------------------------#
def insert_from_file_line_is_record(path_name, delimiter=',', **kwargs):
global printable
    tags_pos = None
try:
f = codecs.open(path_name, "r", 'utf-8')
except SystemError:
print("Error while opening file!")
return 4
print("\nFile: " + path_name + "\n")
rows = ['base', 'mono', 'trans', 'author', 'level']
pos = dict(base=None, mono=None, trans=None, author=None,
level=None) # sorry, I avoid understanding deep/shallow copy specs ;)
const = dict()
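    # 'pos' maps column names to their index in the file header, while 'const'
    # holds values forced via kwargs that apply to every record.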
# --------------------------------------------------------------------#
# ---------------------- Examine header -------------------------#
# --------------------------------------------------------------------#
header = f.readline().strip().split(delimiter)
print("Header: " + str(header))
print("Kwargs: " + str(kwargs))
for col in rows:
if col in kwargs:
const[col] = kwargs[col]
print("OK: Const " + col + " found")
try:
pos[col] = header.index(col)
print("OK: " + col + " at column " + str(pos[col]))
except ValueError:
print("Info: No " + col + " header found")
del pos[col]
if 'tags' in kwargs: # find sources of tags
const_tags = kwargs['tags'].split(',')
else:
const_tags = None
if 'tags' in header:
tags_pos = header.index('tags')
print("pos: " + str(pos))
print("const: " + str(const))
print("const_tags: " + str(const_tags))
print("tags_pos: " + str(tags_pos))
# --------------------------------------------------------------------#
# ------------------ Check for integrity ----------------------#
# --------------------------------------------------------------------#
if len(pos) + len(const) < 4:
print("Error: Insufficient information provided to fill all columns.")
return 2
    if pos.get('base') is None:
print("Warning: No base-word, assuming 0-th column as base")
pos['base'] = 0
if 'trans' not in pos and 'mono' not in pos:
print("Error: Neither monolingual nor translation defined, error!")
return 1
if (tags_pos is None) and const_tags is None:
print("Error: No tags provided!")
return 3
# --------------------------------------------------------------------#
# ---------------------- Build records -------------------------#
# --------------------------------------------------------------------#
    records = []
    for line in f:
d = dict()
line = line.strip().split(delimiter)
for key in const:
d[key] = const[key]
for key in pos: # constant values CAN be overridden by those
# taken directly from table (^-^)
d[key] = line[pos[key]]
records.append(d)
    # print the records so a human can confirm them
    # for r in records:
    #     print r
    display_dict(records, rows) # display using the new method from display_dict.py
# --------------------------------------------------------------------#
# ---------------------- Human check ;) -------------------------#
# --------------------------------------------------------------------#
if "force_yes" in kwargs and kwargs["force_yes"] == True:
print("Automatic yes chosen...")
elif input("Are those OK?[y/n]") not in ['y', 'yes', 'Y', 'Yes']:
print("Aborting...")
return 5
global db
db.add_words(records) # add words to db
# --------------------------------------------------------------------#
# ---------------------- Add tags -------------------------#
# --------------------------------------------------------------------#
# --------need to add remove-# feature
ids = []
for r in records: # add const_tags
del r['time']
print(r)
print(str(db.find_id(r)))
ids.append((db.find_id(r))[0])
        # I expect to find exactly one record here...
if const_tags is not None:
db.join(ids, const_tags)
print("Joined all with tags: " + str(const_tags))
f.seek(0) # start new reading, skip header
f.readline()
i = 0
if tags_pos is not None:
for line in f: # add tags form tags_pos
line = line.strip().split(delimiter)
word = db.find_id(records[i])
db.join(word, line[tags_pos:])
print("Joined " + str(word) + "with tags " + str(line[tags_pos:]))
i += 1
print("Closing...")
f.close()
def test_tags_table():
db = DbAdapter(None)
db.set_readable('const_tag_1', 'First Constant Tag')
db.set_readable('rock4ever', 'Rock for Ever')
db.set_flag('const_tag_1', 'hidden')
db.set_flag('live_tag1', 'live')
db.set_flag('live_tag_2', 'live')
print(db.get_tag("heheszki"))
# --------------------------------------------------------------------#
# ---------------------- Call the function-------------------------#
# --------------------------------------------------------------------#
insert_from_file_line_is_record("../data/test1.txt", author="francuski", tags="from_fr,to_pl",
level=10, force_yes=True)
insert_from_file_line_is_record("../data/test2.txt", author="angielski", tags="from_en,to_pl",
level=4, force_yes=True)
insert_from_file_line_is_record("../data/test3.txt", author="śmieszek",
tags="from_pl,to_pl", force_yes=False)
test_tags_table()
#
# --------------------------------------------------------------------#
# ---------------------- CSV import -------------------------#
# --------------------------------------------------------------------#
def import_from_csv(path, **kwargs):
import csv
global db, printable
    tags_pos = None
try:
        f = csv.reader(codecs.open(path, encoding="utf-8"), dialect='excel')
except SystemError:
print("Error while opening file!")
return 4
print("\nFile: " + path_name + "\n")
rows = ['base', 'mono', 'trans', 'author', 'level']
pos = dict(base=None, mono=None, trans=None, author=None,
level=None) # sorry, I avoid understanding deep/shallow copy specs ;)
const = dict()
| gpl-3.0 | 5,694,650,053,456,996,000 | 30.753846 | 94 | 0.465278 | false |
gmalmquist/unix-hollymonitor | src/unix-hollymonitor.py | 1 | 4266 | #!/usr/bin/env python
# This is a script to run the hollymonitor in a little
# standalone webserver, rather than being integrated
# into a larger application.
from __future__ import print_function
from BaseHTTPServer import BaseHTTPRequestHandler
from subprocess import Popen, PIPE, STDOUT
import mimetypes
import os
import re
import SocketServer
import shutil
import sys
SCRIPT_DIR = None
def execute_maybe(file_path):
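  # If the file starts with a shebang, run it with that interpreter and return
  # its combined stdout/stderr (a tiny CGI-like hook); otherwise return None.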
try:
h = open(file_path, 'r')
line = h.readline()
h.close()
except:
return None
#print(file_path, line)
if line and line.startswith('#!'):
command = line[2:].split(' ')
command = [c.strip() for c in command]
try:
p = Popen(command + [file_path], cwd=SCRIPT_DIR, stdout=PIPE, stderr=STDOUT)
out, err = p.communicate()
return out
except Exception as e:
pass
return None
class HollyHandler(BaseHTTPRequestHandler):
def do_GET(self):
file_path = os.path.join(SCRIPT_DIR, self.path[1:])
file_path = os.path.abspath(file_path)
file_path = os.path.relpath(file_path, SCRIPT_DIR)
if '..' in file_path:
self.send_response(403)
self.end_headers()
return
file_path = os.path.abspath(os.path.join(SCRIPT_DIR, file_path))
content_type = 'text/html; charset=utf-8'
if self.path == '/' or self.path == '':
status_html = os.path.join(SCRIPT_DIR, 'html', 'status.html')
if os.path.exists(status_html):
host = self.headers['Host']
self.send_response(301)
self.send_header('Location', 'http://{host}/html/status.html'.format(host=host))
self.end_headers()
return
if os.path.exists(file_path):
self.send_response(200)
if os.path.isdir(file_path):
message = '''<html>
<head><title>Directory {rel_path}</title></head>
<body>
<h1>Directory {rel_path}</h2>
<ul>
'''.format(rel_path = os.path.relpath(file_path, SCRIPT_DIR))
for f in sorted(os.listdir(file_path),
key = lambda f: (0, f) if os.path.isdir(os.path.join(file_path, f)) else (1, f)):
path = os.path.join(os.path.relpath(file_path, SCRIPT_DIR), f)
name = f
if os.path.isdir(os.path.join(SCRIPT_DIR, path)):
name = name + '/'
message += '<li>'
message += '<a href="{path}">{name}</a>'.format(path=name, name=name)
message += '</li>\n'
message += '</ul>\n</body>\n</html>\n'
else:
message = execute_maybe(file_path)
if message is not None:
self.wfile.write(message)
return
h = open(file_path, 'rb')
message = h.read()
h.close()
mime_type, mime_encoding = mimetypes.guess_type(file_path)
if not mime_type:
#print('Mime-type unknown, defaulting to text/html.')
content_type = 'text/html; charset=utf-8'
else:
#print('Mime-type is', mime_type, mime_encoding)
if not mime_encoding:
content_type = mime_type
else:
content_type = '%s; %s' % (mime_type, mime_encoding)
else:
self.send_response(404)
return
self.send_header('Content-type', content_type)
self.send_header('content-length', len(message))
self.end_headers()
self.wfile.write(message)
def start_cpu_recorder():
p = Popen([
'python',
os.path.join(SCRIPT_DIR, 'cpu-reporter.py'),
os.path.join(SCRIPT_DIR, 'cpu-usage.js')
])
def main(args, script_dir, script_path):
global SCRIPT_DIR
SCRIPT_DIR = script_dir
VARS = {}
FLAGS = set()
for arg in args:
if '=' in arg:
key, val = arg.split('=')
VARS[key] = val
else:
FLAGS.add(arg)
port = 8080
if 'port' in VARS:
if not re.match('^[0-9]+$', VARS['port']):
print('Port "%s"' % VARS['port'], 'is not valid, must be a number.')
else:
port = int(VARS['port'])
print('Starting CPU recorder...')
start_cpu_recorder()
print('Starting standalone webserver on port', port)
print('Use the command-line argument port=xxxx to change the port.')
httpd = SocketServer.TCPServer(('', port), HollyHandler)
httpd.serve_forever()
main(sys.argv[1:], os.path.dirname(sys.argv[0]), sys.argv[0])
| mit | -7,604,787,221,590,789,000 | 28.219178 | 95 | 0.599391 | false |
mtayseer/infoq-downloader | infoq_downloader.py | 1 | 4839 | #!/usr/bin/env python
from __future__ import division, print_function
import os
import sys
import re
import argparse
import requests
import cssselect
import lxml.html
import unicodedata
if sys.version_info.major == 3:
text_type = str
else:
text_type = unicode
# Some settings
download_directory = 'downloads'
cleanup_elements = [
'#footer', '#header', '#topInfo', '.share_this', '.random_links',
'.vendor_vs_popular', '.bottomContent', '#id_300x250_banner_top',
'.presentation_type', '#conference', '#imgPreload', '#text_height_fix_box',
'.download_presentation', '.recorded', 'script[async]',
'script[src*=addthis]'
]
# Set argparse to parse the paramaters
parser = argparse.ArgumentParser(description='Download InfoQ presentations.')
parser.add_argument('url', metavar='URL', type=str,
help='URL of the presentation to download')
# Parse the arguments passed to the script
args = parser.parse_args()
url = args.url
# Tell infoq that I'm an iPad, so it gives me simpler HTML to parse & an mp4 file
# to download
user_agent = (
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) "
"AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b "
"Safari/531.21.10')"
)
# Start downloading
print('Downloading HTML file')
content = requests.get(url, headers={'User-Agent': user_agent}).content
html_doc = lxml.html.fromstring(content)
title = html_doc.find(".//title").text
video_url = html_doc.cssselect('video > source')[0].attrib['src']
video_file = os.path.split(video_url)[1]
html_doc.cssselect('video > source')[0].attrib['src'] = video_file
# Clean the page
for elt in html_doc.cssselect(', '.join(e for e in cleanup_elements)):
elt.getparent().remove(elt)
html_doc.cssselect('#wrapper')[0].attrib['style'] = 'background: none'
content = lxml.html.tostring(html_doc).decode('utf-8')
# Make slides links point to local copies
slides_re = re.compile(r"'(/resource/presentations/[^']*?/en/slides/[^']*?)'")
slides = slides_re.findall(content)
# Create a directory for the downloaded presentation if it doesn't exist
if not os.path.exists(download_directory):
os.makedirs(download_directory)
# presentation folder path
if isinstance(title, text_type):
normalized_title = unicodedata.normalize('NFKD', title)
else:
normalized_title = text_type(title)
presentation_directory = os.path.join(download_directory, normalized_title)
# Create a folder with the name of the presentation
if not os.path.exists(presentation_directory):
os.makedirs(presentation_directory)
# Create a slides folder inside the presentation folder
if not os.path.exists('{}/slides'.format(presentation_directory)):
os.makedirs('{}/slides'.format(presentation_directory))
#Write content
content = re.sub(r"/resource/presentations/[^']*?/en/", '', content)
with open('{}/index.html'.format(presentation_directory), 'w') as f:
f.write(content)
f.flush()
# Download slides
slides_dir = os.path.join(presentation_directory, 'slides')
if not os.path.isdir(slides_dir):
os.makedirs(slides_dir)
for i, slide in enumerate(slides):
filename = os.path.split(slide)[1]
full_path = os.path.join(slides_dir, '{0}'.format(filename))
if os.path.exists(full_path):
continue
print('\rDownloading slide {0} of {1}'.format(i+1, len(slides)), end='')
sys.stdout.flush() # Hack for Python 2
url = 'http://www.infoq.com{0}'.format(slide)
with open(full_path, 'wb') as f:
f.write(requests.get(url).content)
print()
# If the video file is already downloaded successfully, don't do anything else
if os.path.exists(video_file):
print('Video file already exists')
sys.exit()
# Download the video file. stream=True here is important to allow me to iterate
# over content
downloaded_file = os.path.join(
presentation_directory, '{}.part'.format(video_file)
)
if os.path.exists(downloaded_file):
bytes_downloaded = os.stat(downloaded_file).st_size
else:
bytes_downloaded = 0
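# Resume an interrupted download: request only the remaining bytes via an HTTP
# Range header and append them to the .part file.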
r = requests.get(video_url, stream=True,
headers={'Range': 'bytes={0}-'.format(bytes_downloaded)})
content_length = int(r.headers['content-length']) + bytes_downloaded
with open(downloaded_file, 'ab') as f:
for chunk in r.iter_content(10 * 1024):
f.write(chunk)
f.flush()
# \r used to return the cursor to beginning of line, so I can write
# progress on a single line.
# The comma at the end of line is important, to stop the 'print' command
# from printing an additional new line
percent = f.tell() / content_length * 100
print('\rDownloading video {0:.2f}%'.format(percent), end='')
sys.stdout.flush() # Hack for Python 2
final_video_name = os.path.join(presentation_directory, video_file)
os.rename(downloaded_file, final_video_name)
| mit | 752,140,745,732,130,700 | 33.81295 | 80 | 0.692498 | false |
cwrubiorobots/ramblerVision | backup/shelterfind-serial-kogeto.py | 1 | 4607 | #!/usr/bin/python
import cv, serial, struct
from datetime import datetime
cyril = serial.Serial('/dev/ttyAMA0', 9600) #open first serial port and give it a good name
print "Opened "+cyril.portstr+" for serial access"
centerX = 175 #160
centerY = 140 #120
cropped = None
img = None
# decrease angular resolution for 8-bit serial transport
def derez(x):
if( x < 90 ):
return (-90-x)/2
else:
return (270-x)/2
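# e.g. derez(0) == -45 and derez(180) == 45, packing bearings into roughly -90..90.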
# allow user to click on image from camera to set the center for transformation
def on_mouse(event, x, y, flags, param):
if event==cv.CV_EVENT_LBUTTONDOWN:
print x, ", ", y, ": ", img[y,x]
#print "Set center ", x, ", ", y, ": ", img[y,x]
#global centerX
#global centerY
#centerX = x
#centerY = y
if __name__ == '__main__':
datalog = open("data.log", "w+")
datalog.write("\n~~~=== Rambler Data Log Opened, " + str(datetime.now()) + " ===~~~\n")
capture = cv.CaptureFromCAM(0)
#capture = cv.CaptureFromFile("../out2.mpg")
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 320)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
polar = cv.CreateImage((360, 360), 8, 3)
cropped = cv.CreateImage((360, 40), 8, 3)
img = cv.CreateImage((320, 240), 8, 3)
cones = cv.CreateImage((360, 40), 8, 1)
cv.NamedWindow('cam')
cv.NamedWindow('unwrapped')
cv.NamedWindow('target')
cv.SetMouseCallback('cam', on_mouse)
on_mouse(cv.CV_EVENT_LBUTTONDOWN, centerX, centerY, None, None)
# These values determine the range of colors to detect as "shelter".
#Calibration A: finding cones in room 817
lower = cv.Scalar(40, 90, 170) # (B, G, R)
upper = cv.Scalar(80, 180, 255)
#Calibration B: finding green paper in 817
#lower = cv.Scalar(10, 90, 10)
#upper = cv.Scalar(99, 255, 90)
M = 69
while True:
img = cv.QueryFrame(capture)
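        # Unwrap the omnidirectional image: LogPolar maps the ring around
        # (centerX, centerY) onto rows, and the 280-320 column band is assumed to
        # cover the radii where the cones appear, yielding a 360x40 panorama strip.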
cv.LogPolar(img, polar, (centerX, centerY), M+1, cv.CV_INTER_NN) #possible speedup - get subrect src
#cropped = cv.GetSubRect(polar,(280,0,40,360))
#cv.Transpose(cropped, cropped)
cv.Transpose(cv.GetSubRect(polar,(280,0,40,360)), cropped)
cv.Flip(cropped) #just for viewing (possible speedup)
cv.InRangeS(cropped, lower, upper, cones)
cv.Erode(cones, cones) # just once might be too much
k = cv.CreateStructuringElementEx(3, 43, 1, 1, cv.CV_SHAPE_RECT) # create a 3x43 rectangular dilation element k
cv.Dilate(cones, cones, k, 2)
#scan top row of thresholded, eroded, dilated image, find the number of contiguous segments and their location
s = 0 # size of contiguous segment
ss = 0 #number of contiguous segments
bearingToLandmarks = []
for i in xrange(360-2):
c = cones[0, i] #current
n = cones[0, i+1] #next
#print int(c),
if (c == 0 and n == 255) or \
(c == 255 and n == 255): # this condition marks beginning or middle of contiguous segment
s = s + 1
#print ".",
elif (c == 255 and n == 0): # end of contiguous segment
ss = ss + 1
bearingToLandmarks.append((i-s/2, s))
s = 0
#handle wraparound
if (i == 360-2-1 and s != 0): #TODO: double check this offset
if (cones[0,0] == 255):
#print "edge case A"
bearingToLandmarks[0] = ((bearingToLandmarks[0][0]-s/2)%360, bearingToLandmarks[0][1]+s) #TODO: recalculate center more accurately
else:
#print "edge case B"
                    bearingToLandmarks.append((i-s/2, s))
#print ".", ss, "."
#bearingToLandmarks.append((derez(g), 12))
#g = (g + 1) % 360
print bearingToLandmarks, len(bearingToLandmarks)
#TODO - Bearing output
if len(bearingToLandmarks) > 0:
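            # serial packet: 0xfa start byte, a zero byte, the bearing to the first
            # landmark packed as a signed byte (half-resolution from derez), and a
            # trailing zero byte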
output = struct.pack('c','\xfa') \
+ struct.pack('B', 0) \
+ struct.pack('b', derez(bearingToLandmarks[0][0]) ) \
+ struct.pack('B', 0)
cyril.write(output)
#Data Logging
if (cyril.inWaiting() > 0):
logdata = cyril.read(cyril.inWaiting())
a = 0
b = 0
for c in logdata:
if c == '\n':
datalog.write(str(datetime.now().time())+","+logdata[a:b]+"\n")
a = b + 1
b = b + 1
cv.ShowImage('cam', img)
cv.ShowImage('target', cones)
cv.ShowImage('unwrapped', cropped)
key = cv.WaitKey(10) # THIS REQUIRES AT LEAST ONE WINDOW
#print "key ",key
if key > 0:
break
cv.DestroyAllWindows()
cyril.close()
datalog.write("\n~~~=== Rambler Data Log Closed, " + str(datetime.now()) + " ===~~~\n")
datalog.close()
| gpl-2.0 | -3,594,148,619,330,314,000 | 31.907143 | 146 | 0.598003 | false |
cysuncn/python | spark/crm/PROC_O_LNA_XDXT_CUSTOMER_INFO.py | 1 | 8194 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_INFO').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#dates used during processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_INFO/*')
O_CI_XDXT_CUSTOMER_INFO.registerTempTable("O_CI_XDXT_CUSTOMER_INFO")
#Task[12] 001-01::
V_STEP = V_STEP + 1
#first delete all existing data from the target table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/*.parquet")
#copy a full snapshot over from yesterday's backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet")
F_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_INFO/*')
F_CI_XDXT_CUSTOMER_INFO.registerTempTable("F_CI_XDXT_CUSTOMER_INFO")
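# Merge logic: the first query below maps today's source rows into the target
# layout; the second keeps only existing target rows whose key (CUSTOMERID,
# FR_ID) is absent from today's load. The union of the two result sets is then
# written back as a full overwrite of the target table.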
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.CUSTOMERNAME AS CUSTOMERNAME
,A.CUSTOMERTYPE AS CUSTOMERTYPE
,A.CERTTYPE AS CERTTYPE
,A.CERTID AS CERTID
,A.CUSTOMERPASSWORD AS CUSTOMERPASSWORD
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.MFCUSTOMERID AS MFCUSTOMERID
,A.STATUS AS STATUS
,A.BELONGGROUPID AS BELONGGROUPID
,A.CHANNEL AS CHANNEL
,A.LOANCARDNO AS LOANCARDNO
,A.CUSTOMERSCALE AS CUSTOMERSCALE
,A.CORPORATEORGID AS CORPORATEORGID
,A.REMEDYFLAG AS REMEDYFLAG
,A.DRAWFLAG AS DRAWFLAG
,A.MANAGERUSERID AS MANAGERUSERID
,A.MANAGERORGID AS MANAGERORGID
,A.DRAWELIGIBILITY AS DRAWELIGIBILITY
,A.BLACKSHEETORNOT AS BLACKSHEETORNOT
,A.CONFIRMORNOT AS CONFIRMORNOT
,A.CLIENTCLASSN AS CLIENTCLASSN
,A.CLIENTCLASSM AS CLIENTCLASSM
,A.BUSINESSSTATE AS BUSINESSSTATE
,A.MASTERBALANCE AS MASTERBALANCE
,A.UPDATEDATE AS UPDATEDATE
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
   FROM O_CI_XDXT_CUSTOMER_INFO A                                   --customer basic information
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1 = sqlContext.sql(sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.registerTempTable("F_CI_XDXT_CUSTOMER_INFO_INNTMP1")
#F_CI_XDXT_CUSTOMER_INFO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_INFO/*')
#F_CI_XDXT_CUSTOMER_INFO.registerTempTable("F_CI_XDXT_CUSTOMER_INFO")
sql = """
 SELECT DST.CUSTOMERID                                          --customer ID:src.CUSTOMERID
       ,DST.CUSTOMERNAME                                        --customer name:src.CUSTOMERNAME
       ,DST.CUSTOMERTYPE                                        --customer type:src.CUSTOMERTYPE
       ,DST.CERTTYPE                                            --certificate type:src.CERTTYPE
       ,DST.CERTID                                              --certificate number:src.CERTID
       ,DST.CUSTOMERPASSWORD                                    --customer password:src.CUSTOMERPASSWORD
       ,DST.INPUTORGID                                          --registering organization:src.INPUTORGID
       ,DST.INPUTUSERID                                         --registered by:src.INPUTUSERID
       ,DST.INPUTDATE                                           --registration date:src.INPUTDATE
       ,DST.REMARK                                              --remark:src.REMARK
       ,DST.MFCUSTOMERID                                        --core customer number:src.MFCUSTOMERID
       ,DST.STATUS                                              --status:src.STATUS
       ,DST.BELONGGROUPID                                       --related group code:src.BELONGGROUPID
       ,DST.CHANNEL                                             --channel:src.CHANNEL
       ,DST.LOANCARDNO                                          --loan card number:src.LOANCARDNO
       ,DST.CUSTOMERSCALE                                       --customer scale:src.CUSTOMERSCALE
       ,DST.CORPORATEORGID                                      --corporate organization ID:src.CORPORATEORGID
       ,DST.REMEDYFLAG                                          --supplementary-entry flag:src.REMEDYFLAG
       ,DST.DRAWFLAG                                            --draw flag:src.DRAWFLAG
       ,DST.MANAGERUSERID                                       --account manager:src.MANAGERUSERID
       ,DST.MANAGERORGID                                        --managing organization ID:src.MANAGERORGID
       ,DST.DRAWELIGIBILITY                                     --draw eligibility:src.DRAWELIGIBILITY
       ,DST.BLACKSHEETORNOT                                     --blacklist customer flag:src.BLACKSHEETORNOT
       ,DST.CONFIRMORNOT                                        --effective flag:src.CONFIRMORNOT
       ,DST.CLIENTCLASSN                                        --current customer classification:src.CLIENTCLASSN
       ,DST.CLIENTCLASSM                                        --customer classification adjustment:src.CLIENTCLASSM
       ,DST.BUSINESSSTATE                                       --existing-stock flag:src.BUSINESSSTATE
       ,DST.MASTERBALANCE                                       --single-account balance:src.MASTERBALANCE
       ,DST.UPDATEDATE                                          --update date:src.UPDATEDATE
       ,DST.FR_ID                                               --corporate code:src.FR_ID
       ,DST.ODS_ST_DATE                                         --platform date:src.ODS_ST_DATE
       ,DST.ODS_SYS_ID                                          --source system code:src.ODS_SYS_ID
FROM F_CI_XDXT_CUSTOMER_INFO DST
LEFT JOIN F_CI_XDXT_CUSTOMER_INFO_INNTMP1 SRC
ON SRC.CUSTOMERID = DST.CUSTOMERID
AND SRC.FR_ID = DST.FR_ID
WHERE SRC.CUSTOMERID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_INFO_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet"
F_CI_XDXT_CUSTOMER_INFO_INNTMP2=F_CI_XDXT_CUSTOMER_INFO_INNTMP2.unionAll(F_CI_XDXT_CUSTOMER_INFO_INNTMP1)
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.cache()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.cache()
nrowsi = F_CI_XDXT_CUSTOMER_INFO_INNTMP1.count()
nrowsa = F_CI_XDXT_CUSTOMER_INFO_INNTMP2.count()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_CUSTOMER_INFO_INNTMP1.unpersist()
F_CI_XDXT_CUSTOMER_INFO_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_INFO lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/")
#first delete the current day's data from the backup table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT+".parquet")
#copy a full snapshot of today's target table to the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO/"+V_DT+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_INFO_BK/"+V_DT+".parquet")
| gpl-3.0 | -7,871,501,833,930,011,000 | 52.517241 | 198 | 0.532474 | false |
xiexiangwei/xGame | gamecenter/main.py | 1 | 2054 | # coding:utf-8
import platform
import sys
sys.path.append("../")
if 'twisted.internet.reactor' not in sys.modules:
if platform.system() == "Linux":
from twisted.internet import epollreactor
epollreactor.install()
else:
from twisted.internet import iocpreactor
iocpreactor.install()
import logging
from logging.handlers import TimedRotatingFileHandler
from twisted.internet import reactor
from twisted.python import log
from common import daemon, utils, const, servermanager
import clientfactory
import config
import random
import time
import redishelper
import mysqlhelper
def MainStop():
pass
def MainRun(isdaemon, id):
random.seed(time.time())
logging.getLogger().setLevel(config.instance.log_level)
handler = TimedRotatingFileHandler(filename=config.instance.log_file, when='D', interval=1)
handler.setLevel(config.instance.log_level)
formatter = logging.Formatter(config.instance.log_format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
log.PythonLoggingObserver().start()
if not isdaemon:
handler = logging.StreamHandler()
handler.setLevel(config.instance.log_level)
formatter = logging.Formatter(config.instance.log_format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
redishelper.instance.start()
mysqlhelper.instance.start()
clientfactory.instance.start(u"", config.instance.server_port, config.instance.max_client)
logging.info(u"游戏中心服务器启动成功")
def StartRequest(isdaemon):
config.instance.server_ip = utils.getExternalIP()
servermanager.instance.start(const.CLIENT_TYPE_GAMECENTER,
config.instance,
MainRun,
isdaemon)
reactor.run()
logging.info(u"游戏中心服务器停止运行")
MainStop()
def Run():
daemon.run(config.instance.server_pid, StartRequest)
if __name__ == "__main__":
Run()
| apache-2.0 | -3,971,600,087,697,623,000 | 26.916667 | 95 | 0.695522 | false |
jaredthomas68/FEM | src/fem.py | 1 | 32857 | import math as m
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve
import time
import matplotlib.pylab as plt
def ffunc_constant(x, a):
"""
Constant valued forcing function
    :param x: point at which to evaluate the forcing function
:param a: parameter values, in this case the value of the constant
:return: result of function evaluation, in this case the constant 'a'
"""
f = a
return f
def ffunc_linear(x, a=np.array([0, 1])):
"""
Linear forcing function
:param x: point at which to evaluate the forcingg function
:param a: parameter values, in this case an array with two elements
:return: the result of the function evaluation
"""
f = a[0] + a[1]*x
return f
def ffunc_quadratic(x, a=np.array([0, 0, 1])):
"""
Quadratic forcing function
    :param x: point at which to evaluate the forcing function
:param a: parameter values, in this case an array with three elements
:return: the result of the function evaluation
"""
f = a[0] + a[1]*x + a[2]*x**2
return f
def ffunc_cubic(x, a=np.array([0., 0., 0., 10.])):
f = a[0] + a[1]*x + a[2]*x**2 + a[3]*x**3
return f
def ffunc_beam(xr, a=np.array([10, 0.005, 0]), Ndof=6):
"""
Forcing function defined for coding 2 part 2
:param xr: location on beam normalized to be in [0, 1]
    :param a: parameter array [load magnitude, h (unused here), load case index]
:return: forcing function value
"""
# load cases corresponding to a[-1]
# 0: constant axial load
# 1: constant transverse load
# 2: Linearly distributed transverse load (0 at left, N at right)
f = a[0]
F = np.zeros([Ndof, xr.size])
if a[-1] == 0:
F[2, :] = f
elif a[-1] == 1:
F[0, :] = -f
elif a[-1] == 2:
F[0, :] = -xr*f
return F
def moment_of_inertia_rectangle(b, h):
Ix = (b*h**3)/12.
Iy = (h*b**3)/12.
Ixy = 0.
return Ix, Iy, Ixy
def moment_of_inertia_rod(d):
Ix = (np.pi*d**4)/64.
Iy = (np.pi*d**4)/64.
Ixy = 0.
# J = (np.pi*d**4)/32.
return Ix, Iy, Ixy
def fem_solver(Nell, he, Nint, p, ID, E, I1, I2, J, A, nu, ffunc=ffunc_quadratic, ffunc_args=np.array([0., 0., 1.]), case=2, Ndof=6.):
p = int(p)
Ndof = int(Ndof)
print "entering solver"
# define LM array
IEN = ien_array(Nell, p, Ndof)
LM = lm_array(Nell, p, ID, IEN, Ndof)
# initialize global stiffness matrix
# K = np.zeros((ID[ID[:]>0].shape)) #coding 2
K = np.zeros((int(np.max(ID)), int(np.max(ID)))) # coding 3
# initialize global force vector
F = np.zeros(int(np.max(ID)))
# get quadrature points and weights in local coordinants
xi, w = quadrature_rule(Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# find length of beam
L = np.sum(he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
mu = E/(2.*(1.+nu)) # nu = Poisson's ratio
A1s = A2s = 5./6.*A
D = get_D(A, E, mu, A1s, A2s, I1, I2, J)
# loop over elements
for e in np.arange(1, Nell+1):
# print "in element loop"
ke = np.zeros([(p + 1)*Ndof, (p + 1)*Ndof])
fe = np.zeros((p + 1)*Ndof)
# solve for local stiffness matrix and force vector
for i in np.arange(0, Nint):
# print "in integration loop"
B, Bdxi, Bdxidxi = local_bernstein(xi[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e-1:e+p],N, Nedxi, Nedxidxi)
# get f for each dof at this location
f = ffunc(x/L, ffunc_args, Ndof)
# get base k matrix for element e
for a in np.arange(0, p+1):
Ba = get_B(N[a], Ndx[a])
for b in np.arange(0, p+1):
Bb = get_B(N[b], Ndx[b])
BDB = np.matmul(np.transpose(Ba), np.matmul(D, Bb))
for idof in np.arange(0, Ndof):
for jdof in np.arange(0, Ndof):
ke[int(a*Ndof+idof), int(b*Ndof+jdof)] += BDB[idof, jdof]*dxdxi*w[i]
# K[int(LM[a, e - 1] - 1), int(LM[b, e - 1] - 1)] += Ndx[a]*E*I*Ndx[b]*w[i]*dxdxi
# element force calcs
for a in np.arange(0, p+1):
for idof in np.arange(0, Ndof):
fe[a*Ndof+idof] += N[a] * f[idof] * dxdxi * w[i]
        # assemble global stiffness matrix and force vector
for a in np.arange(0, p + 1):
for idof in np.arange(0, Ndof):
if LM[a*Ndof+idof, e - 1] == 0:
continue
# global force vector assembly
F[int(LM[a*Ndof+idof, e - 1] - 1)] += fe[a*Ndof+idof]
for b in np.arange(0, p + 1):
for jdof in np.arange(0, Ndof):
if LM[b*Ndof+jdof, e - 1] == 0:
continue
# global stiffness matrix assembly
K[int(LM[a*Ndof+idof, e - 1] - 1), int(LM[b*Ndof+jdof, e - 1] - 1)] += ke[a*Ndof+idof, b*Ndof+jdof]
# solve for d
d = solve_for_d(K, F)
# determine the number of nodes
Nnodes = Nell + p
# get full solution
solution = get_solution(d, p, Nell, Ndof, Nnodes, LM, ID)
return K, F, d, solution
def get_solution(d, p, Nell, Ndof, Nnodes, LM, ID):
sol = np.zeros(Ndof*Nnodes)
for inode in np.arange(0, Nnodes):
for idof in np.arange(0, Ndof):
if ID[idof, inode] == 0:
continue
sol[inode*Ndof+idof] = d[int(ID[idof, inode]-1)]
return sol
def get_D(A, E, mu, A1s, A2s, I1, I2, J):
"""
Defines the relationship matrix between stress and strain
:param A: cross sectional area of the beam
    :param E: Young's modulus of elasticity
:param mu: essentiall the shear modulous (E/(2*(1+poisson's ratio))
:param A1s: shear correction (5/6)
:param A2s: shear correction (5/6)
:param I1: moment of inertia
:param I2: moment of inertia
:param J: polar moment of inertia
:return D: variation on stiffness matrix
"""
D = np.array([[E*A, 0., 0., 0., 0., 0.],
[0., mu*A1s, 0., 0., 0., 0.],
[0., 0., mu*A2s, 0., 0., 0.],
[0., 0., 0., E*I1, 0., 0.],
[0., 0., 0., 0., E*I2, 0.],
[0., 0., 0., 0., 0., mu*J]])
return D
def get_B(Na, dNadx):
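    # Strain-displacement matrix for a single shape function of the 6-DOF beam
    # element; its rows pair with the diagonal of D (axial, two shear, two
    # bending, torsion).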
Ba = np.array([[0., 0., dNadx, 0., 0., 0. ],
[dNadx, 0., 0., 0., -Na, 0. ],
[0., dNadx, 0., Na, 0., 0. ],
[0., 0., 0., dNadx, 0., 0. ],
[0., 0., 0., 0., dNadx, 0. ],
[0., 0., 0., 0., 0., dNadx]])
return Ba
def solve_for_d(K, F):
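    # Solve the global linear system K d = F with a sparse direct solver.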
sK = sparse.csr_matrix(K)
d = spsolve(sK, F)
return d
def solve_for_displacements(d, Nell, he, g=0):
u = np.zeros(Nell+1)
x1 = 0.0
u[0] = (1.-x1)*d[0]
for e in np.arange(1, Nell):
x1 += he[e]
# u[e] = u[e-1] + (1.-x1)*d[e]
# u[e] = (1.-x1)*d[e]
u[e] = d[e]
# u[-1] = u[-2] + g
u[-1] = g
return u
def node_locations_x(Nell, he):
x_el = np.zeros(Nell + 1)
for e in np.arange(1, Nell):
x_el[e] = x_el[e-1] + he[e-1]
x_el[Nell] = x_el[Nell-1] + he[Nell-1]
return x_el
def quadrature_rule(Nint):
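    # Gauss-Legendre quadrature points and weights on [-1, 1] for 1, 2 or 3 points.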
if (Nint < 1 or Nint > 3) or type(Nint) != int:
raise ValueError('Nint must be and integer and one of 1, 2, 3')
gp = np.zeros(Nint)
w = np.zeros(Nint)
if Nint == 1:
gp[0] = 0.
w[0] = 2.
elif Nint == 2:
gp[0] = -1./np.sqrt(3.)
gp[1] = 1./np.sqrt(3.)
w[0] = 1.
w[1] = 1.
elif Nint == 3:
gp[0] = -np.sqrt(3./5.)
gp[1] = 0.
gp[2] = np.sqrt(3./5.)
w[0] = 5./9.
w[1] = 8./9.
w[2] = 5./9.
return gp, w
def get_u_of_x_approx(sol, he, Nell, Nint, p, ID, Nsamples, Ndof=6):
# get IEN array
IEN = ien_array(Nell, p, Ndof)
LM = lm_array(Nell, p, ID, IEN, Ndof)
LM_full = np.copy(LM)
[m, n] = np.shape(LM)
count_zeros = 0
for j in np.arange(0, n):
for i in np.arange(0, m):
if LM_full[i, j] == 0:
count_zeros += 1
LM_full[i, j] += count_zeros
# get quadrature points and weights in local coordinants
# xi_sample, w = quadrature_rule(Nint)
xi_sample = np.linspace(-1, 1, Nsamples)
# find number of samples
# Nsamples = xi_sample.size
# initialize displacement vector
u = np.zeros((Ndof, Nell * Nsamples+1))
# initialize error vector
error = np.zeros(Nell * Nint)
# initialize x vector
X = np.zeros(Nell * Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
# set up resulting x location vector
x_sample = np.zeros(Nell*Nsamples+1)
# loop over elements
print "start loop"
count = 0
count1 = 0
for e in np.arange(0, Nell):
# loop over samples
# for i in np.arange(0, Nsamples):
for i in np.arange(0, Nsamples):
B, Bdxi, Bdxidxi = local_bernstein(xi_sample[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e + 1, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e:e + p + 1], N, Nedxi, Nedxidxi)
x_sample[e*Nsamples+i] = x
# print x, xi_sample[i]
u_temp = np.zeros(Ndof)
for a in np.arange(0, p + 1):
for idof in np.arange(0, Ndof):
# idx = int(IEN[a*Ndof+idof, e]) - 1
#TODO correct the indexing
if LM[a * Ndof + idof, e] == 0:
count1 += 1
continue
# u_temp[idof] += N[a] * sol[e*Ndof+idof]
# u_temp[idof] += N[a] * sol[int(LM[a*Ndof+idof, e]+Ndof) - 1]
u_temp[idof] += N[a] * sol[int(LM_full[a*Ndof+idof, e]) - 1]
# u_temp[idof] += N[a] * sol[int(LM[a*Ndof+idof, e]+count1) - 1]
# u_temp[idof] += N[a] * sol[int(IEN[a*Ndof+idof, e]) - 1]
# u_temp[idof] += N[a] * sol[e*Ndof + idof]
# if np.any(u_temp) > 0:
# print "success"
# quit()
# u[int(e * Nint + i)]
u[:, count] = u_temp
count += 1
return u, x_sample
def get_u_of_x_exact(x, q, ffunc_num):
u_ex = 0.
if ffunc_num == 0:
u_ex = q*(1.-x**2)/2.
elif ffunc_num == 1:
u_ex = q*(1.-x**3)/6.
elif ffunc_num == 2:
u_ex = q * (1. - x ** 4) / 12.
return u_ex
def knot_vector(Nell, Xe, p, open=True):
"""
Construct knot vector
:param Nell: number of elements
:param he: array containing the length of each element
:param p: order of basis functions
:return knots: knot vector
"""
# initialize knot vector
knots = np.zeros([Nell+2*p+1])
# populate knot vector
if open:
knots[0:p+1] = Xe[0]
knots[-p-1:] = Xe[-1]
for i in np.arange(1, Nell):
knots[i+p] = Xe[i]
return knots
def greville_abscissae(S, p):
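    # Greville abscissae: each control-point location is the average of p
    # consecutive knots from the knot vector.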
Nell = len(S) - 2*p - 1
GA = np.zeros(Nell+p)
for i in np.arange(0, Nell+p):
GA[i] = (1./p)*(np.sum(S[i+1:i+p+1]))
# print i, GA[i], S[i+1:i+p+1], np.sum(S[i+1:i+p+1]), p
return GA
def get_id(case, Nell, p, Ndof=6):
ID = np.zeros([Ndof, Nell+p])
# cantilever L
if case == 0:
# print 'here in ', case
count = 0
for i in np.arange(1, Nell+p):
for j in np.arange(0, Ndof):
count += 1
ID[j, i] = count
# cantilever R
elif case == 1:
# print 'here in ', case
# print np.arange(1,Nell+p)
ID[2:] = np.arange(1,Nell+p-1)
# coding two part one
elif case == 2:
ID[0:Nell+p-1] = np.arange(1, Nell+p)
# simply supported (pin left, roller right)
elif case == 3:
count = 0
for i in np.arange(0, Nell + p):
for j in np.arange(0, Ndof):
if i == 0 and j != 4:
continue
elif i == Nell + p - 1 and (j == 0 or j == 1 or j == 3 or j == 5):
continue
count += 1
ID[j, i] = count
else:
raise ValueError('invalid support case')
# quit()
return ID
def ien_array(Nell, p, Ndof):
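    # IEN connectivity array: for each element, maps every local (basis
    # function, DOF) pair to its global basis-function number.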
Nell = int(Nell)
Ndof = int(Ndof)
p = int(p)
IEN = np.zeros([Ndof*(p+1), Nell])
for i in np.arange(0, p+1):
for j in np.arange(0, Ndof):
IEN[i*Ndof+j,:] = np.arange(i+1, i+1+Nell)
return IEN
def local_bernstein(xi, p):
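    # Evaluate the degree-p Bernstein basis and its first and second derivatives
    # at the parametric coordinate xi in [-1, 1].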
# check if xi is in the acceptable range
if np.any(xi < -1) or np.any(xi >1):
raise ValueError("the value of xi is $f, but must be in the range [-1, 1]" %xi)
# check if p is in the acceptable range for this code
# if p > 3 or p < 2:
# raise ValueError("the value of p must be 2 or 3, but %i was given" % p)
# initialize Bernstein polynomial vectors
B = np.zeros(p+1)
Bdxi = np.zeros(p+1)
Bdxidxi = np.zeros(p+1)
for a in np.arange(1., p + 2.):
        # compute common factor of B and its derivatives
eta = (1. / (2. ** p)) * (m.factorial(p) / (m.factorial(a - 1.) * m.factorial(p + 1. - a)))
# calculate the value and derivatives of each element of the Bernstein polynomial vector
# print eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
B[int(a - 1)] = eta * ((1. - xi) ** (p - (a - 1.))) * ((1. + xi) ** (a - 1.))
if xi == -1.:
if p == 2:
Bdxi[0] = -1.
Bdxi[1] = 1.
Bdxi[2] = 0.
Bdxidxi[0] = 0.5
Bdxidxi[1] = -1.0
Bdxidxi[2] = 0.5
elif p == 3:
Bdxi[0] = -1.5
Bdxi[1] = 1.5
Bdxi[2] = 0.
Bdxi[3] = 0.
Bdxidxi[0] = 1.5
Bdxidxi[1] = -3.
Bdxidxi[2] = 1.5
Bdxidxi[3] = 0.
elif xi == 1.:
if p == 2:
Bdxi[0] = 0.
Bdxi[1] = -1.
Bdxi[2] = 1.
Bdxidxi[0] = 0.5
Bdxidxi[1] = -1.0
Bdxidxi[2] = 0.5
if p == 3:
Bdxi[0] = 0.
Bdxi[1] = 0.
Bdxi[2] = -1.5
Bdxi[3] = 1.5
Bdxidxi[0] = 0.
Bdxidxi[1] = 1.5
Bdxidxi[2] = -3.
Bdxidxi[3] = 1.5
else:
# solve for the Bernstein polynomial vectors
for a in np.arange(1, p+2):
            # compute common factor of B and its derivatives
eta = (1./(2.**p))*(m.factorial(p)/(m.factorial(a-1.)*m.factorial(p+1.-a)))
# calculate the value and derivatives of each element of the Bernstein polynomial vector
# print eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
# B[a-1] = eta*((1.-xi)**(p-(a-1.)))*((1+xi)**(a-1.))
Bdxi[a-1] = eta*(((1.-xi)**(p-a+1.))*(a-1.)*((1.+xi)**(a-2.))-
((1.+xi)**(a-1.))*(p-a+1.)*((1.-xi)**(p-a)))
# set up terms for second derivative
t1 = ((1.-xi)**(p-a+1))*(a-2.)*((1+xi)**(a-3.))
t2 = -((1.+xi)**(a-2.))*(p-a+1.)*((1.-xi)**(p-a))
t3 = -((1.+xi)**(a-1.))*(p-a)*((1.-xi)**(p-a-1.))
t4 = ((1.-xi)**(p-a))*(a-1.)*((1.+xi)**(a-2.))
Bdxidxi[a-1] = eta*((a-1.)*(t1+t2)-(p-a+1.)*(t3+t4))
return B, Bdxi, Bdxidxi
def local_bezier_extraction(p, e, Nell, B, Bdxi, Bdxidxi):
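    # Bezier extraction: multiply the Bernstein basis by the element's extraction
    # operator C to recover the smooth B-spline basis functions on element e.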
# if Nell = 1 C = Identity
# determine the appropriate Bezier extraction matrix
if p == 1 or Nell == 1:
C = np.identity(p+1)
elif p == 2:
if e == 1:
C = np.array([[1., 0., 0. ],
[0., 1., 0.5],
[0., 0., 0.5]])
elif e >=2 and e <= Nell-1.:
C = np.array([[0.5, 0., 0. ],
[0.5, 1., 0.5],
[0., 0., 0.5]])
elif e == Nell:
C = np.array([[0.5, 0., 0.],
[0.5, 1., 0.],
[0., 0., 1.]])
else:
raise ValueError('Invalid value of e. Must be in [1, %i], but %i was given' % (Nell,e))
elif p == 3:
if e == 1:
C = np.array([[1., 0., 0., 0. ],
[0., 1., 0.5, 0.25 ],
[0., 0., 0.5, 7./12.],
[0., 0., 0., 1./6. ]])
elif e == 2:
C = np.array([[0.25, 0., 0., 0. ],
[7./12., 2./3., 1./3., 1./6.],
[1./6., 1./3., 2./3., 2./3.],
[0., 0., 0., 1./6.]])
elif e >= 3 and e <= Nell-2:
C = np.array([[1./6., 0., 0., 0. ],
[2./3., 2./3., 1./3., 1./6.],
[1./6., 1./3., 2./3., 2./3.],
[0., 0., 0., 1./6.]])
elif e == Nell-1.:
C = np.array([[1./6., 0., 0., 0. ],
[2./3., 2./3., 1./3., 1./6. ],
[1./6., 1./3., 2./3., 7./12.],
[0., 0., 0., 0.25 ]])
elif e == Nell:
C = np.array([[1./6., 0., 0., 0.],
[7./12., 0.5, 0., 0.],
[0.25, 0.5, 1., 0.],
[0., 0., 0., 1.]])
else:
raise ValueError('Invalid value of e. Must be in [1, %i], but %i was given' % (Nell, e))
else:
raise ValueError('p must be 2 or 3, but p=%f was given' % p)
# solve for the value of the Bezier basis function and derivatives on the element (Ne)
Ne = np.matmul(C, B)
Nedxi = np.matmul(C, Bdxi)
Nedxidxi = np.matmul(C, Bdxidxi)
return Ne, Nedxi, Nedxidxi
def global_bezier_extraction(GA, Ne, Nedxi, Nedxidxi):
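    # Map parametric basis derivatives to physical coordinates via the chain
    # rule, using the geometry interpolated from the Greville abscissae.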
# solve for xe and derivatives
xe = np.sum(GA*Ne)
# print GA, Nedxi
dxedxi = np.sum(GA*Nedxi)
dxedxedxidxi = np.sum(GA*Nedxidxi)
# derivatives of the basis function in global coordinates
Ndx = Nedxi/dxedxi
Ndxdx = (Nedxidxi - Ndx*dxedxedxidxi)/(dxedxi**2)
# print 'dxidxi', dxedxi
return Ndx, Ndxdx, dxedxi, xe
def error_quadrature(solution, p, Nell, Nint, he):
# get IEN array
IEN = ien_array(Nell, p)
# get quadrature points and weights in local coordinants
xi_sample, w = quadrature_rule(Nint)
# initialize displacement vector
u = np.zeros(Nell * Nint)
# initialize error vector
error = np.zeros(Nell * Nint)
# initialize x vector
X = np.zeros(Nell * Nint)
# get node locations in global coordinates
xe = node_locations_x(Nell, he)
# get the knot vector
S = knot_vector(Nell, xe, p)
# find the Greville Abscissae
ga = greville_abscissae(S, p)
# loop over elements
print "start loop"
for e in np.arange(0, Nell):
# loop over samples
# for i in np.arange(0, Nsamples):
for i in np.arange(0, xi_sample.size):
B, Bdxi, Bdxidxi = local_bernstein(xi_sample[i], p)
N, Nedxi, Nedxidxi = local_bezier_extraction(p, e + 1, Nell, B, Bdxi, Bdxidxi)
Ndx, Ndxdx, dxdxi, x = global_bezier_extraction(ga[e:e+p+1],N, Nedxi, Nedxidxi)
# print x, xi_sample[i]
for a in np.arange(0, p + 1):
u[int(e * Nint + i)] += N[a] * solution[int(IEN[a, e]) - 1]
u_ex = get_u_of_x_exact(x, 1, 2)
error[e * Nint + i] += ((u_ex - u[int(e * Nint + i)])**2)*dxdxi*w[i]
# print error, e, i, e * Nint + i, u, X
# print "end loop", error
error = np.sqrt(abs(np.sum(error)))
# initialize location array
# x = np.linspace(0., 1., Nell * Nsamples)
# x_ex = np.linspace(0., 1., 500)
# print x
# print u
# q = 1
# u_ex = get_u_of_x_exact(x_ex, q, 2)
# print error
# quit()
return error, X
def lm_array(Nell, p, ID, IEN, Ndof):
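    # Location matrix: global equation number for each local (basis function,
    # DOF) pair of every element; entries of 0 mark constrained DOFs.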
Nell = int(Nell)
p = int(p)
Ndof = int(Ndof)
LM = np.zeros([Ndof*(p+1), Nell])
for a in range(0, p+1):
for i in np.arange(0, Ndof):
for e in np.arange(0, Nell):
LM[a*Ndof+i, e] = ID[i, int(int(IEN[a*Ndof+i, e])-1)]
return LM
def plot_error():
E = I = 1.
Nint = 3
n = np.array([1, 10, 100])
theoretical_error = np.zeros([2, n.size])
real_error = np.zeros([2, n.size])
# slope = np.zeros([2, n.size-1])
q = 1
h = np.zeros([2, n.size])
nodes = np.zeros([2, n.size])
r_slope = np.zeros(2)
t_slope = np.zeros(2)
# print h, n
for p, i in zip(np.array([2, 3]), np.arange(0, 2)):
for Nell, j in zip(n, np.arange(n.size)):
print 'p = %i, N = %i' % (p, Nell)
# run_quadratic(Nell, Nint, p)
nodes[i,j] = Nell + p
he = np.ones(Nell) / Nell
h[i, j] = he[0]
ID = get_id(2, Nell, p)
K, F, d, sol, da = fem_solver(Nell, he, Nint, p, ID, E, I)
# u = solve_for_displacements(d, Nell, he, g=0)
real_error[i, j], x = error_quadrature(sol, p, Nell, Nint, he)
# u_ap = get_u_of_x_approx(x, u, he)
u_ex = get_u_of_x_exact(x, q, 2)
# print u_ap, u_ex
# error[i, j] = np.sum(n(u_ap - u_ex)**2)
theoretical_error[i, j] = (abs(u_ex[0])*he[0]**(p+1))
# print theoretical_error
# print "ffunc: %i, Nell: %i, Error: %f" % (ffunc_num, Nell, error[i, j])
r_slope[i] = -np.log(real_error[i, 2]/real_error[i, 0])/np.log(n[2]/n[0])
t_slope[i] = -np.log(theoretical_error[i, -1]/theoretical_error[i, 0])/np.log(n[-1]/n[0])
# print (np.log(error[1])-np.log(error[0]))/(x[1]-x[0])
# print real_error.shape
# quit()
# np.savetxt('error.txt', np.c_[n, he, np.transpose(error)], header="Nell, h, E(f(x)=c), E(f(x)=x), E(f(x)=x^2)")
# print h.shape, real_error.shape
plt.loglog(h[0, :], theoretical_error[0,:], '--or', label='A priori, $p=2, slope=%.3f$' % t_slope[0])
plt.loglog(h[0, :], real_error[0,:], '-or', label='Real, $p=2$, $slope=%.3f$' % r_slope[0])
plt.loglog(h[1, :], theoretical_error[1,:], '--ob', label='A priori, $p=3, slope=%.3f$' % t_slope[1])
plt.loglog(h[1, :], real_error[1,:], '-ob', label='Real, $p=3$, $slope=%.3f$' % r_slope[1])
# plt.loglog(he, error[2,:], '-o', label='$f(x)=x^2$')
leg = plt.legend(loc=4, frameon=False)
leg.get_frame().set_alpha(0.0)
plt.xlabel('$h$')
plt.ylabel('$Error$')
    plt.savefig('error_he.pdf', transparent=True)
plt.show()
plt.loglog(nodes[0, :], theoretical_error[0, :], '--or', label='A priori, $p=2$' % t_slope[0])
plt.loglog(nodes[0, :], real_error[0, :], '-or', label='Real, $p=2$' % r_slope[0])
plt.loglog(nodes[1, :], theoretical_error[1, :], '--ob', label='A priori, $p=3$' % t_slope[1])
plt.loglog(nodes[1, :], real_error[1, :], '-ob', label='Real, $p=3$' % r_slope[1])
# plt.loglog(he, error[2,:], '-o', label='$f(x)=x^2$')
leg=plt.legend(loc=1, frameon=False)
leg.get_frame().set_alpha(0.0)
plt.xlabel('$Nodes$')
plt.ylabel('$Error$')
plt.savefig('error_nodes.pdf', transparent=True)
plt.show()
return
def plot_displacements(u, x, he, Nell, q=1, ffunc=ffunc_constant, ffunc_args=np.array([1])):
plt.rcParams.update({'font.size': 22})
x_ex = np.linspace(0, 1., 100)
x_el = node_locations_x(Nell, he)
u_ex = get_u_of_x_exact(x_ex, q, ffunc_num=len(ffunc_args)-1)
u_a = get_u_of_x_approx(x, u, he)
plt.figure()
plt.plot(x_ex, u_ex, label="Exact sol.", linewidth=3)
# plt.plot(x_el, u, '-s', label="Approx. sol. (nodes)")
plt.plot(x, u_a, '--r', markerfacecolor='none', label="Approx. sol.", linewidth=3)
plt.xlabel('X Position')
plt.ylabel("Displacement")
functions = ["$f(x)=c$", "$f(x)=x$", "$f(x)=x^2$"]
# plt.title(functions[ffunc]+", $n=%i$" %Nell, y=1.02)
plt.legend(loc=3, frameon=False)
plt.tight_layout()
# plt.savefig("displacement_func%i_Nell%i.pdf" %(ffunc, Nell))
plt.show()
plt.close()
return
def beam_solution_1():
# problem parameters
    E = 200E9  # (Pa) modulus of elasticity for steel
nu = 0.3 # poisson's ratio for steel
b = 0.005
h = 0.005
d = 0.02
A = b*h
l = 1.
Nint = 3
Ndof = 6
Px = 10.
# get the moment of inertia of the cross section
Ix, Iy, _ = moment_of_inertia_rectangle(b, h)
# Ix, Iy, _ = moment_of_inertia_rod(d)
J = Ix + Iy # polar moment of inertia
# print Ix*E
# quit()
# set cases to use
coding_3_problem = 3
# support_case = 0 # 0: cantilever fixed on the left
# # 1: cantilever fixed on the right
# # 2: coding 2 part 1
# # 3: simply supported (pin left, roller right)
#
# load_case = 0 # 0: constant axial load
# # 1: constant transverse load
# # 2: Linearly distributed transverse load (0 at left, N at right)
if coding_3_problem == 2:
support_case = 0
load_case = 0
# number of elements
n = np.array([1, 10])
# order of basis
p_vector = np.array([1])
plotdofs = np.array([2])
leg_loc = 4
elif coding_3_problem == 3:
support_case = 0
load_case = 1
# number of elements
n = np.array([10, 100])
# order of basis
p_vector = np.array([1, 2, 3])
plotdofs = np.array([0, 4])
leg_loc = 3
elif coding_3_problem == 4:
support_case = 3
load_case = 2
# number of elements
n = np.array([10, 100])
# order of basis
p_vector = np.array([1, 2, 3])
plotdofs = np.array([0, 4])
leg_loc = 9
else:
raise ValueError('Invalid problem number')
# forcing function
ffunc = ffunc_beam
# forcing function arguments
ffunc_args = np.array([Px, 1., load_case])
max_deflection_fem = np.zeros([p_vector.size, n.size, Ndof])
max_deflection_theoretical = np.zeros([p_vector.size, n.size, Ndof])
nodes = np.zeros([p_vector.size, int(n.size)])
num = 50
x_exact = np.linspace(0, l + l / num, num)
u_exact = np.zeros((Ndof, x_exact.size))
if coding_3_problem == 2:
u_exact[2, :] = (-Px * (1. - (x_exact - 1.) ** 2) / (2. * E * A))
elif coding_3_problem == 3:
u_exact[0, :] = (-Px * x_exact ** 2) * (
x_exact ** 2 + 6. * l ** 2 - 4. * l * x_exact) / (24. * E * Ix)
u_exact[4, :] = (-Px * x_exact) * (
3. * l ** 2 - 3. * l * x_exact + x_exact ** 2) / (6. * E * Ix)
elif coding_3_problem == 4:
u_exact[0, :] = (-Px * x_exact / (360. * l * E * Ix)) * (
7. * l ** 4 - 10. * (l ** 2) * (x_exact ** 2) + 3. * x_exact ** 4)
u_exact[4, :] = (-Px / (360. * l * E * Ix)) * (
7. * l ** 4 - 30. * (l ** 2) * (x_exact ** 2) + 15. * x_exact ** 4)
for plotdof in plotdofs:
figure, axes = plt.subplots(p_vector.size, n.size, sharex=True, sharey=True)
for p, i in zip(p_vector, np.arange(0, p_vector.size)):
for Nell, j in zip(n, np.arange(0, n.size)):
# vector of element lengths
he = np.ones(Nell) / Nell
# vector of element locations
x = np.linspace(0, 1, 4 * Nell + 1)
# ID array
ID = get_id(support_case, Nell, p, Ndof)
# if Nell == 10:
#
# print ID
# quit()
nodes[i, j] = Nell+p
tic = time.time()
K, F, d, sol = fem_solver(Nell, he, Nint, p, ID, E, Ix, Iy, J, A, nu, ffunc=ffunc, ffunc_args=ffunc_args, case=support_case, Ndof=Ndof)
toc = time.time()
# print he, Nell, K
print "Time to run fem solver: %.3f (s)" % (toc - tic)
Nsamples = int(100./Nell)
u, x = get_u_of_x_approx(sol, he, Nell, Nint, p, ID, Nsamples)
# print np.array([1. / 6., 21. / 128., 7. / 48., 37. / 384., 0])
print "Time to solve for u(x): %.3f (s)" % (toc - tic)
print "Finished"
# print d, u, x[::-1]
# if Nell > 1:
max_deflection_fem[i, j, plotdof] = np.amax(np.abs(u[plotdof, :]))
max_deflection_theoretical[i, j, plotdof] = np.amax(np.abs(u_exact[plotdof, :]))
print ""
print "problem %i" % coding_3_problem
print "p=%i, Nell=%i, DOF=%i" %(p, Nell, plotdof)
print "Analytical max def: %s" %(max_deflection_theoretical[i, j, plotdof])
print "FEM max def: %s" %(max_deflection_fem[i, j, plotdof])
print ""
if p_vector.size == 1 and n.size == 1:
axes.plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes.plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
elif p_vector.size == 1:
axes[j].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[j].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
elif n.size == 1:
axes[i].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[i].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
else:
axes[i,j].plot(x_exact[:-1], u_exact[plotdof, :-1], '-r', linewidth=1.5, label='Analytic')
axes[i,j].plot(x[:-1], u[plotdof, :-1], '--b', label='FEM')
for i in np.arange(0, p_vector.size):
if p_vector.size == 1 and n.size == 1:
axes.set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes.legend(loc=leg_loc, frameon=False)
elif p_vector.size == 1:
axes[0].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[0].legend(loc=leg_loc, frameon=False)
elif n.size == 1:
axes[i].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[i].legend(loc=leg_loc, frameon=False)
else:
axes[i, 0].set_ylabel('Deflection, $p=%i$' % (p_vector[i]))
axes[0, -1].legend(loc=leg_loc, frameon=False)
for j in np.arange(0, n.size):
if p_vector.size == 1 and n.size == 1:
axes.set_xlabel('X Position')
axes.set_title('$N_{ell}=%i$' % (n[j]))
elif p_vector.size == 1:
axes[j].set_xlabel('X Position')
axes[j].set_title('$N_{ell}=%i$' % (n[j]))
elif n.size == 1:
axes[-1].set_xlabel('X Position')
axes[0].set_title('$N_{ell}=%i$' % (n[j]))
else:
axes[-1, j].set_xlabel('X Position')
axes[0, j].set_title('$N_{ell}=%i$' % (n[j]))
plt.tight_layout()
# axes[0,0].legend('Exact', 'FEM')
plt.savefig('beam1_deflection_prob%i_dof%i.pdf' % (coding_3_problem, plotdof))
plt.show()
# for plotdof in plotdofs:
# fig = plt.figure()
#
# plt.plot(nodes[0,:], max_deflection_thoeretical[plotdof,:], 'r', label='theoretical')
# plt.plot(nodes[0,:], max_deflection_fem[plotdof,:],'--ob', label='fem, p=2')
# # plt.plot(nodes[1,:], max_deflection_thoeretical[1,:], label='theoretical, p=3')
# plt.plot(nodes[1,:], max_deflection_fem[plotdof,:], '--og', label='fem, p=3')
# plt.xlabel('Nodes')
# plt.ylabel('Max Deflection')
# # plt.ylim([0.0028, 0.0032])
# plt.legend(loc = 0)
#
# plt.tight_layout()
# plt.savefig('max_deflection_vs_n.pdf', transparent=True)
# plt.show()
return
if __name__ == "__main__":
beam_solution_1() | mit | 4,212,410,585,531,477,000 | 30.11553 | 151 | 0.474754 | false |
librelab/qtmoko-test | qtopiacore/qt/util/local_database/qlocalexml2cpp.py | 1 | 18278 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation ([email protected])
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at [email protected].
## $QT_END_LICENSE$
##
#############################################################################
import sys
import xml.dom.minidom
def check_static_char_array_length(name, array):
    # some compilers, like VC6, don't allow static arrays larger than 64K bytes.
size = reduce(lambda x, y: x+len(escapedString(y)), array, 0)
if size > 65535:
print "\n\n\n#error Array %s is too long! " % name
sys.stderr.write("\n\n\nERROR: the content of the array '%s' is too long: %d > 65535 " % (name, size))
sys.exit(1)
def wrap_list(lst):
def split(lst, size):
for i in range(len(lst)/size+1):
yield lst[i*size:(i+1)*size]
return ",\n".join(map(lambda x: ", ".join(x), split(lst, 20)))
def firstChildElt(parent, name):
child = parent.firstChild
while child:
if child.nodeType == parent.ELEMENT_NODE \
and (not name or child.nodeName == name):
return child
child = child.nextSibling
return False
def nextSiblingElt(sibling, name):
sib = sibling.nextSibling
while sib:
if sib.nodeType == sibling.ELEMENT_NODE \
and (not name or sib.nodeName == name):
return sib
sib = sib.nextSibling
return False
def eltText(elt):
result = ""
child = elt.firstChild
while child:
if child.nodeType == elt.TEXT_NODE:
if result:
result += " "
result += child.nodeValue
child = child.nextSibling
return result
def loadLanguageMap(doc):
result = {}
language_list_elt = firstChildElt(doc.documentElement, "languageList")
language_elt = firstChildElt(language_list_elt, "language")
while language_elt:
language_id = int(eltText(firstChildElt(language_elt, "id")))
language_name = eltText(firstChildElt(language_elt, "name"))
language_code = eltText(firstChildElt(language_elt, "code"))
result[language_id] = (language_name, language_code)
language_elt = nextSiblingElt(language_elt, "language")
return result
def loadCountryMap(doc):
result = {}
country_list_elt = firstChildElt(doc.documentElement, "countryList")
country_elt = firstChildElt(country_list_elt, "country")
while country_elt:
country_id = int(eltText(firstChildElt(country_elt, "id")))
country_name = eltText(firstChildElt(country_elt, "name"))
country_code = eltText(firstChildElt(country_elt, "code"))
result[country_id] = (country_name, country_code)
country_elt = nextSiblingElt(country_elt, "country")
return result
def loadDefaultMap(doc):
result = {}
list_elt = firstChildElt(doc.documentElement, "defaultCountryList")
elt = firstChildElt(list_elt, "defaultCountry")
while elt:
country = eltText(firstChildElt(elt, "country"));
language = eltText(firstChildElt(elt, "language"));
result[language] = country;
elt = nextSiblingElt(elt, "defaultCountry");
return result
def fixedCountryName(name, dupes):
if name in dupes:
return name + "Country"
return name
def fixedLanguageName(name, dupes):
if name in dupes:
return name + "Language"
return name
def findDupes(country_map, language_map):
country_set = set([ v[0] for a, v in country_map.iteritems() ])
language_set = set([ v[0] for a, v in language_map.iteritems() ])
return country_set & language_set
def languageNameToId(name, language_map):
for key in language_map.keys():
if language_map[key][0] == name:
return key
return -1
def countryNameToId(name, country_map):
for key in country_map.keys():
if country_map[key][0] == name:
return key
return -1
def convertFormat(format):
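    # Translate a CLDR date/time pattern into the equivalent Qt format string,
    # copying quoted literals verbatim, mapping EEEE/EEE/a/z tokens and dropping v.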
result = ""
i = 0
while i < len(format):
if format[i] == "'":
result += "'"
i += 1
while i < len(format) and format[i] != "'":
result += format[i]
i += 1
if i < len(format):
result += "'"
i += 1
else:
s = format[i:]
if s.startswith("EEEE"):
result += "dddd"
i += 4
elif s.startswith("EEE"):
result += "ddd"
i += 3
elif s.startswith("a"):
result += "AP"
i += 1
elif s.startswith("z"):
result += "t"
i += 1
elif s.startswith("v"):
i += 1
else:
result += format[i]
i += 1
return result
class Locale:
def __init__(self, elt):
self.language = eltText(firstChildElt(elt, "language"))
self.country = eltText(firstChildElt(elt, "country"))
self.decimal = int(eltText(firstChildElt(elt, "decimal")))
self.group = int(eltText(firstChildElt(elt, "group")))
self.listDelim = int(eltText(firstChildElt(elt, "list")))
self.percent = int(eltText(firstChildElt(elt, "percent")))
self.zero = int(eltText(firstChildElt(elt, "zero")))
self.minus = int(eltText(firstChildElt(elt, "minus")))
self.plus = int(eltText(firstChildElt(elt, "plus")))
self.exp = int(eltText(firstChildElt(elt, "exp")))
self.am = eltText(firstChildElt(elt, "am"))
self.pm = eltText(firstChildElt(elt, "pm"))
self.longDateFormat = convertFormat(eltText(firstChildElt(elt, "longDateFormat")))
self.shortDateFormat = convertFormat(eltText(firstChildElt(elt, "shortDateFormat")))
self.longTimeFormat = convertFormat(eltText(firstChildElt(elt, "longTimeFormat")))
self.shortTimeFormat = convertFormat(eltText(firstChildElt(elt, "shortTimeFormat")))
self.standaloneLongMonths = eltText(firstChildElt(elt, "standaloneLongMonths"))
self.standaloneShortMonths = eltText(firstChildElt(elt, "standaloneShortMonths"))
self.standaloneNarrowMonths = eltText(firstChildElt(elt, "standaloneNarrowMonths"))
self.longMonths = eltText(firstChildElt(elt, "longMonths"))
self.shortMonths = eltText(firstChildElt(elt, "shortMonths"))
self.narrowMonths = eltText(firstChildElt(elt, "narrowMonths"))
self.standaloneLongDays = eltText(firstChildElt(elt, "standaloneLongDays"))
self.standaloneShortDays = eltText(firstChildElt(elt, "standaloneShortDays"))
self.standaloneNarrowDays = eltText(firstChildElt(elt, "standaloneNarrowDays"))
self.longDays = eltText(firstChildElt(elt, "longDays"))
self.shortDays = eltText(firstChildElt(elt, "shortDays"))
self.narrowDays = eltText(firstChildElt(elt, "narrowDays"))
def loadLocaleMap(doc, language_map, country_map):
result = {}
locale_list_elt = firstChildElt(doc.documentElement, "localeList")
locale_elt = firstChildElt(locale_list_elt, "locale")
while locale_elt:
locale = Locale(locale_elt)
language_id = languageNameToId(locale.language, language_map)
country_id = countryNameToId(locale.country, country_map)
result[(language_id, country_id)] = locale
locale_elt = nextSiblingElt(locale_elt, "locale")
return result
def compareLocaleKeys(key1, key2):
if key1 == key2:
return 0
if key1[0] == key2[0]:
l1 = compareLocaleKeys.locale_map[key1]
l2 = compareLocaleKeys.locale_map[key2]
if l1.language in compareLocaleKeys.default_map:
default = compareLocaleKeys.default_map[l1.language]
if l1.country == default:
return -1
if l2.country == default:
return 1
else:
return key1[0] - key2[0]
return key1[1] - key2[1]
def languageCount(language_id, locale_map):
result = 0
for key in locale_map.keys():
if key[0] == language_id:
result += 1
return result
class StringDataToken:
def __init__(self, index, length):
self.index = index
self.length = length
def __str__(self):
return " %d,%d " % (self.index, self.length)
class StringData:
def __init__(self):
self.data = []
self.hash = {}
def append(self, s):
if s in self.hash:
return self.hash[s]
lst = map(lambda x: hex(ord(x)), s)
token = StringDataToken(len(self.data), len(lst))
self.hash[s] = token
self.data += lst
return token
def escapedString(s):
result = ""
i = 0
while i < len(s):
if s[i] == '"':
result += '\\"'
i += 1
else:
result += s[i]
i += 1
s = result
line = ""
need_escape = False
result = ""
for c in s:
if ord(c) < 128 and (not need_escape or ord(c.lower()) < ord('a') or ord(c.lower()) > ord('f')):
line += c
need_escape = False
else:
line += "\\x%02x" % (ord(c))
need_escape = True
if len(line) > 80:
result = result + "\n" + "\"" + line + "\""
line = ""
line += "\\0"
result = result + "\n" + "\"" + line + "\""
if result[0] == "\n":
result = result[1:]
return result
def printEscapedString(s):
print escapedString(s);
def main():
doc = xml.dom.minidom.parse("locale.xml")
language_map = loadLanguageMap(doc)
country_map = loadCountryMap(doc)
default_map = loadDefaultMap(doc)
locale_map = loadLocaleMap(doc, language_map, country_map)
dupes = findDupes(language_map, country_map)
# Language enum
print "enum Language {"
language = ""
for key in language_map.keys():
language = fixedLanguageName(language_map[key][0], dupes)
print " " + language + " = " + str(key) + ","
print " LastLanguage = " + language
print "};"
print
# Country enum
print "enum Country {"
country = ""
for key in country_map.keys():
country = fixedCountryName(country_map[key][0], dupes)
print " " + country + " = " + str(key) + ","
print " LastCountry = " + country
print "};"
print
# Locale index
print "static const uint locale_index[] = {"
print " 0, // unused"
index = 0
for key in language_map.keys():
i = 0
count = languageCount(key, locale_map)
if count > 0:
i = index
index += count
print "%6d, // %s" % (i, language_map[key][0])
print " 0 // trailing 0"
print "};"
print
date_format_data = StringData()
time_format_data = StringData()
months_data = StringData()
standalone_months_data = StringData()
days_data = StringData()
am_data = StringData()
pm_data = StringData()
# Locale data
print "static const QLocalePrivate locale_data[] = {"
print "// lang terr dec group list prcnt zero minus plus exp sDtFmt lDtFmt sTmFmt lTmFmt ssMonth slMonth sMonth lMonth sDays lDays am,len pm,len"
locale_keys = locale_map.keys()
compareLocaleKeys.default_map = default_map
compareLocaleKeys.locale_map = locale_map
locale_keys.sort(compareLocaleKeys)
for key in locale_keys:
l = locale_map[key]
print " { %6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%6d,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s }, // %s/%s" \
% (key[0], key[1],
l.decimal,
l.group,
l.listDelim,
l.percent,
l.zero,
l.minus,
l.plus,
l.exp,
date_format_data.append(l.shortDateFormat),
date_format_data.append(l.longDateFormat),
time_format_data.append(l.shortTimeFormat),
time_format_data.append(l.longTimeFormat),
standalone_months_data.append(l.standaloneShortMonths),
standalone_months_data.append(l.standaloneLongMonths),
standalone_months_data.append(l.standaloneNarrowMonths),
months_data.append(l.shortMonths),
months_data.append(l.longMonths),
months_data.append(l.narrowMonths),
days_data.append(l.standaloneShortDays),
days_data.append(l.standaloneLongDays),
days_data.append(l.standaloneNarrowDays),
days_data.append(l.shortDays),
days_data.append(l.longDays),
days_data.append(l.narrowDays),
am_data.append(l.am),
pm_data.append(l.pm),
l.language,
l.country)
print " { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, 0,0 } // trailing 0s"
print "};"
print
# Date format data
#check_static_char_array_length("date_format", date_format_data.data)
print "static const ushort date_format_data[] = {"
print wrap_list(date_format_data.data)
print "};"
print
# Time format data
#check_static_char_array_length("time_format", time_format_data.data)
print "static const ushort time_format_data[] = {"
print wrap_list(time_format_data.data)
print "};"
print
# Months data
#check_static_char_array_length("months", months_data.data)
print "static const ushort months_data[] = {"
print wrap_list(months_data.data)
print "};"
print
# Standalone months data
#check_static_char_array_length("standalone_months", standalone_months_data.data)
print "static const ushort standalone_months_data[] = {"
print wrap_list(standalone_months_data.data)
print "};"
print
# Days data
#check_static_char_array_length("days", days_data.data)
print "static const ushort days_data[] = {"
print wrap_list(days_data.data)
print "};"
print
# AM data
#check_static_char_array_length("am", am_data.data)
print "static const ushort am_data[] = {"
print wrap_list(am_data.data)
print "};"
print
# PM data
#check_static_char_array_length("pm", am_data.data)
print "static const ushort pm_data[] = {"
print wrap_list(pm_data.data)
print "};"
print
# Language name list
print "static const char language_name_list[] ="
print "\"Default\\0\""
for key in language_map.keys():
print "\"" + language_map[key][0] + "\\0\""
print ";"
print
# Language name index
print "static const uint language_name_index[] = {"
print " 0, // Unused"
index = 8
for key in language_map.keys():
language = language_map[key][0]
print "%6d, // %s" % (index, language)
index += len(language) + 1
print "};"
print
# Country name list
print "static const char country_name_list[] ="
print "\"Default\\0\""
for key in country_map.keys():
if key == 0:
continue
print "\"" + country_map[key][0] + "\\0\""
print ";"
print
# Country name index
print "static const uint country_name_index[] = {"
print " 0, // AnyCountry"
index = 8
for key in country_map.keys():
if key == 0:
continue
country = country_map[key][0]
print "%6d, // %s" % (index, country)
index += len(country) + 1
print "};"
print
# Language code list
print "static const unsigned char language_code_list[] ="
print "\" \\0\" // Unused"
for key in language_map.keys():
code = language_map[key][1]
if len(code) == 2:
code += r"\0"
print "\"%2s\" // %s" % (code, language_map[key][0])
print ";"
print
# Country code list
print "static const unsigned char country_code_list[] ="
for key in country_map.keys():
print "\"%2s\" // %s" % (country_map[key][1], country_map[key][0])
print ";"
if __name__ == "__main__":
main()
| gpl-2.0 | 4,154,200,599,349,329,400 | 32.661142 | 248 | 0.57747 | false |
anurag03/integration_tests | cfme/tests/cloud_infra_common/test_discovery.py | 1 | 2639 | # -*- coding: utf-8 -*-
import pytest
import time
from cfme.common.provider import BaseProvider
from cfme.exceptions import CFMEException
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import TimedOutError
from cfme import test_requirements
pytestmark = [
pytest.mark.tier(2),
test_requirements.discovery,
pytest.mark.provider([BaseProvider], scope='module')
]
@pytest.fixture(scope="module")
def vm_name():
return random_vm_name("dscvry")
@pytest.fixture(scope="module")
def vm_crud(vm_name, provider):
collection = provider.appliance.provider_based_collection(provider)
return collection.instantiate(vm_name, provider)
def if_scvmm_refresh_provider(provider):
# No eventing from SCVMM so force a relationship refresh
if isinstance(provider, SCVMMProvider):
provider.refresh_provider_relationships()
def wait_for_vm_state_changes(vm, timeout=600):
count = 0
while count < timeout:
try:
vm_state = vm.find_quadicon(from_any_provider=True).data['state'].lower()
logger.info("Quadicon state for %s is %s", vm.name, repr(vm_state))
if "archived" in vm_state:
return True
elif "orphaned" in vm_state:
raise CFMEException("VM should be Archived but it is Orphaned now.")
except Exception as e:
logger.exception(e)
pass
time.sleep(15)
count += 15
if count > timeout:
raise CFMEException("VM should be Archived but it is Orphaned now.")
@pytest.mark.rhv2
def test_vm_discovery(request, setup_provider, provider, vm_crud):
""" Tests whether cfme will discover a vm change (add/delete) without being manually refreshed.
    Prerequisites:
* Desired provider set up
Steps:
* Create a virtual machine on the provider.
* Wait for the VM to appear
* Delete the VM from the provider (not using CFME)
* Wait for the VM to become Archived.
Metadata:
test_flag: discovery
"""
@request.addfinalizer
def _cleanup():
vm_crud.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
vm_crud.create_on_provider(allow_skip="default")
if_scvmm_refresh_provider(provider)
try:
vm_crud.wait_to_appear(timeout=600, load_details=False)
except TimedOutError:
pytest.fail("VM was not found in CFME")
vm_crud.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
wait_for_vm_state_changes(vm_crud)
| gpl-2.0 | 7,873,537,507,350,937,000 | 28.651685 | 99 | 0.674877 | false |
turdusmerula/kipartman | kipartbase/swagger_server/models/part_offer_data.py | 1 | 7328 | # coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class PartOfferData(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, packaging_unit=None, quantity=None, min_order_quantity=None, unit_price=None, available_stock=None, packaging=None, currency=None, sku=None, updated=None):
"""
PartOfferData - a model defined in Swagger
:param packaging_unit: The packaging_unit of this PartOfferData.
:type packaging_unit: int
:param quantity: The quantity of this PartOfferData.
:type quantity: int
:param min_order_quantity: The min_order_quantity of this PartOfferData.
:type min_order_quantity: int
:param unit_price: The unit_price of this PartOfferData.
:type unit_price: float
:param available_stock: The available_stock of this PartOfferData.
:type available_stock: int
:param packaging: The packaging of this PartOfferData.
:type packaging: str
:param currency: The currency of this PartOfferData.
:type currency: str
:param sku: The sku of this PartOfferData.
:type sku: str
:param updated: The updated of this PartOfferData.
:type updated: str
"""
self.swagger_types = {
'packaging_unit': int,
'quantity': int,
'min_order_quantity': int,
'unit_price': float,
'available_stock': int,
'packaging': str,
'currency': str,
'sku': str,
'updated': str
}
self.attribute_map = {
'packaging_unit': 'packaging_unit',
'quantity': 'quantity',
'min_order_quantity': 'min_order_quantity',
'unit_price': 'unit_price',
'available_stock': 'available_stock',
'packaging': 'packaging',
'currency': 'currency',
'sku': 'sku',
'updated': 'updated'
}
self._packaging_unit = packaging_unit
self._quantity = quantity
self._min_order_quantity = min_order_quantity
self._unit_price = unit_price
self._available_stock = available_stock
self._packaging = packaging
self._currency = currency
self._sku = sku
self._updated = updated
@classmethod
def from_dict(cls, dikt):
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The PartOfferData of this PartOfferData.
:rtype: PartOfferData
"""
return deserialize_model(dikt, cls)
@property
def packaging_unit(self):
"""
Gets the packaging_unit of this PartOfferData.
:return: The packaging_unit of this PartOfferData.
:rtype: int
"""
return self._packaging_unit
@packaging_unit.setter
def packaging_unit(self, packaging_unit):
"""
Sets the packaging_unit of this PartOfferData.
:param packaging_unit: The packaging_unit of this PartOfferData.
:type packaging_unit: int
"""
self._packaging_unit = packaging_unit
@property
def quantity(self):
"""
Gets the quantity of this PartOfferData.
:return: The quantity of this PartOfferData.
:rtype: int
"""
return self._quantity
@quantity.setter
def quantity(self, quantity):
"""
Sets the quantity of this PartOfferData.
:param quantity: The quantity of this PartOfferData.
:type quantity: int
"""
self._quantity = quantity
@property
def min_order_quantity(self):
"""
Gets the min_order_quantity of this PartOfferData.
:return: The min_order_quantity of this PartOfferData.
:rtype: int
"""
return self._min_order_quantity
@min_order_quantity.setter
def min_order_quantity(self, min_order_quantity):
"""
Sets the min_order_quantity of this PartOfferData.
:param min_order_quantity: The min_order_quantity of this PartOfferData.
:type min_order_quantity: int
"""
self._min_order_quantity = min_order_quantity
@property
def unit_price(self):
"""
Gets the unit_price of this PartOfferData.
:return: The unit_price of this PartOfferData.
:rtype: float
"""
return self._unit_price
@unit_price.setter
def unit_price(self, unit_price):
"""
Sets the unit_price of this PartOfferData.
:param unit_price: The unit_price of this PartOfferData.
:type unit_price: float
"""
self._unit_price = unit_price
@property
def available_stock(self):
"""
Gets the available_stock of this PartOfferData.
:return: The available_stock of this PartOfferData.
:rtype: int
"""
return self._available_stock
@available_stock.setter
def available_stock(self, available_stock):
"""
Sets the available_stock of this PartOfferData.
:param available_stock: The available_stock of this PartOfferData.
:type available_stock: int
"""
self._available_stock = available_stock
@property
def packaging(self):
"""
Gets the packaging of this PartOfferData.
:return: The packaging of this PartOfferData.
:rtype: str
"""
return self._packaging
@packaging.setter
def packaging(self, packaging):
"""
Sets the packaging of this PartOfferData.
:param packaging: The packaging of this PartOfferData.
:type packaging: str
"""
self._packaging = packaging
@property
def currency(self):
"""
Gets the currency of this PartOfferData.
:return: The currency of this PartOfferData.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this PartOfferData.
:param currency: The currency of this PartOfferData.
:type currency: str
"""
self._currency = currency
@property
def sku(self):
"""
Gets the sku of this PartOfferData.
:return: The sku of this PartOfferData.
:rtype: str
"""
return self._sku
@sku.setter
def sku(self, sku):
"""
Sets the sku of this PartOfferData.
:param sku: The sku of this PartOfferData.
:type sku: str
"""
self._sku = sku
@property
def updated(self):
"""
Gets the updated of this PartOfferData.
:return: The updated of this PartOfferData.
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""
Sets the updated of this PartOfferData.
:param updated: The updated of this PartOfferData.
:type updated: str
"""
self._updated = updated
| gpl-3.0 | 7,979,520,396,145,676,000 | 25.941176 | 178 | 0.586517 | false |